Example #1
    def build_model_layers(self):
        # here we build the TensorFlow graph of the model
        self.is_training = tf.placeholder(tf.bool)

        self.graphs = tf.placeholder(
            tf.float32, shape=[None, self.config.node_labels + 1, None,
                               None])  # input graphs: N x (node_labels+1) x M x M

        new_suffix = self.config.architecture.new_suffix  # True or False

        # build network architecture using config file
        net = self.graphs
        net = blocks.regular_block(net, 'b0', self.config,
                                   self.config.architecture.block_features[0],
                                   self.is_training)
        if new_suffix:
            hidden_outputs = [net]

        for layer in range(1, len(self.config.architecture.block_features)):
            net = blocks.regular_block(
                net, 'b{}'.format(layer), self.config,
                self.config.architecture.block_features[layer],
                self.is_training)
            if new_suffix:
                hidden_outputs.append(net)

        if not new_suffix:
            # Old suffix implementation - suffix (i) from paper
            net = layers.diag_offdiag_maxpool(net)
            net = layers.fully_connected(net, 512, "fully1")
            net = layers.fully_connected(net, 256, "fully2")
            out = layers.fully_connected(net,
                                         self.config.num_classes,
                                         "fully3",
                                         activation_fn=None)

        if new_suffix:
            # New suffix implementation - suffix (ii) from paper
            out = 0
            for i, h in enumerate(hidden_outputs):
                pooled_h = layers.diag_offdiag_maxpool(h)
                fully = layers.fully_connected(pooled_h,
                                               self.config.num_classes,
                                               "fully{}".format(i),
                                               activation_fn=None)
                out += fully

        return out
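
Both suffix variants above pool with layers.diag_offdiag_maxpool. Judging from the "NxFxMxM -> Nx2F" shape comments in the later examples, it takes the max over each matrix's diagonal and the max over its off-diagonal entries, then concatenates the two along the feature axis. A minimal PyTorch sketch of such a pooling, assuming that convention (the actual layers module may differ in details):

    import torch

    def diag_offdiag_maxpool_sketch(x):
        # x: (N, F, M, M) -> (N, 2F)
        max_diag = torch.diagonal(x, dim1=-2, dim2=-1).max(dim=-1)[0]  # (N, F)
        # mask the diagonal with -inf so the global max sees only
        # off-diagonal entries
        m = x.shape[-1]
        eye = torch.eye(m, dtype=torch.bool, device=x.device)
        masked = x.masked_fill(eye, float('-inf'))
        max_offdiag = masked.flatten(start_dim=-2).max(dim=-1)[0]      # (N, F)
        return torch.cat([max_diag, max_offdiag], dim=1)               # (N, 2F)

For an N x F x M x M input this returns N x 2F, matching the shape comments in the examples below.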
Example #2
    def forward(self, input):
        x = input
        scores = torch.tensor(0, device=input.device, dtype=x.dtype)

        for i, block in enumerate(self.reg_blocks):

            x = block(x)

            if self.config.architecture.new_suffix:
                # use new suffix
                scores = self.fc_layers[i](layers.diag_offdiag_maxpool(x)) + scores

        if not self.config.architecture.new_suffix:
            # old suffix
            bs = x.shape[0]
            x_max = torch.max(x, dim=2)[0]
            x_sum = torch.sum(x, dim=2)
            if self.use_histogram:
                hist = self.calculate_histogram(x_max, x_sum)
            x = layers.diag_offdiag_maxpool(x)  # NxFxMxM -> Nx2F
            for fc in self.fc_layers:
                x = fc(x)
            scores = x

        # If SimGNN, pair up the embeddings and apply another 3-layer MLP.
        # Afterwards geds has shape (n, n).
        if self.is_SimGNN:
            n = scores.shape[0]
            if self.use_NTN:
                x = self.SimGNN_NTN(scores)
            else:
                d = scores.shape[1]
                input1 = scores.expand(n, n, d)
                input2 = torch.transpose(input1, 0, 1)
                x = torch.reshape(torch.cat((input1, input2), 2), (n * n, 2 * d))
            if self.use_histogram:
                x = torch.cat((x, hist), -1)
            for fc in self.SimGNN_layers:
                x = fc(x)
            geds = torch.reshape(x, (n, n))
            return geds

        return scores
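
The expand/transpose/cat idiom above builds all n^2 ordered pairs of graph embeddings in one tensor. A small standalone illustration of the pattern (shapes and values here are illustrative):

    import torch

    scores = torch.randn(3, 4)               # n = 3 graph embeddings, d = 4
    n, d = scores.shape
    input1 = scores.expand(n, n, d)          # input1[i, j] == scores[j]
    input2 = torch.transpose(input1, 0, 1)   # input2[i, j] == scores[i]
    pairs = torch.cat((input1, input2), 2).reshape(n * n, 2 * d)
    # row i*n + j holds the concatenation (scores[j], scores[i])
    assert torch.equal(pairs[1 * n + 2], torch.cat((scores[2], scores[1])))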
Example #3
    def forward(self, input):
        x = input
        scores = torch.tensor(0, device=input.device, dtype=x.dtype)

        for i, block in enumerate(self.reg_blocks):

            x = block(x)

            if self.config.architecture.new_suffix:
                # use new suffix
                scores = self.fc_layers[i](
                    layers.diag_offdiag_maxpool(x)) + scores

        if not self.config.architecture.new_suffix:
            # old suffix
            x = layers.diag_offdiag_maxpool(x)  # NxFxMxM -> Nx2F
            for fc in self.fc_layers:
                x = fc(x)
            scores = x

        return scores
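
All of these snippets read hyperparameters from a config object. A hypothetical fragment consistent with the attribute accesses in Examples #1-#3 (the field names come from the code; the values are illustrative, and Examples #4-#5 instead treat config.architecture as a flat list of layer widths):

    config = {
        "num_classes": 2,
        "node_labels": 7,
        "optimizer": "adam",
        "learning_rate": 1e-4,
        "momentum": 0.9,
        "decay_rate": 0.5,
        "architecture": {
            "new_suffix": True,
            "block_features": [256, 256, 256],
        },
    }
    # typically wrapped for attribute access (e.g. an EasyDict-style object)
    # so that config.architecture.new_suffix works as in the code above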
Example #4
    def build_model(self):
        # here we build the TensorFlow graph of the model and define the loss
        self.is_training = tf.placeholder(tf.bool)

        self.graphs = tf.placeholder(
            tf.float32, shape=[None, self.config.node_labels + 1, None, None])
        self.labels = tf.placeholder(tf.int32, shape=[None])

        # build network architecture using config file
        net = eq.equi_2_to_2('equi0', self.data.train_graphs[0].shape[0],
                             self.config.architecture[0], self.graphs)
        net = tf.nn.relu(net, name='relu0')
        for layer in range(1, len(self.config.architecture)):
            net = eq.equi_2_to_2('equi%d' % layer,
                                 self.config.architecture[layer - 1],
                                 self.config.architecture[layer], net)
            net = tf.nn.relu(net, name='relu%d' % layer)

        net = layers.diag_offdiag_maxpool(net)

        net = layers.fully_connected(net, 512, "fully1")
        net = layers.fully_connected(net, 256, "fully2")
        net = layers.fully_connected(net,
                                     self.config.num_classes,
                                     "fully3",
                                     activation_fn=None)

        # define loss function
        with tf.name_scope("loss"):
            self.loss = tf.reduce_sum(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=self.labels, logits=net))
            self.correct_predictions = tf.reduce_sum(
                tf.cast(
                    tf.equal(tf.argmax(net, 1, output_type=tf.int32),
                             self.labels), tf.int32))

        # get learning rate with decay every 20 epochs
        learning_rate = self.get_learning_rate(self.global_step_tensor,
                                               self.data.train_size * 20)

        # choose optimizer
        if self.config.optimizer == 'momentum':
            self.optimizer = tf.train.MomentumOptimizer(
                learning_rate, momentum=self.config.momentum)
        elif self.config.optimizer == 'adam':
            self.optimizer = tf.train.AdamOptimizer(learning_rate)

        # define train step
        self.train_op = self.optimizer.minimize(
            self.loss, global_step=self.global_step_tensor)
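
Since this is TF 1.x graph code, training means feeding the placeholders defined above through a session. A hypothetical single step, where model is an instance exposing the attributes built in build_model and the batch shapes follow the placeholder definitions:

    import numpy as np
    import tensorflow as tf  # TF 1.x API, as in the snippet above

    # `model` is a hypothetical instance of the class above
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        batch = np.random.rand(32, model.config.node_labels + 1, 20, 20)
        labels = np.random.randint(0, model.config.num_classes, size=32)
        _, loss = sess.run(
            [model.train_op, model.loss],
            feed_dict={model.graphs: batch,
                       model.labels: labels,
                       model.is_training: True})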
Example #5
    def __init__(self, config, data):
        super(invariant_basic, self).__init__(config)
        self.data = data
#         self.init_saver()
        
        self.is_training = torch.ones(1, dtype=torch.bool)  # Variable is deprecated; a plain tensor suffices
        
        equi_2_to_2_list = [eq.equi_2_to_2(self.data.train_graphs[0].shape[0],
                                           self.config.architecture[0],
                                           self.config.device),
                            torch.nn.ReLU()]

        for layer in range(1, len(self.config.architecture)):
            equi_2_to_2_list.append(
                eq.equi_2_to_2(self.config.architecture[layer - 1],
                               self.config.architecture[layer],
                               self.config.device))
            equi_2_to_2_list.append(torch.nn.ReLU())
            
        equi_2_to_2_list.append(layers.diag_offdiag_maxpool())  # NxFxMxM -> Nx2F

        # input width 128 must equal 2x the last layer's feature count after pooling
        equi_2_to_2_list.append(layers.fully_connected(128, 512))
        equi_2_to_2_list.append(layers.fully_connected(512, 256))
        equi_2_to_2_list.append(
            layers.fully_connected(256, self.config.num_classes,
                                   activation_fn=None))

        self.net = torch.nn.ModuleList(equi_2_to_2_list)
        
        # define loss function
        self.loss = torch.nn.CrossEntropyLoss()
#         self.loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels, logits=net))
#         self.correct_predictions = tf.reduce_sum(tf.cast(tf.equal(tf.argmax(net, 1, output_type=tf.int32), self.labels), tf.int32))

        # choose optimizer
        if self.config.optimizer == 'momentum':
            self.optimizer = torch.optim.SGD(self.net.parameters(),
                                             lr=self.config.learning_rate,
                                             momentum=self.config.momentum)
        elif self.config.optimizer == 'adam':
            self.optimizer = torch.optim.Adam(self.net.parameters(),
                                              lr=self.config.learning_rate)
            
        # decay the learning rate every 20 epochs (step_size counts optimizer steps)
        self.learning_rate_scheduler = torch.optim.lr_scheduler.StepLR(
            self.optimizer,
            step_size=self.data.train_size * 20,
            # decay rate, artificially divided by 10 due to the change in lr
            # scheduler use; adjust this to match the original TF code
            gamma=self.config.decay_rate / 10)
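
A hypothetical training step wiring together the pieces built in this __init__ (model, graphs, and labels are illustrative names; the scheduler is stepped once per iteration since its step_size is measured in optimizer steps):

    import torch

    def train_step(model, graphs, labels):
        # `model` is a hypothetical invariant_basic instance; `graphs` is an
        # N x (node_labels + 1) x M x M float tensor, `labels` an N-vector
        model.optimizer.zero_grad()
        x = graphs
        for layer in model.net:          # the ModuleList assembled above
            x = layer(x)
        loss = model.loss(x, labels)     # CrossEntropyLoss on the logits
        loss.backward()
        model.optimizer.step()
        model.learning_rate_scheduler.step()
        return loss.item()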