Example #1
    def run_epoch(self, epoch_name: str, epoch_num, data, is_training: bool):
        loss = 0
        edge_loss, kl_loss, node_symbol_loss = 0, 0, 0
        start_time = time.time()
        processed_graphs = 0
        batch_iterator = ThreadedIterator(
            self.make_minibatch_iterator(data, is_training), max_queue_size=5)

        for step, batch_data in enumerate(batch_iterator):
            num_graphs = batch_data[self.placeholders['num_graphs']]
            processed_graphs += num_graphs
            batch_data[self.placeholders['is_generative']] = False
            # Randomly sample from normal distribution
            batch_data[self.placeholders['z_prior']] = utils.generate_std_normal(
                self.params['batch_size'],
                batch_data[self.placeholders['num_vertices']],
                self.params['encoding_size'])
            batch_data[self.placeholders['z_prior_in']] = utils.generate_std_normal(
                self.params['batch_size'],
                batch_data[self.placeholders['num_vertices']],
                self.params['hidden_size'])

            if is_training:
                batch_data[self.placeholders['out_layer_dropout_keep_prob']] = \
                    self.params['out_layer_dropout_keep_prob']
                fetch_list = [
                    self.ops['loss'], self.ops['mean_edge_loss_in'],
                    self.ops['mean_kl_loss_in'],
                    self.ops['mean_node_symbol_loss_in'],
                    self.ops['train_step']
                ]
            else:
                batch_data[self.placeholders['out_layer_dropout_keep_prob']] = 1.0
                fetch_list = [
                    self.ops['loss'], self.ops['mean_edge_loss_in'],
                    self.ops['mean_kl_loss_in'],
                    self.ops['mean_node_symbol_loss_in']
                ]

            result = self.sess.run(fetch_list, feed_dict=batch_data)

            batch_loss = result[0]
            loss += batch_loss * num_graphs

            edge_loss += result[1] * num_graphs
            kl_loss += result[2] * num_graphs
            node_symbol_loss += result[3] * num_graphs

            print(
                "Running %s, batch %i (has %i graphs). Loss so far: %.4f. Edge loss: %.4f, KL loss: %.4f, Node symbol loss: %.4f"
                % (epoch_name, step, num_graphs, loss / processed_graphs,
                   edge_loss / processed_graphs, kl_loss / processed_graphs,
                   node_symbol_loss / processed_graphs),
                end='\r')

        loss = loss / processed_graphs
        edge_loss = edge_loss / processed_graphs
        kl_loss = kl_loss / processed_graphs
        node_symbol_loss = node_symbol_loss / processed_graphs
        instance_per_sec = processed_graphs / (time.time() - start_time)
        return (loss, edge_loss, kl_loss, node_symbol_loss), instance_per_sec
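
The method takes an epoch name, the epoch index, the data to iterate over, and a training flag. As a minimal sketch, a caller might drive it from an outer loop like the one below; `model`, `train_data`, and `valid_data` are hypothetical stand-ins, not names from the original code, and the unpacking matches Example #1's return signature.

    # Hypothetical driver for run_epoch; `model`, `train_data`, and `valid_data`
    # are assumed stand-ins, not names from the original code.
    def train(model, train_data, valid_data, num_epochs=10):
        for epoch in range(1, num_epochs + 1):
            (train_loss, edge, kl, node), speed = model.run_epoch(
                "epoch %i (training)" % epoch, epoch, train_data, is_training=True)
            print("\nTrain: loss %.4f, edge %.4f, KL %.4f, node %.4f (%.1f graphs/s)"
                  % (train_loss, edge, kl, node, speed))
            (valid_loss, _, _, _), _ = model.run_epoch(
                "epoch %i (validation)" % epoch, epoch, valid_data, is_training=False)
            print("Valid: loss %.4f" % valid_loss)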
Example #2
    def run_epoch(self, epoch_name: str, epoch_num, data, is_training: bool):
        loss = 0
        start_time = time.time()
        processed_graphs = 0
        batch_iterator = ThreadedIterator(
            self.make_minibatch_iterator(data, is_training), max_queue_size=5)

        for step, batch_data in enumerate(batch_iterator):
            num_graphs = batch_data[self.placeholders['num_graphs']]
            processed_graphs += num_graphs
            batch_data[self.placeholders['is_generative']] = False
            # Randomly sample from normal distribution
            batch_data[self.placeholders['z_prior']] = utils.generate_std_normal(
                self.params['batch_size'],
                batch_data[self.placeholders['num_vertices']],
                self.params['hidden_size'])
            if is_training:
                batch_data[self.placeholders['out_layer_dropout_keep_prob']] = \
                    self.params['out_layer_dropout_keep_prob']
                fetch_list = [
                    self.ops['loss'], self.ops['train_step'],
                    self.ops["edge_loss"], self.ops['kl_loss'],
                    self.ops['node_symbol_prob'],
                    self.placeholders['node_symbols'],
                    self.ops['qed_computed_values'],
                    self.placeholders['target_values'],
                    self.ops['total_qed_loss'], self.ops['mean'],
                    self.ops['logvariance'], self.ops['grads'],
                    self.ops['mean_edge_loss'],
                    self.ops['mean_node_symbol_loss'],
                    self.ops['mean_kl_loss'], self.ops['mean_total_qed_loss']
                ]
            else:
                batch_data[self.placeholders['out_layer_dropout_keep_prob']] = 1.0
                fetch_list = [
                    self.ops['mean_edge_loss'], self.ops['accuracy_task0']
                ]
            result = self.sess.run(fetch_list, feed_dict=batch_data)
            """try:
                if is_training:
                    self.save_intermediate_results(batch_data[self.placeholders['adjacency_matrix']], 
                        result[11], result[12], result[4], result[5], result[9], result[10], result[6], result[7], result[13], result[14])
            except IndexError:
                pass"""

            # Note: during evaluation, result[0] is the mean edge loss, since the
            # evaluation fetch_list does not include self.ops['loss'].
            batch_loss = result[0]
            loss += batch_loss * num_graphs

            print("Running %s, batch %i (has %i graphs). Loss so far: %.4f" %
                  (epoch_name, step, num_graphs, loss / processed_graphs),
                  end='\r')
        loss = loss / processed_graphs
        instance_per_sec = processed_graphs / (time.time() - start_time)
        return loss, instance_per_sec
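
Each variant wraps its minibatch stream in a ThreadedIterator so that batch construction overlaps with the TensorFlow session runs. Below is a minimal sketch of that producer/consumer pattern, assuming the wrapper prefetches elements into a bounded queue on a daemon thread; it illustrates the pattern and is not the original implementation.

    import queue
    import threading

    class ThreadedIterator:
        """Prefetch elements from a wrapped iterator on a background thread."""

        def __init__(self, original_iterator, max_queue_size=2):
            self._queue = queue.Queue(maxsize=max_queue_size)
            self._thread = threading.Thread(
                target=self._worker, args=(original_iterator,), daemon=True)
            self._thread.start()

        def _worker(self, original_iterator):
            for element in original_iterator:
                self._queue.put(element, block=True)
            self._queue.put(None, block=True)  # sentinel marks end of iteration

        def __iter__(self):
            next_element = self._queue.get(block=True)
            while next_element is not None:
                yield next_element
                next_element = self._queue.get(block=True)
            self._thread.join()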
Example #3
    def run_epoch(self, epoch_name: str, epoch_num, data, is_training: bool):
        loss = 0
        mean_edge_loss = 0
        mean_node_loss = 0
        mean_kl_loss = 0
        mean_qed_loss = 0
        node_loss_error = -10000000  # running maximum; starts at a large negative sentinel
        node_pred_error = 0
        start_time = time.time()
        processed_graphs = 0
        # Teacher forcing is used for the first `num_teacher_forcing` training epochs.
        teacher_forcing = (is_training
                           and self.params['num_teacher_forcing'] >= epoch_num)
        batch_iterator = ThreadedIterator(
            self.make_minibatch_iterator(data, is_training),
            max_queue_size=self.params['batch_size'])

        for step, batch_data in enumerate(batch_iterator):
            num_graphs = batch_data[self.placeholders['num_graphs']]
            processed_graphs += num_graphs
            batch_data[self.placeholders['is_generative']] = False
            batch_data[self.placeholders['use_teacher_forcing_nodes']] = teacher_forcing
            # Randomly sample from normal distribution
            batch_data[self.placeholders['z_prior']] = utils.generate_std_normal(
                self.params['batch_size'],
                batch_data[self.placeholders['num_vertices']],
                self.params['hidden_size_encoder'])

            if is_training:
                batch_data[self.placeholders['out_layer_dropout_keep_prob']] = \
                    self.params['out_layer_dropout_keep_prob']
                fetch_list = [
                    self.ops['loss'], self.ops['train_step'],
                    self.ops["edge_loss"], self.ops['kl_loss'],
                    self.ops['node_symbol_prob'],
                    self.placeholders['node_symbols'],
                    self.ops['qed_computed_values'],
                    self.placeholders['target_values'],
                    self.ops['total_qed_loss'], self.ops['mean'],
                    self.ops['logvariance'], self.ops['grads'],
                    self.ops['mean_edge_loss'],
                    self.ops['mean_node_symbol_loss'],
                    self.ops['mean_kl_loss'], self.ops['mean_total_qed_loss'],
                    self.ops['grads2'], self.ops['node_loss_error'],
                    self.ops['node_pred_error']
                ]
            else:
                batch_data[self.placeholders['out_layer_dropout_keep_prob']] = 1.0
                fetch_list = [
                    self.ops['loss'], self.ops['mean_edge_loss'],
                    self.ops['mean_node_symbol_loss'],
                    self.ops['mean_kl_loss'], self.ops['mean_total_qed_loss'],
                    self.ops['sampled_atoms'], self.ops['node_loss_error'],
                    self.ops['node_pred_error']
                ]
            result = self.sess.run(fetch_list, feed_dict=batch_data)
            batch_loss = result[0]
            loss += batch_loss * num_graphs
            if is_training:
                # Indices refer to positions in the training fetch_list above.
                mean_edge_loss += result[12] * num_graphs
                mean_node_loss += result[13] * num_graphs
                mean_kl_loss += result[14] * num_graphs
                mean_qed_loss += result[15] * num_graphs
                node_loss_error = max(node_loss_error, np.max(result[17]))
                node_pred_error += result[18]
            else:
                mean_edge_loss += result[1] * num_graphs
                mean_node_loss += result[2] * num_graphs
                mean_kl_loss += result[3] * num_graphs
                mean_qed_loss += result[4] * num_graphs
                node_loss_error = max(node_loss_error, np.max(result[6]))
                node_pred_error += result[7]

            print(
                "Running %s, batch %i (has %i graphs). Total loss: %.4f. Edge loss: %.4f. Node loss: %.4f. KL loss: %.4f. Property loss: %.4f. Node error: %.4f. Node pred: %.4f."
                % (epoch_name, step, num_graphs, loss / processed_graphs,
                   mean_edge_loss / processed_graphs, mean_node_loss /
                   processed_graphs, mean_kl_loss / processed_graphs,
                   mean_qed_loss / processed_graphs, node_loss_error,
                   node_pred_error / processed_graphs),
                end='\r')

        mean_edge_loss /= processed_graphs
        mean_node_loss /= processed_graphs
        mean_kl_loss /= processed_graphs
        mean_qed_loss /= processed_graphs
        loss /= processed_graphs
        instance_per_sec = processed_graphs / (time.time() - start_time)
        return loss, mean_edge_loss, mean_node_loss, mean_kl_loss, mean_qed_loss, instance_per_sec
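
Every variant feeds `z_prior` with `utils.generate_std_normal(batch_size, num_vertices, dim)`. Judging from the call sites alone, the helper presumably draws i.i.d. standard-normal noise with one latent vector per node; the sketch below is inferred from that usage and is not taken from the original utils module.

    import numpy as np

    def generate_std_normal(batch_size, num_vertices, dim):
        # One dim-dimensional standard-normal sample per node per graph.
        return np.random.normal(loc=0.0, scale=1.0,
                                size=(batch_size, num_vertices, dim))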