Example #1
0
    def infer(self, raw_graph_data, **kwargs):
        """Run inference over raw graphs and return per-graph predictions.

        Each entry of the returned list corresponds to one graph and holds
        (discard_prob, keep_prob, element_index) tuples, one per element
        node of that graph, in fetch order.
        """
        processed = [self.process_raw_graph(raw) for raw in raw_graph_data]

        minibatches = self.make_minibatch_iterator(processed, is_training=False)
        batches = utils.ThreadedIterator(minibatches, max_queue_size=50)

        predictions = []
        for feed in batches:
            # Inference: disable output-layer dropout.
            feed[self.placeholders['out_layer_dropout_keep_prob']] = 1.0
            fetches = [
                self.ops['probabilities'],
                self.placeholders['elem_graph_nodes_list'],
                self.placeholders['elements_true'],
            ]

            probs, node_graph_ids, elems = self.sess.run(fetches, feed_dict=feed)
            per_graph = collections.defaultdict(list)
            # Three-way zip kept so truncation to the shortest sequence matches
            # the fetch semantics; the elements_true values are not used here.
            for (discard_p, keep_p), gid, _elem in zip(probs, node_graph_ids,
                                                       elems):
                bucket = per_graph[gid]
                # len(bucket) doubles as the running element index.
                bucket.append((discard_p, keep_p, len(bucket)))

            predictions.extend(per_graph.values())

        return predictions
    def infer(self, raw_graph_data, **kwargs):
        """Run inference and return, per graph, a list of (prob, pred) pairs."""
        processed = [self.process_raw_graph(raw) for raw in raw_graph_data]

        batches = utils.ThreadedIterator(
            self.make_minibatch_iterator(processed, is_training=False),
            max_queue_size=50)

        outputs = []
        for feed in batches:
            # Inference: disable output-layer dropout.
            feed[self.placeholders['out_layer_dropout_keep_prob']] = 1.0

            predictions, probabilities = self.sess.run(
                [self.ops['preds'], self.ops['probs']], feed_dict=feed)

            # Pair each probability row with its prediction row, prob first.
            for prob_row, pred_row in zip(probabilities, predictions):
                outputs.append(list(zip(prob_row, pred_row)))

        return outputs
Example #3
0
    def infer(self, raw_graph_data, **kwargs):
        """Run inference; for each graph return `max_length` timesteps of
        (softmax_value, element) candidates, each timestep sorted by
        descending softmax value.
        """
        processed = [self.process_raw_graph(raw) for raw in raw_graph_data]
        # process_raw_graph may yield None for unusable inputs; drop those.
        processed = [g for g in processed if g is not None]

        batches = utils.ThreadedIterator(
            self.make_minibatch_iterator(processed, is_training=False),
            max_queue_size=50)

        outputs = []
        steps_per_graph = self.params['max_length']

        for feed in batches:
            # Inference: disable output-layer dropout.
            feed[self.placeholders['out_layer_dropout_keep_prob']] = 1.0
            fetches = [
                self.ops['softmax_values'],
                self.placeholders['graph_ids_timestep'],
                self.placeholders['elements_true'],
            ]

            scores, graph_ids, elems = self.sess.run(fetches, feed_dict=feed)
            # The same element list applies at every timestep, so tile it
            # to line up with the per-timestep scores and graph ids.
            elems = np.concatenate([elems] * steps_per_graph, axis=0)

            grouped = collections.defaultdict(list)
            for score, gid, elem in zip(scores, graph_ids, elems):
                grouped[gid].append((score, elem))

            for gid, flat in grouped.items():
                # flat interleaves all timesteps; slice it back into
                # per-timestep chunks of `per_step` candidates each.
                per_step = len(flat) // steps_per_graph
                ranked_steps = [
                    sorted(flat[start:start + per_step],
                           key=lambda pair: -pair[0])
                    for start in range(0, len(flat), per_step)
                ]
                outputs.append(ranked_steps)

        return outputs
    def analyze(self, test_data):
        """Stream `test_data` through the analysis fetches batch by batch,
        handing each batch's result to `analyze_result`, then return the
        value of `finish_analysis()`.
        """
        with self.graph.as_default():
            batches = utils.ThreadedIterator(
                self.make_minibatch_iterator(test_data, False),
                max_queue_size=5)
            fetches = self.get_fetch_list()
            seen_graphs = 0

            for batch_index, feed in enumerate(batches):
                # Analysis runs without output-layer dropout.
                feed[self.placeholders['out_layer_dropout_keep_prob']] = 1.0
                batch_size = feed[self.placeholders['num_graphs_in_batch']]
                seen_graphs += batch_size
                # Progress line, overwritten in place via '\r'.
                print("Running Analysis, batch {} (has {} graphs).".format(
                    batch_index, batch_size),
                      end='\r')

                outcome = self.sess.run(fetches, feed_dict=feed)
                self.analyze_result(batch_size, outcome)

        print("\n-----\n")
        return self.finish_analysis()