Code example #1
0
File: h3d_conv.py  Project: mverzett/DeepHGCal
    def test(self):
        """Evaluate the restored classification model on the test files.

        Streams examples (batch size forced to 1) from the test input
        pipeline until it is exhausted, accumulating loss/accuracy, a
        confusion matrix, and per-example labels and logit scores, then
        evaluates a ``ClassificationModelTestResult`` written to
        ``self.test_out_path``.
        """
        self.num_batch = 1  # evaluate one example per step
        self.initialize()
        print("Beginning to test network with parameters", get_num_parameters(self.model.get_variable_scope()))

        placeholders = self.model.get_placeholders()
        graph_loss = self.model.get_losses()
        graph_optmiser = self.model.get_optimizer()
        graph_summary = self.model.get_summary()
        graph_summary_validation = self.model.get_summary_validation()
        graph_confusion_matrix = self.model.get_confusion_matrix()
        graph_accuracy = self.model.get_accuracy()
        graph_logits, graph_prediction = self.model.get_compute_graphs()
        graph_temp = self.model.get_temp()

        inputs_feed = self.__get_input_feeds(self.test_files, repeat=False)

        accuracy_sum = 0
        num_examples = 0

        confusion_matrix = np.zeros((self.num_classes, self.num_classes), dtype=np.float32)

        # Hard cap on iterations; the loop normally terminates earlier via
        # OutOfRangeError once the (non-repeating) input pipeline is drained.
        max_examples = 1000000

        init = [tf.global_variables_initializer(), tf.local_variables_initializer()]
        with tf.Session() as sess:
            sess.run(init)
            print("Beginning to test network with parameters", get_num_parameters(self.model.get_variable_scope()))

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            self.saver_sparse.restore(sess, self.model_path)
            print("\n\nINFO: Loading model\n\n")
            iteration_number = 0

            # Pre-allocated; only the first `iteration_number` rows get filled.
            labels = np.zeros((max_examples, self.num_classes))
            scores = np.zeros((max_examples, self.num_classes))

            print("Starting iterations")
            while iteration_number < max_examples:
                try:
                    inputs = sess.run(inputs_feed)
                except tf.errors.OutOfRangeError:
                    # Input pipeline exhausted: evaluation is complete.
                    break

                inputs_train_dict = {
                    placeholders[0]: inputs[0],
                    placeholders[1]: inputs[1]
                }
                labels[iteration_number] = np.squeeze(inputs[1])

                t, eval_loss, eval_accuracy, eval_confusion, test_logits, eval_logits = sess.run(
                    [graph_temp, graph_loss, graph_accuracy, graph_confusion_matrix, graph_prediction, graph_logits],
                    feed_dict=inputs_train_dict)

                confusion_matrix += eval_confusion
                accuracy_sum += eval_accuracy * self.num_batch
                num_examples += self.num_batch

                scores[iteration_number] = np.squeeze(eval_logits)

                print("Test - Batch %4d: loss %0.5f accuracy %03.3f accuracy (cumm) %03.3f" % (
                    iteration_number, eval_loss, eval_accuracy, accuracy_sum / num_examples))
                iteration_number += 1

            # Stop and reap the queue-runner threads.
            coord.request_stop()
            coord.join(threads)

        # BUG FIX: keep only the rows that were actually filled; passing the
        # full million-row zero-padded arrays would distort the evaluation.
        labels = labels[:iteration_number]
        scores = scores[:iteration_number]

        classes_names = 'Electron', 'Muon', 'Pion Charged', 'Pion Neutral', 'K0 Long', 'K0 Short' # TODO: Pick from config

        test_result = ClassificationModelTestResult()
        test_result.initialize(confusion_matrix, labels, scores, self.model.get_human_name(),
                               get_num_parameters(self.model.get_variable_scope()), classes_names, self.summary_path)
        test_result.evaluate(self.test_out_path)

        # Guard against an empty test set (would otherwise ZeroDivisionError).
        mean_accuracy = accuracy_sum / num_examples if num_examples else float('nan')
        print("Evaluation complete")
        print("Evaluation accuracy ", mean_accuracy)
        print("Confusion matrix:")
        print(confusion_matrix)
Code example #2
0
    def train(self):
        """Train the model, periodically validating and checkpointing.

        Resumes from the checkpoint at ``self.model_path`` (iteration
        counter persisted in a ``.txt`` sidecar file) unless
        ``self.from_scratch`` is set, in which case the summary directory
        is cleaned and training starts at iteration 0.
        """
        self.initialize()
        print("Beginning to train network with parameters",
              get_num_parameters(self.model.get_variable_scope()))
        placeholders = self.model.get_placeholders()
        graph_loss = self.model.get_losses()
        graph_optmiser = self.model.get_optimizer()
        graph_summary = self.model.get_summary()
        graph_summary_validation = self.model.get_summary_validation()
        graph_accuracy = self.model.get_accuracy()
        graph_logits, graph_prediction = self.model.get_compute_graphs()
        graph_temp = self.model.get_temp()

        # Placeholder controlling which of two training modes is active
        # (alternated every `switch_after` iterations below).
        place_holder_control_switch = self.model.get_place_holder_switch_control(
        )

        if self.from_scratch:
            self.clean_summary_dir()

        inputs_feed = self._get_input_feeds(self.training_files)
        inputs_validation_feed = self._get_input_feeds(self.validation_files)

        init = [
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        ]
        with tf.Session() as sess:
            sess.run(init)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            summary_writer = tf.summary.FileWriter(self.summary_path,
                                                   sess.graph)

            if not self.from_scratch:
                self.saver_sparse.restore(sess, self.model_path)
                print("\n\nINFO: Loading model\n\n")
                # The sidecar .txt file stores the last saved iteration number.
                with open(self.model_path + '.txt', 'r') as f:
                    iteration_number = int(f.read())
            else:
                iteration_number = 0

            print("Starting iterations")
            while iteration_number < self.train_for_iterations:
                # Build a one-hot switch that alternates every `switch_after`
                # iterations...
                number = int(iteration_number / self.switch_after) % 2
                switch_value = np.zeros((2), dtype=np.int64)
                switch_value[number] = 1
                # ...but it is immediately overridden: both modes stay on.
                switch_value = [1, 1]  # TODO: Remove this later

                inputs_train = sess.run(list(inputs_feed))

                inputs_train_dict = {
                    placeholders[0]: inputs_train[0],
                    placeholders[1]: inputs_train[1],
                    placeholders[2]: inputs_train[2],
                    placeholders[3]: inputs_train[3],
                    placeholders[4]: inputs_train[4],
                    place_holder_control_switch: switch_value
                }

                # One optimizer step; also fetch loss/summary/accuracy.
                t, eval_loss, _, eval_summary, eval_accuracy, test_logits = sess.run(
                    [
                        graph_temp, graph_loss, graph_optmiser, graph_summary,
                        graph_accuracy, graph_prediction
                    ],
                    feed_dict=inputs_train_dict)

                if iteration_number % self.validate_after == 0:
                    # Periodic validation pass (no optimizer op, so weights
                    # are not updated here).
                    inputs_validation = sess.run(list(inputs_validation_feed))
                    inputs_validation_dict = {
                        placeholders[0]: inputs_validation[0],
                        placeholders[1]: inputs_validation[1],
                        placeholders[2]: inputs_validation[2],
                        placeholders[3]: inputs_validation[3],
                        placeholders[4]: inputs_validation[4],
                        place_holder_control_switch: switch_value
                    }

                    eval_loss_validation, eval_summary_validation, eval_accuracy_validation = sess.run(
                        [graph_loss, graph_summary_validation, graph_accuracy],
                        feed_dict=inputs_validation_dict)
                    summary_writer.add_summary(eval_summary_validation,
                                               iteration_number)
                    print(
                        "Validation - Iteration %4d: loss %0.5f accuracy %03.3f"
                        % (iteration_number, eval_loss_validation,
                           eval_accuracy_validation))

                print(
                    "Training   - Iteration %4d: loss %0.5f accuracy %03.3f" %
                    (iteration_number, eval_loss, eval_accuracy))
                print(t[0])
                iteration_number += 1
                # NOTE(review): the training summary is logged under the
                # already-incremented step, i.e. one step later than the
                # validation summary above — confirm this offset is intended.
                summary_writer.add_summary(eval_summary, iteration_number)
                if iteration_number % self.save_after_iterations == 0:
                    print("\n\nINFO: Saving model\n\n")
                    self.saver_sparse.save(sess, self.model_path)
                    # Persist the iteration counter for resuming later.
                    with open(self.model_path + '.txt', 'w') as f:
                        f.write(str(iteration_number))

            # Stop the threads
            coord.request_stop()

            # Wait for threads to stop
            coord.join(threads)
Code example #3
0
    def test(self):
        """Run inference over the test set and stream results to disk.

        Restores the checkpoint at ``self.model_path``, feeds batches
        produced by the configured reader, and pushes per-example result
        tuples into an asynchronous ``InferenceOutputStreamer`` writing to
        ``self.test_out_path``.
        """
        self.initialize_test()
        print("Beginning to test network with parameters",
              get_num_parameters(self.model.get_variable_scope()))
        placeholders = self.model.get_placeholders()
        graph_loss = self.model.get_losses()
        graph_optmiser = self.model.get_optimizer()
        graph_summary = self.model.get_summary()
        graph_summary_validation = self.model.get_summary_validation()
        graph_output = self.model.get_compute_graphs()
        graph_temp = self.model.get_temp()

        # Reader class is chosen at runtime from the factory; shuffling is
        # disabled so the test order is deterministic.
        inputs_feed = self.reader_factory.get_class(self.reader_type)(
            self.test_files, self.num_max_entries, self.num_data_dims,
            self.num_batch).get_feeds(shuffle=False)
        # Results are buffered and flushed on a background thread.
        inference_streamer = InferenceOutputStreamer(
            output_path=self.test_out_path, cache_size=100)
        inference_streamer.start_thread()
        print(type(inputs_feed))
        print("****************************************")
        print("Test Input shape: ", inputs_feed.get_shape().as_list())
        init = [
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        ]
        with tf.Session() as sess:
            sess.run(init)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            self.saver_sparse.restore(sess, self.model_path)
            print("\n\nINFO: Loading model", self.model_path, "\n\n")

            print("Starting testing")
            iteration_number = 0
            # ceil() so a final partial batch is still processed.
            while iteration_number < int(
                    np.ceil(self.num_testing_samples / self.num_batch)):
                inputs_test = sess.run(list(inputs_feed))

                # Feature columns are sliced off the last axis of
                # inputs_test[0] — assumes layout (batch, entries, features);
                # TODO confirm against the reader implementation.
                if len(placeholders) == 5:
                    inputs_train_dict = {
                        placeholders[0]:
                        inputs_test[0][:, :, self.spatial_features_indices],
                        placeholders[1]:
                        inputs_test[0][:, :,
                                       self.spatial_features_local_indices],
                        placeholders[2]:
                        inputs_test[0][:, :, self.other_features_indices],
                        placeholders[3]:
                        inputs_test[0][:, :, self.target_indices],
                        placeholders[4]:
                        inputs_test[1],
                        self.model.is_train:
                        False,
                        self.model.learning_rate:
                        0
                    }
                else:
                    # NOTE(review): with placeholders[5] commented out this
                    # branch feeds the same 5 placeholders as the branch
                    # above — confirm whether the sixth feed should be live.
                    inputs_train_dict = {
                        placeholders[0]:
                        inputs_test[0][:, :, self.spatial_features_indices],
                        placeholders[1]:
                        inputs_test[0][:, :,
                                       self.spatial_features_local_indices],
                        placeholders[2]:
                        inputs_test[0][:, :, self.other_features_indices],
                        placeholders[3]:
                        inputs_test[0][:, :, self.target_indices],
                        placeholders[4]:
                        inputs_test[1],
                        # placeholders[5]: inputs_test[2],
                        self.model.is_train:
                        False,
                        self.model.learning_rate:
                        0
                    }
                t, eval_loss, eval_output = sess.run(
                    [graph_temp, graph_loss, graph_output],
                    feed_dict=inputs_train_dict)

                print("Adding", len(inputs_test[0]), "test results")
                for i in range(len(inputs_test[0])):
                    # Optionally include the seed indices in each streamed tuple.
                    if not self.output_seed_indices:
                        inference_streamer.add(
                            (inputs_test[0][i], (inputs_test[1])[i, 0],
                             eval_output[i]))
                    else:
                        inference_streamer.add(
                            (inputs_test[0][i], (inputs_test[1])[i, 0],
                             eval_output[i], inputs_test[2][i]))

                print("Testing - Sample %4d: loss %0.5f" %
                      (iteration_number * self.num_batch, eval_loss))
                print(t[0])
                iteration_number += 1

            # Stop the threads
            coord.request_stop()

            # Wait for threads to stop
            coord.join(threads)

        # Flush remaining buffered results and stop the writer thread.
        inference_streamer.close()
Code example #4
0
    def visualize(self):
        """Produce layer-wise clustering visualizations for 32 events.

        Restores the checkpoint, runs inference while also fetching the
        model's intermediate layer features, plots 32 events once enough
        batches have been seen, and then terminates the process.
        """
        self.initialize_test()
        print("Beginning to visualize network with parameters",
              get_num_parameters(self.model.get_variable_scope()))
        placeholders = self.model.get_placeholders()
        graph_loss = self.model.get_losses()
        graph_output = self.model.get_compute_graphs()
        graph_temp = self.model.get_temp()
        # Intermediate per-layer feature tensors exposed for visualization.
        layer_feats = self.model.temp_feat_visualize

        # Deterministic (unshuffled) test feed from the configured reader.
        inputs_feed = self.reader_factory.get_class(self.reader_type)(
            self.test_files, self.num_max_entries, self.num_data_dims,
            self.num_batch).get_feeds(shuffle=False)

        init = [
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        ]
        with tf.Session() as sess:
            sess.run(init)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            self.saver_sparse.restore(sess, self.model_path)
            print("\n\nINFO: Loading model", self.model_path, "\n\n")

            print("Starting visualizing")
            iteration_number = 0
            while iteration_number < int(
                    np.ceil(self.num_testing_samples / self.num_batch)):
                inputs_test = sess.run(list(inputs_feed))
                print("Run")

                # Feature columns sliced off the last axis of inputs_test[0];
                # assumes layout (batch, entries, features) — TODO confirm.
                if len(placeholders) == 5:
                    inputs_train_dict = {
                        placeholders[0]:
                        inputs_test[0][:, :, self.spatial_features_indices],
                        placeholders[1]:
                        inputs_test[0][:, :,
                                       self.spatial_features_local_indices],
                        placeholders[2]:
                        inputs_test[0][:, :, self.other_features_indices],
                        placeholders[3]:
                        inputs_test[0][:, :, self.target_indices],
                        placeholders[4]:
                        inputs_test[1],
                        self.model.is_train:
                        False,
                        self.model.learning_rate:
                        0
                    }
                else:
                    inputs_train_dict = {
                        placeholders[0]:
                        inputs_test[0][:, :, self.spatial_features_indices],
                        placeholders[1]:
                        inputs_test[0][:, :,
                                       self.spatial_features_local_indices],
                        placeholders[2]:
                        inputs_test[0][:, :, self.other_features_indices],
                        placeholders[3]:
                        inputs_test[0][:, :, self.target_indices],
                        placeholders[4]:
                        inputs_test[1],
                        placeholders[5]:
                        inputs_test[2],
                        self.model.is_train:
                        False,
                        self.model.learning_rate:
                        0
                    }
                # Fetch temp/loss/output plus every visualization tensor.
                eval_out = sess.run([graph_temp, graph_loss, graph_output] +
                                    layer_feats,
                                    feed_dict=inputs_train_dict)
                layer_outs = eval_out[3:]
                prediction = eval_out[2]

                # Once at least 32 events have been seen, plot and quit.
                if iteration_number * self.num_batch + self.num_batch >= 32:
                    for x in range(32):
                        # NOTE(review): (32 + x) % num_batch indexes only
                        # within the *current* batch and may repeat events
                        # when num_batch < 32 — confirm this is intended.
                        event_number = (32 + x) % self.num_batch
                        print("Event number", event_number)
                        seed_index = inputs_test[2][event_number, :]
                        print(seed_index)
                        spatial_features = inputs_test[0][
                            event_number, :, :][:,
                                                self.spatial_features_indices]
                        # Column 0 of the feature axis is used as the energy.
                        energy = inputs_test[0][event_number, :, :][:, 0]
                        gt = inputs_test[0][
                            event_number, :, :][:, self.target_indices]
                        predictionx = prediction[event_number]
                        layer_outsx = [x[event_number] for x in layer_outs]
                        # Aggregator-style configs use a dedicated plot routine.
                        if 'aggregators' in self.config_name:
                            plots.plot_clustering_layer_wise_visualize_agg(
                                spatial_features, energy, predictionx, gt,
                                layer_outsx, self.config_name)
                        else:
                            plots.plot_clustering_layer_wise_visualize(
                                spatial_features, energy, predictionx, gt,
                                layer_outsx, self.config_name)
                    # Hard exit: visualization is a one-shot operation.
                    sys.exit(0)

                # Put the condition here!

                iteration_number += 1

            # Stop the threads
            coord.request_stop()

            # Wait for threads to stop
            coord.join(threads)
Code example #5
0
    def train(self):
        """Train the model with periodic validation and checkpointing.

        When training from scratch, also snapshots the model code and the
        ``ops`` package sources into the output directory for
        reproducibility. Resumes the iteration counter from the ``.txt``
        sidecar of ``self.model_path`` otherwise.
        """
        self.initialize()
        print("Beginning to train network with parameters",
              get_num_parameters(self.model.get_variable_scope()))
        print("Variable scope:", self.model.get_variable_scope())
        placeholders = self.model.get_placeholders()

        if self.from_scratch:
            # Create output directories and archive the model / ops sources
            # next to the results so the run can be reproduced later.
            subprocess.call("mkdir -p %s" % (self.summary_path), shell=True)
            subprocess.call("mkdir -p %s" % (self.test_out_path), shell=True)
            subprocess.call("mkdir -p %s" %
                            (os.path.join(self.test_out_path, 'ops')),
                            shell=True)
            with open(self.model_path + '_code.py', 'w') as f:
                f.write(self.model.get_code())

            ops_parent = os.path.dirname(ops.__file__)
            for ops_file in os.listdir(ops_parent):
                if not ops_file.endswith('.py'):
                    continue
                shutil.copy(os.path.join(ops_parent, ops_file),
                            os.path.join(self.test_out_path, 'ops'))

        graph_loss = self.model.get_losses()
        graph_optmiser = self.model.get_optimizer()
        graph_summary = self.model.get_summary()
        graph_summary_validation = self.model.get_summary_validation()
        graph_output = self.model.get_compute_graphs()
        graph_temp = self.model.get_temp()

        if self.plot_after != -1:
            data_plotting = None  # TODO: Load

        if self.from_scratch:
            self.clean_summary_dir()

        # Training feed is shuffled (default); validation feed is not.
        inputs_feed = self.reader_factory.get_class(
            self.reader_type)(self.training_files, self.num_max_entries,
                              self.num_data_dims, self.num_batch).get_feeds()
        inputs_validation_feed = self.reader_factory.get_class(
            self.reader_type)(self.validation_files, self.num_max_entries,
                              self.num_data_dims,
                              self.num_batch).get_feeds(shuffle=False)
        print("\n****************************************")
        print("Feed Input type", type(inputs_feed[0]))
        print("Feed Input shape: ", inputs_feed[0].get_shape().as_list())
        init = [
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        ]

        with tf.Session() as sess:
            sess.run(init)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            summary_writer = tf.summary.FileWriter(self.summary_path,
                                                   sess.graph)

            if not self.from_scratch:
                self.saver_sparse.restore(sess, self.model_path)
                print("\n\nINFO: Loading model\n\n")
                # Sidecar .txt stores the last saved iteration number.
                with open(self.model_path + '.txt', 'r') as f:
                    iteration_number = int(f.read())
            else:
                iteration_number = 0

            print("Starting iterations")
            while iteration_number < self.train_for_iterations:
                inputs_train = sess.run(list(inputs_feed))

                # NOTE(review): these shape prints run on *every* iteration —
                # likely debug leftovers; consider removing or gating them.
                print("\n****************************************")
                print("Input Train type", type(inputs_train[0]))
                print("Input Train shape: ", inputs_train[0].shape)

                # The initial 1 is dead — it is overwritten on both branches.
                learning_rate = 1
                if hasattr(self.model, "learningrate_scheduler"):
                    # Scheduler takes precedence when the model provides one.
                    learning_rate = self.model.learningrate_scheduler.get_lr(
                        iteration_number)
                else:
                    learning_rate = self.model.learning_rate
                if iteration_number == 0:
                    print('learning rate ', learning_rate)

                # Feature columns sliced off the last axis of inputs_train[0];
                # assumes layout (batch, entries, features) — TODO confirm.
                if len(placeholders) == 5:
                    inputs_train_dict = {
                        placeholders[0]:
                        inputs_train[0][:, :, self.spatial_features_indices],
                        placeholders[1]:
                        inputs_train[0][:, :,
                                        self.spatial_features_local_indices],
                        placeholders[2]:
                        inputs_train[0][:, :, self.other_features_indices],
                        placeholders[3]:
                        inputs_train[0][:, :, self.target_indices],
                        placeholders[4]:
                        inputs_train[1],
                        self.model.is_train:
                        True,
                        self.model.learning_rate:
                        learning_rate
                    }
                else:
                    # NOTE(review): with placeholders[5] commented out this
                    # branch feeds the same 5 placeholders as the one above.
                    inputs_train_dict = {
                        placeholders[0]:
                        inputs_train[0][:, :, self.spatial_features_indices],
                        placeholders[1]:
                        inputs_train[0][:, :,
                                        self.spatial_features_local_indices],
                        placeholders[2]:
                        inputs_train[0][:, :, self.other_features_indices],
                        placeholders[3]:
                        inputs_train[0][:, :, self.target_indices],
                        placeholders[4]:
                        inputs_train[1],
                        # placeholders[5]: inputs_train[2],
                        self.model.is_train:
                        True,
                        self.model.learning_rate:
                        learning_rate
                    }

                # One optimizer step; also fetch loss/summary/output.
                t, eval_loss, _, eval_summary, eval_output = sess.run(
                    [
                        graph_temp, graph_loss, graph_optmiser, graph_summary,
                        graph_output
                    ],
                    feed_dict=inputs_train_dict)

                if self.plot_after != -1:
                    if iteration_number % self.plot_after == 0:
                        pass  # plotting hook not implemented yet (see TODO above)

                if iteration_number % self.validate_after == 0:
                    # Periodic validation pass (no optimizer op).
                    inputs_validation = sess.run(list(inputs_validation_feed))
                    self.inputs_plot = inputs_validation

                    if len(placeholders) == 5:
                        inputs_validation_dict = {
                            placeholders[0]:
                            inputs_validation[0]
                            [:, :, self.spatial_features_indices],
                            placeholders[1]:
                            inputs_validation[0]
                            [:, :, self.spatial_features_local_indices],
                            placeholders[2]:
                            inputs_validation[0][:, :,
                                                 self.other_features_indices],
                            placeholders[3]:
                            inputs_validation[0][:, :, self.target_indices],
                            placeholders[4]:
                            inputs_validation[1],
                            self.model.is_train:
                            False,
                            self.model.learning_rate:
                            learning_rate
                        }
                    else:
                        inputs_validation_dict = {
                            placeholders[0]:
                            inputs_validation[0]
                            [:, :, self.spatial_features_indices],
                            placeholders[1]:
                            inputs_validation[0]
                            [:, :, self.spatial_features_local_indices],
                            placeholders[2]:
                            inputs_validation[0][:, :,
                                                 self.other_features_indices],
                            placeholders[3]:
                            inputs_validation[0][:, :, self.target_indices],
                            placeholders[4]:
                            inputs_validation[1],
                            # placeholders[5]: inputs_validation[2],
                            self.model.is_train:
                            False,
                            self.model.learning_rate:
                            learning_rate
                        }

                    eval_loss_validation, eval_summary_validation = sess.run(
                        [graph_loss, graph_summary_validation],
                        feed_dict=inputs_validation_dict)
                    summary_writer.add_summary(eval_summary_validation,
                                               iteration_number)
                    print("Validation - Iteration %4d: loss %.6E" %
                          (iteration_number, eval_loss_validation))

                print("Training   - Iteration %4d: loss %0.6E" %
                      (iteration_number, eval_loss))
                print(t[0])
                iteration_number += 1
                # NOTE(review): training summary is logged under the already-
                # incremented step (one later than the validation summary) —
                # confirm this offset is intended.
                summary_writer.add_summary(eval_summary, iteration_number)
                if iteration_number % self.save_after_iterations == 0:
                    print("\n\nINFO: Saving model\n\n")
                    self.saver_sparse.save(sess, self.model_path)
                    # Persist the iteration counter for resuming later.
                    with open(self.model_path + '.txt', 'w') as f:
                        f.write(str(iteration_number))

            # Stop the threads
            coord.request_stop()

            # Wait for threads to stop
            coord.join(threads)
Code example #6
0
    def profile(self):
        """Benchmark forward-pass time and memory over 20 iterations.

        Uses the module-level ``bb`` as the batch size, runs every step
        with full tracing, writes per-step timelines to
        ``self.profile_out_path``, and prints mean/std of inference time
        (first iteration excluded as warm-up) plus peak memory bytes.
        """
        global bb  # batch size is taken from a module-level variable
        tf.reset_default_graph()
        self.initialize_profile()
        print("Beginning to profile network with parameters",
              get_num_parameters(self.model.get_variable_scope()))
        placeholders = self.model.get_placeholders()

        subprocess.call("mkdir -p %s" % (self.profile_out_path), shell=True)

        graph_output = self.model.get_compute_graphs()

        inputs_feed = self.reader_factory.get_class(
            self.reader_type)(self.training_files, self.num_max_entries,
                              self.num_data_dims, bb).get_feeds()

        init = [
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        ]

        # Single-threaded execution for reproducible timing measurements.
        session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
                                      inter_op_parallelism_threads=1)

        inference_time_values = []
        with tf.Session(config=session_conf) as sess:
            # with tf.Session() as sess:
            sess.run(init)
            profiler = Profiler(sess.graph)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            iteration_number = 0

            print("Starting iterations")
            while iteration_number < 20:

                inputs_train = sess.run(list(inputs_feed))

                # Feature columns sliced off the last axis of inputs_train[0];
                # assumes layout (batch, entries, features) — TODO confirm.
                if len(placeholders) == 5:
                    inputs_train_dict = {
                        placeholders[0]:
                        inputs_train[0][:, :, self.spatial_features_indices],
                        placeholders[1]:
                        inputs_train[0][:, :,
                                        self.spatial_features_local_indices],
                        placeholders[2]:
                        inputs_train[0][:, :, self.other_features_indices],
                        placeholders[3]:
                        inputs_train[0][:, :, self.target_indices],
                        placeholders[4]:
                        inputs_train[1],
                        self.model.is_train:
                        True,
                        self.model.learning_rate:
                        1
                    }
                else:
                    inputs_train_dict = {
                        placeholders[0]:
                        inputs_train[0][:, :, self.spatial_features_indices],
                        placeholders[1]:
                        inputs_train[0][:, :,
                                        self.spatial_features_local_indices],
                        placeholders[2]:
                        inputs_train[0][:, :, self.other_features_indices],
                        placeholders[3]:
                        inputs_train[0][:, :, self.target_indices],
                        placeholders[4]:
                        inputs_train[1],
                        placeholders[5]:
                        inputs_train[2],
                        self.model.is_train:
                        True,
                        self.model.learning_rate:
                        1
                    }
                # Full-trace run so the profiler can capture op-level stats.
                run_meta = tf.RunMetadata()
                start_time = time.time()
                eval_output = sess.run(
                    graph_output,
                    feed_dict=inputs_train_dict,
                    options=tf.RunOptions(
                        trace_level=tf.RunOptions.FULL_TRACE),
                    run_metadata=run_meta)
                print("XC Time: --- %s seconds --- Iteration %d" %
                      (time.time() - start_time, iteration_number))
                profiler.add_step(iteration_number, run_meta)

                # Or profile the timing of your model operations.
                opts = option_builder.ProfileOptionBuilder.time_and_memory()
                profiler.profile_operations(options=opts)

                # Or you can generate a timeline:
                opts = (option_builder.ProfileOptionBuilder(
                    option_builder.ProfileOptionBuilder.time_and_memory()
                ).with_step(iteration_number).with_timeline_output(
                    os.path.join(self.profile_out_path, 'profile')).build())
                x = profiler.profile_graph(options=opts)

                inference_time_values.append(x.total_exec_micros)
                peak_bytes = x.total_peak_bytes

                iteration_number += 1

            # NOTE(review): if the loop body never runs, `peak_bytes` is
            # unbound and the print below raises NameError.
            print(self.config_name, "Batch size: ", bb)
            print(repr(np.array(inference_time_values)))
            # [1:] drops the first (warm-up) measurement from the statistics.
            print(
                "Mean",
                np.mean(np.array(inference_time_values, dtype=np.float32)[1:]))
            print(
                "Variance",
                np.std(np.array(inference_time_values, dtype=np.float32)[1:]))
            print("Peak bytes", peak_bytes)

            # Stop the threads
            coord.request_stop()

            # Wait for threads to stop
            coord.join(threads)
Code example #7
0
    def profile(self):
        """Profile training-step timing and memory with ``tf.profiler``.

        Runs 1000 training iterations; every 100th step executes with full
        tracing and is fed to a ``Profiler`` which reports trainable-variable
        parameters, per-op time/memory, and a timeline file at
        ``self.config['profiler_output_file_name']``.
        """
        self.initialize()
        print("Beginning to profile network with parameters",
              get_num_parameters(self.model.get_variable_scope()))
        feed_placeholders = self.model.get_placeholders()
        loss_op = self.model.get_losses()
        optimiser_op = self.model.get_optimizer()
        summary_op = self.model.get_summary()
        validation_summary_op = self.model.get_summary_validation()
        accuracy_op = self.model.get_accuracy()
        logits_op, prediction_op = self.model.get_compute_graphs()
        temp_op = self.model.get_temp()

        train_feed = self._get_input_feeds(self.training_files)
        validation_feed = self._get_input_feeds(self.validation_files)

        init_ops = [
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        ]
        with tf.Session() as sess:
            sess.run(init_ops)
            profiler = Profiler(sess.graph)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            summary_writer = tf.summary.FileWriter(self.summary_path,
                                                   sess.graph)

            step = 0

            print("Starting iterations")
            run_ops = [
                temp_op, loss_op, optimiser_op, summary_op,
                accuracy_op, prediction_op
            ]
            while step < 1000:
                batch = sess.run(list(train_feed))

                # Map the first five feed tensors onto their placeholders.
                feed_dict = {feed_placeholders[i]: batch[i] for i in range(5)}

                if step % 100 != 0:
                    # Regular (untraced) training step.
                    sess.run(run_ops, feed_dict=feed_dict)
                else:
                    # Traced step: collect run metadata for the profiler.
                    run_meta = tf.RunMetadata()

                    sess.run(run_ops,
                             feed_dict=feed_dict,
                             options=tf.RunOptions(
                                 trace_level=tf.RunOptions.FULL_TRACE),
                             run_metadata=run_meta)

                    profiler.add_step(step, run_meta)

                    # Parameter breakdown of the trainable variables.
                    profiler.profile_name_scope(
                        options=(option_builder.ProfileOptionBuilder.
                                 trainable_variables_parameter()))

                    # Per-operation time and memory report.
                    timing_opts = option_builder.ProfileOptionBuilder.time_and_memory(
                    )
                    profiler.profile_operations(options=timing_opts)

                    # Timeline output for this traced step.
                    timeline_opts = (option_builder.ProfileOptionBuilder(
                        option_builder.ProfileOptionBuilder.time_and_memory(
                        )).with_step(step).with_timeline_output(
                            self.config['profiler_output_file_name']).build())
                    profiler.profile_graph(options=timeline_opts)

                print("Profiling - Iteration %4d" % step)
                step += 1

            # Signal the queue-runner threads to stop.
            coord.request_stop()

            # Wait for them to finish.
            coord.join(threads)