Пример #1
0
 def write_to_file(self, f):
     """Write this BatchFile to a file object.

     f -- the destination file (must support write)

     Emits the file header record, every contained batch in order, and
     the closing record.
     """
     write_record(f, RecordTypes.BESTANDSVOORLOOP, **self.recordargs)
     # The file format numbers batches starting at 1, not 0.
     for batch_number, batch in enumerate(self.batches, start=1):
         batch.write_to_file(f, batch_number)
     write_record(f, RecordTypes.BESTANDSSLUIT)
Пример #2
0
def vis_one(vis_graph, filepath, samples, predictions, sv):
    """Run one visualisation pass over *filepath* inside *sv*'s session.

    vis_graph   -- kept for interface compatibility; not used in the body
    filepath    -- input file, handed to movefile() before processing
    samples     -- dict of input tensors (keys from `common`)
    predictions -- prediction tensor fed to my_process_batch
    sv          -- tf.train.Supervisor supplying the managed session

    NOTE(review): relies on module globals `movefile`, `record`, `reader`,
    `work_dir`, `A_checkpoint_dir` -- presumably set up at import time;
    confirm against the enclosing module.
    """
    movefile(filepath)
    record.write_record(None, reader)
    with sv.managed_session('', start_standard_services=False) as sess:

        sv.saver.restore(sess, tf.train.latest_checkpoint(A_checkpoint_dir))
        # FIX: the returned thread list was bound to an unused local (`a`);
        # the supervisor manages the queue-runner threads itself.
        sv.start_queue_runners(sess)

        save_dir = os.path.join(work_dir, _SEMANTIC_PREDICTION_SAVE_FOLDER)

        my_process_batch(sess, samples[common.ORIGINAL_IMAGE], predictions,
                         samples[common.IMAGE_NAME], samples[common.HEIGHT],
                         samples[common.WIDTH], save_dir)
Пример #3
0
def serve(filepath):
    """Serve one request: ingest *filepath*, run inference, extract skeleton.

    Steps: move the input into place, write it through the reader graph,
    then build a fresh visualisation graph, restore the latest checkpoint
    and process one batch, finishing with skeleton extraction.

    NOTE(review): depends on module globals (`movefile`, `record`,
    `get_reader`, `do_prepare`, `do_process_batch`, `skel_extract`,
    `A_master`, `A_checkpoint_dir`) -- verify against the enclosing module.
    """
    global reader_graph
    movefile(filepath)

    get_reader()

    with reader_graph.as_default():
        record.write_record(None, get_reader())

    vis_graph = tf.Graph()
    # BUG FIX: the original called `vis_graph.as_default()` as a bare
    # statement, which is a no-op -- as_default() returns a context manager
    # that must be entered. Without the `with`, everything below was built
    # in the process-wide default graph instead of vis_graph (the
    # commented-out `with vis_graph.as_default():` showed the intent).
    with vis_graph.as_default():
        samples, predictions = do_prepare()

        tf.train.get_or_create_global_step()
        saver = tf.train.Saver(slim.get_variables_to_restore())
        # Supervisor picks up the current default graph (now vis_graph).
        supervisor = tf.train.Supervisor(
            init_op=tf.global_variables_initializer(),
            summary_op=None,
            summary_writer=None,
            global_step=None,
            saver=saver)

        with supervisor.managed_session(A_master,
                                        start_standard_services=False) as sess:

            supervisor.start_queue_runners(sess)
            my_checkpoint = tf.train.latest_checkpoint(A_checkpoint_dir)
            supervisor.saver.restore(sess, my_checkpoint)
            print(my_checkpoint)

            do_process_batch(sess, samples, predictions)

            skel_extract.extract()
            skel_extract.load()

    # Freeze the graph so later code cannot accidentally add ops to it.
    vis_graph.finalize()
Пример #4
0
    def write_to_file(self, f, index):
        """Write this Batch to a file object.

        f     -- the file to write to (needs to support write)
        index -- 1-based sequence number of this batch within the file
        """

        # BUG FIX: the original aliased self.recordargs and then mutated it
        # with update(), so the per-write totals and batch number leaked
        # into the instance and persisted across calls. Work on a copy.
        recordargs = dict(self.recordargs)
        recordargs.update({
            'batchvolgnummer': index,
            'totaalbedrag': 0,
            'totaalrekeningen': 0,
            'aantalposten': 0,
        })

        write_record(f, RecordTypes.BATCHVOORLOOP, **recordargs)

        # Deliberate best-effort: a batch without a description simply
        # skips this record (self.description may be absent or None-like).
        try:
            self.description.write_to_file(f)
        except AttributeError:
            pass

        write_record(f, RecordTypes.OPDRACHTGEVER, **recordargs)

        # TODO: Loop over transactions

        write_record(f, RecordTypes.BATCHSLUIT, **recordargs)
Пример #5
0
def process_one(filepath):
    """Process a single input file end-to-end.

    Moves *filepath* into place, feeds it through the reader graph, then
    prepares inputs and runs one prediction batch inside the shared
    visualisation graph/session.

    NOTE(review): relies on module globals `reader_graph`, `vis_graph`,
    `movefile`, `record`, `get_reader`, `get_session`, `do_prepare` and
    `do_process_batch` -- presumably initialised elsewhere in the module;
    confirm before reuse.
    """
    global reader_graph
    global vis_graph

    movefile(filepath)
    # Build/ingest the record inside the reader graph's context.
    with reader_graph.as_default():
        record.write_record(None, get_reader())

    # with get_prepare_graph().as_default():
    #     samples, predictions = do_prepare()
    #
    # with vis_graph.as_default():
    #     tf.train.get_or_create_global_step()
    #     sv = get_supervisor()
    #     with sv.managed_session(A_master, start_standard_services=False) as sess:
    #         do_process_batch(sess, samples, predictions)
    #         skel_extract.extract()
    #         skel_extract.load()

    # Both the graph and the (presumably cached) session are made current
    # so do_prepare() builds its ops in vis_graph. get_session() is called
    # twice; this assumes it returns the same session each time -- TODO
    # confirm against its definition.
    with vis_graph.as_default():
        with get_session().as_default():
            samples, predictions = do_prepare()
            tf.train.get_or_create_global_step()
            do_process_batch(get_session(), samples, predictions)
Пример #6
0
def process_one(filepath):
    """Process one input file with a freshly built visualisation graph.

    Moves *filepath* into place, writes it through the reader, then builds
    dataset/input/model ops in a new tf.Graph, restores the latest
    checkpoint under a Supervisor-managed session and runs one batch.

    NOTE(review): depends on module globals (`movefile`, `record`,
    `reader`, `work_dir`, the `A_*` config values, `segmentation_dataset`,
    `input_generator`, `mycommon`, `model`, `my_process_batch`) -- verify
    against the enclosing module.
    """
    movefile(filepath)
    record.write_record(None, reader)

    vis_graph = tf.Graph()
    with vis_graph.as_default():
        dataset = segmentation_dataset.get_dataset(A_dataset,
                                                   A_vis_split,
                                                   dataset_dir=A_dataset_dir)
        samples = input_generator.get(dataset,
                                      A_vis_crop_size,
                                      A_vis_batch_size,
                                      min_resize_value=A_min_resize_value,
                                      max_resize_value=A_max_resize_value,
                                      resize_factor=A_resize_factor,
                                      dataset_split=A_vis_split,
                                      is_training=False,
                                      model_variant=A_model_variant)
        model_options = mycommon.ModelOptions(
            outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_classes},
            crop_size=A_vis_crop_size,
            atrous_rates=A_atrous_rates,
            output_stride=A_output_stride)
        print(samples[common.IMAGE])

        predictions = model.predict_labels(samples[common.IMAGE],
                                           model_options=model_options,
                                           image_pyramid=A_image_pyramid)

        predictions = predictions[common.OUTPUT_TYPE]

        tf.train.get_or_create_global_step()
        # FIX: removed `vis_session = tf.Session(graph=vis_graph)` -- that
        # session was never used and never closed (resource leak); the
        # Supervisor's managed_session below is the session actually used.
        saver = tf.train.Saver(slim.get_variables_to_restore())
        sv = tf.train.Supervisor(graph=vis_graph,
                                 logdir=A_vis_logdir,
                                 init_op=tf.global_variables_initializer(),
                                 summary_op=None,
                                 summary_writer=None,
                                 global_step=None,
                                 saver=saver)
        with sv.managed_session('', start_standard_services=False) as sess:
            sv.start_queue_runners(sess)
            sv.saver.restore(sess,
                             tf.train.latest_checkpoint(A_checkpoint_dir))
            save_dir = os.path.join(work_dir, _SEMANTIC_PREDICTION_SAVE_FOLDER)

            my_process_batch(sess, samples[common.ORIGINAL_IMAGE], predictions,
                             samples[common.IMAGE_NAME],
                             samples[common.HEIGHT], samples[common.WIDTH],
                             save_dir)