Example #1
def main(argv=None):
    _init_output_directories()

    # step 1: create the negatives and positives directory
    if FLAGS.do_full_prepare:
        print('Loading labels from %s' % FLAGS.label_file)
        lr = LabelRecord()
        label_records = lr.load(FLAGS.label_file)
        all_bounding_boxes = Box.get_all_bounding_boxes(label_records)
        counter = 0

        # fill examples, originals, negatives, and positives directories
        print('Processing images...')
        for (_, v) in label_records.items():
            Progress.show_progress(counter)
            image = CXRImage.get_image_data(v.filename, FLAGS.image_path)
            basefilename = os.path.splitext(v.filename)[0]
            if v.hasBoundingBox:
                for i in range(0, v.boundingBoxes.shape[0]):
                    box = v.boundingBoxes[i, :]
                    #CXRImage.extract_center_and_write(image,box,1024,1024,FLAGS.positives_dir)
                    CXRImage.extract_anisotropic_scale_and_write(
                        image, box, FLAGS.image_size, FLAGS.image_size,
                        FLAGS.positives_dir)
                CXRImage.write_image(image, FLAGS.examples_dir,
                                     "%s.jpg" % basefilename)
            else:
                # np.random.randint's upper bound is exclusive, so pass the full
                # length (the original "- 1" could never select the last box)
                i = np.int32(np.random.randint(0, all_bounding_boxes.shape[0]))
                box = all_bounding_boxes[i, :]
                CXRImage.extract_anisotropic_scale_and_write(
                    image, box, FLAGS.image_size, FLAGS.image_size,
                    FLAGS.negatives_dir)
                #CXRImage.extract_center_and_write(image,box,1024,1024,FLAGS.negatives_dir)

            if v.hasBoundingBox:
                img = (CXRImage.xlate_image(image) * 255).astype(np.uint8)
                CXRImage.write_image_with_bounding_boxes(
                    img, FLAGS.originals_dir, "%s.jpg" % basefilename,
                    v.boundingBoxes)
            counter += 1

    # step 2: create the pre-training features by combining negatives and positives into pre_train.tfrecord
    print('\nCreating pre-train file...')
    rec = Record(1024, 1024, 1 if FLAGS.grayscale else 3)
    total = rec.create_pre_train_file(FLAGS.positives_dir, FLAGS.negatives_dir,
                                      FLAGS.pre_train_file)
    print('\n%d files combined in %s' % (total, FLAGS.pre_train_file))
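A note on example #1: the FLAGS consumed above are defined elsewhere in the project. A minimal sketch of how they might be declared with TF 1.x's tf.app.flags (flag names are inferred from the code above; defaults are placeholders, not the project's values):

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_boolean('do_full_prepare', False, 'rebuild positives/negatives from labels')
tf.app.flags.DEFINE_string('label_file', 'labels.csv', 'CSV of bounding-box labels')
tf.app.flags.DEFINE_string('image_path', 'images/', 'directory of source CXR images')
tf.app.flags.DEFINE_integer('image_size', 1024, 'side length of extracted crops')
tf.app.flags.DEFINE_string('positives_dir', 'positives/', 'crops that contain a bounding box')
tf.app.flags.DEFINE_string('negatives_dir', 'negatives/', 'crops sampled from box-free images')
tf.app.flags.DEFINE_string('examples_dir', 'examples/', 'full images that had boxes')
tf.app.flags.DEFINE_string('originals_dir', 'originals/', 'originals annotated with their boxes')
tf.app.flags.DEFINE_boolean('grayscale', False, 'use single-channel records')
tf.app.flags.DEFINE_string('pre_train_file', 'pre_train.tfrecord', 'combined output TFRecord')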
Example #2
File: verify.py Project: ltenny/rsna
def main(argv=None):
    if FLAGS.grayscale:
        print('Grayscale not currently supported!')
        return

    _init_output_directories()
    batch_size = 100
    rec = Record(1024, 1024, 1 if FLAGS.grayscale else 3)
    with tf.Graph().as_default():
        image, label = rec.read_pre_train_record([FLAGS.pre_train_file])
        images, labels = tf.train.shuffle_batch([image, label],
                                                batch_size=batch_size,
                                                capacity=300,
                                                num_threads=10,
                                                min_after_dequeue=10)
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        with tf.Session() as sess:
            sess.run(init_op)
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            image_batch, label_batch = sess.run([images, labels])

            for ndx in range(batch_size):
                img = image_batch[ndx]
                lbl = label_batch[ndx]
                print('Getting image for label %d' % lbl)
                img = img.astype(np.uint8)
                # replicate to three channels so the crop can be saved as RGB
                # (assumes the record decodes to a 2-D height x width array)
                img = np.stack([img] * 3, axis=2)
                print(img.shape)
                new_image = Image.fromarray(img)
                if lbl == 1:
                    path = FLAGS.positives
                else:
                    path = FLAGS.negatives
                new_image.save(os.path.join(path,
                                            '%s.jpg' % str(uuid.uuid4())))

            coord.request_stop()
            coord.join(threads)
            # no explicit sess.close() needed; the with-block closes the session
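The shuffle_batch/queue-runner pattern above is classic TF 1.x input plumbing. For comparison, a rough tf.data sketch of the same batching (parse_fn is hypothetical here; the real decoding lives in Record.read_pre_train_record):

dataset = (tf.data.TFRecordDataset([FLAGS.pre_train_file])
           .map(parse_fn)              # hypothetical: decode one (image, label) pair
           .shuffle(buffer_size=300)
           .batch(batch_size))
images, labels = dataset.make_one_shot_iterator().get_next()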
Example #3
    def __init__(self, args):
        self.args = args
        self.model = None
        self.optimizer = None
        self.scheduler = None
        self.epoch = 0

        # seed every RNG for reproducibility and pin cuDNN behavior
        # (note: "cudnn_behavoir" is this project's spelling of the argument)
        set_seed(self.args.seed, self.args.cudnn_behavoir)
        self.log = Log(self.args.log_path)
        self.writer = Tensorboard(self.args.tensorboard_path)
        self.stati = Statistic(self.args.expernameid, self.args.experid_path, self.args.root_path)
        self.stati.add('hparam', self.args.dict())  # record the run's hyperparameters
        self.record = Record()
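The set_seed helper is not shown in this snippet. A plausible sketch, assuming a PyTorch setup (hypothetical; the second positional argument receives the project's "cudnn_behavoir" value):

import random
import numpy as np
import torch

def set_seed(seed, cudnn_behavior='deterministic'):
    # seed every RNG the training run may touch
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # trade speed for reproducibility depending on the requested behavior
    torch.backends.cudnn.deterministic = (cudnn_behavior == 'deterministic')
    torch.backends.cudnn.benchmark = (cudnn_behavior == 'benchmark')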
Example #4
    def work(self, COORD, render=False):
        """Main function of the Workers. This runs the environment and the experience
        is used to update the main Actor Critic Network.
        """
        #Allowing access to the global variables.
        while not COORD.should_stop() and self.sess.run(
                self.global_step) < self.settings["MaxEpisodes"]:

            self.sess.run(self.global_step_next)

            logging = interval_flag(self.sess.run(self.global_step),
                                    self.settings["LogFreq"], 'log')
            saving = interval_flag(self.sess.run(self.global_step),
                                   self.settings["SaveFreq"], 'save')

            s0 = self.env.reset()

            for j in range(self.settings["MaxEpisodeSteps"] + 1):

                a, networkData = self.net.GetAction(state=s0)

                s1, r, done, _ = self.env.step(a)
                if render:
                    self.env.render()

                self.net.AddToTrajectory([s0, a, r, s1, done] + networkData)

                s0 = s1

                if done or j == self.settings["MaxEpisodeSteps"]:
                    # update the global network and sync it back to the local net
                    self.net.Update(self.settings["NetworkHPs"],
                                    self.sess.run(self.global_step))
                    break

            self.progbar.update(self.sess.run(self.global_step))

            if saving:
                self.saver.save(self.sess,
                                self.MODEL_PATH + '/ctf_policy.ckpt',
                                global_step=self.sess.run(self.global_step))

            if logging:
                loggingDict = self.env.getLogging()
                netStats = self.net.GetStatistics()
                loggingDict.update(netStats)
                Record(loggingDict, self.writer,
                       self.sess.run(self.global_step))
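interval_flag(step, freq, name) gates logging and checkpointing in these workers. Its implementation is not shown; one hypothetical sketch that fires once per interval per named flag, even when the shared global step advances unevenly across workers:

_last_interval = {}

def interval_flag(step, freq, name):
    """Return True once each time `step` enters a new multiple-of-`freq` interval."""
    interval = step // freq
    if _last_interval.get(name, -1) < interval:
        _last_interval[name] = interval
        return True
    return False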
Example #5
File: ApeX.py Project: vanstrn/RL_public
    def work(self, COORD, render=False):
        """Main function of the Workers. This runs the environment and the experience
        is used to update the main Actor Critic Network.
        """
        #Allowing access to the global variables.
        while not COORD.should_stop() and self.sess.run(
                self.global_step) < self.settings["MaxEpisodes"]:

            logging = interval_flag(self.sess.run(self.global_step),
                                    self.settings["LogFreq"], 'logNet')

            self.net.Update(self.settings["NetworkHPs"],
                            self.sess.run(self.global_step))
            if logging:
                loggingDict = self.net.GetStatistics()
                Record(loggingDict, self.writer,
                       self.sess.run(self.global_step))
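Record(loggingDict, writer, step) is used in examples #4 through #7 as a scalar-summary dump. A plausible TF 1.x sketch, inferred only from the call sites above (hypothetical, not the project's code):

import tensorflow as tf

def Record(stats, writer, step):
    # write each statistic as a scalar summary tagged with the current global step
    for tag, value in stats.items():
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=float(value))])
        writer.add_summary(summary, step)
    writer.flush()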
Example #6
    def work(self, COORD, render=False):
        """Main function of the Workers. This runs the environment and the experience
        is used to update the main Actor Critic Network.
        """
        #Allowing access to the global variables.
        while not COORD.should_stop() and self.sess.run(
                self.global_step) < self.settings["MaxEpisodes"]:

            self.sess.run(self.global_step_next)

            logging = interval_flag(self.sess.run(self.global_step),
                                    self.settings["LogFreq"], 'log')
            saving = interval_flag(self.sess.run(self.global_step),
                                   self.settings["SaveFreq"], 'save')

            #Initializing environment and storage variables:
            s0 = self.env.reset()
            a_past = [0]
            r_i_past = [0.0]
            r_e_past = [0.0]

            for j in range(self.settings["MaxEpisodeSteps"] + 1):

                a, networkData = self.net.GetAction(state=s0,
                                                    episode=self.sess.run(
                                                        self.global_step),
                                                    step=j,
                                                    a_past=a_past,
                                                    r_i_past=r_i_past,
                                                    r_e_past=r_e_past)
                #networkData is expected to be [betaVal,betaOH]

                s1, r, done, _ = self.env.step(a)
                if render:
                    self.env.render()

                #Calculating Intrinsic Reward of the state:
                r_intrinsic = self.net.GetIntrinsicReward(s0, s1)
                r_total = r + networkData[0] * r_intrinsic

                #Adding to the trajectory
                self.net.AddToTrajectory(
                    [s0, a, r_total, s1, done, a_past, r_i_past, r_e_past] +
                    networkData)

                #Updating the storage variables; the past values are wrapped in
                #single-element lists to match how they were initialized above.
                s0 = s1
                a_past = [a]
                r_i_past = [r_intrinsic]
                r_e_past = [r]

                #Pushing entire trajectory to the buffer
                if done or j == self.settings["MaxEpisodeSteps"]:
                    self.net.PushToBuffer()
                    break

            self.progbar.update(self.sess.run(self.global_step))
            if logging:
                loggingDict = self.env.getLogging()
                Record(loggingDict, self.writer,
                       self.sess.run(self.global_step))
            if saving:
                self.saver.save(self.sess,
                                self.MODEL_PATH + '/ctf_policy.ckpt',
                                global_step=self.sess.run(self.global_step))
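In example #6 the total reward is r_total = r + beta * r_intrinsic, where beta comes back from the network as networkData[0]. GetIntrinsicReward itself is not shown; a hypothetical prediction-error sketch in the spirit of Random Network Distillation (every tensor name here is an assumption):

def GetIntrinsicReward(self, s0, s1):
    # hypothetical: intrinsic reward as the error of a trained predictor
    # against a fixed random target network, evaluated on the new state
    pred, target = self.sess.run(
        [self.predictor_out, self.target_out],
        feed_dict={self.state_ph: [s1]})
    return float(np.mean((pred - target) ** 2))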
Example #7
    # fragment from inside the training loop: sess, net, env, settings, logging,
    # saving, i, writer, saver, progbar, loggingFunctions and MODEL_PATH_ are
    # defined by the enclosing function
    s0 = env.reset()

    for j in range(settings["MAX_EP_STEPS"] + 1):
        updating = interval_flag(j, settings['UPDATE_GLOBAL_ITER'], 'update')

        a_hier, networkData = net.GetAction(state=s0,
                                            episode=sess.run(global_step),
                                            step=j)
        a = UseSubpolicy(s0, a_hier)
        s1, r, done, _ = env.step(action=a)

        net.AddToTrajectory([s0, a_hier, r, s1, done] + networkData)

        s0 = s1
        if updating:  # update global and assign to local net
            net.Update(settings["NetworkHPs"], sess.run(global_step))
        if done or j == settings["MAX_EP_STEPS"]:
            net.Update(settings["NetworkHPs"], sess.run(global_step))
            break
    loggingDict = env.getLogging()
    if logging:
        netStats = net.GetStatistics()
        loggingDict.update(netStats)
        Record(loggingDict, writer, sess.run(global_step))
        for func in loggingFunctions:
            func(sess.run(global_step))

    if saving:
        saver.save(sess, MODEL_PATH_ + '/ctf_policy.ckpt',
                   global_step=sess.run(global_step))
    progbar.update(i)
    # print(j,r)
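UseSubpolicy(s0, a_hier) converts the hierarchical action into a primitive environment action; its body is not shown. A hypothetical sketch where each high-level action indexes a fixed table of sub-policies:

def UseSubpolicy(state, a_hier):
    # hypothetical: a_hier selects one of several pre-trained sub-policies,
    # which then produces the primitive action for the environment
    return subpolicies[int(a_hier)].GetAction(state)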
Example #8
    def pre_train(self):
        if not self.init_pre_train_complete:
            print('init_pre_train() not called!')
            return

        print(' ')
        print('Pre-training with the following parameters:')
        print('\tState directory: %s' % self.pre_train_dir)
        print('\tTrainable variables are 16 bit? : %s' %
              ('Yes' if self.use_fp16 else 'No'))
        print('\tBatch size: %d' % self.pre_train_batch_size)
        print('\tTraining images are: [%d,%d,%d]' %
              (self.pre_train_height, self.pre_train_width, 1))
        print('\tLearning rate: %.3f' % self.learning_rate)
        print('\tRMSProp Optimizer decay: %.3f' % self.optimizer_decay)
        print('\tRMSProp Optimizer momentum: %.3f' % self.optimizer_momentum)
        print(' ')

        with tf.Graph().as_default():
            global_step = tf.train.get_or_create_global_step()
            rec = Record(self.pre_train_height, self.pre_train_width, 1,
                         self.use_fp16)
            with tf.device('/cpu:0'):
                image, label = rec.read_pre_train_record([self.pre_train_file])
                images, labels = tf.train.batch(
                    [image, label],
                    batch_size=self.pre_train_batch_size,
                    num_threads=self.pre_train_number_of_threads)

            _, pretrain = self.feature_network(images)
            loss = self._pretrain_loss(pretrain, labels)
            pretrain_op = self._get_pretrain_op(loss, global_step)

            batch_size = self.pre_train_batch_size
            logfreq = self.log_frequency

            #beholder_hook = BeholderHook(self.pre_train_dir)
            # session hook that logs loss and throughput every `logfreq` steps
            class _LoggerHook(tf.train.SessionRunHook):
                def begin(self):
                    self.pre_train_step = -1
                    self.start_time = time.time()

                def before_run(self, run_context):
                    self.pre_train_step += 1
                    return tf.train.SessionRunArgs(loss)

                def after_run(self, run_context, run_values):
                    if self.pre_train_step % logfreq == 0:
                        current_time = time.time()
                        duration = current_time - self.start_time
                        self.start_time = current_time

                        loss_value = run_values.results
                        examples_per_sec = logfreq * batch_size / duration
                        sec_per_batch = float(duration / logfreq)

                        format_str = (
                            '%s: step %d, loss = %.3f  (%.1f examples/sec; %.3f sec/batch)'
                        )
                        print(format_str %
                              (datetime.now(), self.pre_train_step, loss_value,
                               examples_per_sec, sec_per_batch))

            with tf.train.MonitoredTrainingSession(
                    checkpoint_dir=self.pre_train_dir,
                    hooks=[
                        tf.train.StopAtStepHook(
                            last_step=self.pre_train_max_steps),
                        tf.train.NanTensorHook(loss),
                        _LoggerHook()
                    ],
                    config=tf.ConfigProto(
                        log_device_placement=False)) as mon_sess:

                while not mon_sess.should_stop():
                    mon_sess.run([pretrain_op])
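feature_network and _pretrain_loss are defined elsewhere in the class. Since the records carry binary positive/negative labels, the loss is plausibly sigmoid cross-entropy; a hypothetical sketch, not the project's actual code:

def _pretrain_loss(self, logits, labels):
    # hypothetical: binary cross-entropy between the pre-train head's logits
    # and the 0/1 positive-vs-negative labels
    labels = tf.cast(labels, tf.float32)
    xent = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=labels, logits=tf.squeeze(logits))
    return tf.reduce_mean(xent)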