Code Example #1
File: task.py Project: cottrell/notebooks
def main(argv=None):
  """Run a Tensorflow model on the Criteo dataset."""
  env = json.loads(os.environ.get('TF_CONFIG', '{}'))
  # First find out if there's a task value on the environment variable.
  # If there is none or it is empty define a default one.
  task_data = env.get('task') or {'type': 'master', 'index': 0}
  argv = sys.argv if argv is None else argv
  args = create_parser().parse_args(args=argv[1:])

  trial = task_data.get('trial')
  if trial is not None:
    output_dir = os.path.join(args.output_path, trial)
  else:
    output_dir = args.output_path

  # Do only evaluation if instructed so, or call Experiment's run.
  if args.eval_only_summary_filename:
    experiment = get_experiment_fn(args)(output_dir)
    # Note that evaluation here will appear as 'one_pass' in tensorboard.
    results = experiment.evaluate(delay_secs=0)
    # Converts numpy types to native types for json dumps.
    json_out = json.dumps(
        {key: value.tolist() for key, value in results.items()})
    with tf.Session():
      tf.write_file(args.eval_only_summary_filename, json_out).run()
  else:
    learn_runner.run(experiment_fn=get_experiment_fn(args),
                     output_dir=output_dir)
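
A side note on the snippet above: it writes the JSON summary with `tf.write_file` rather than plain Python `open()`. The op goes through TensorFlow's filesystem layer, so the same code can also target remote locations such as gs:// paths when the corresponding filesystem is available. A minimal sketch of that pattern (TensorFlow 1.x assumed; the metrics dict and path are illustrative):

import json
import tensorflow as tf  # assumes TensorFlow 1.x

results = {"auc": 0.93}  # illustrative metrics
json_out = json.dumps(results)
with tf.Session():
    # Local paths and, e.g., gs:// paths both go through TF's filesystem layer.
    tf.write_file("/tmp/eval_only_summary.json", json_out).run()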
Code Example #2
File: eval.py Project: IoannisKansizoglou/models
def main(_, run_eval_loop=True):
  # Fetch and generate images to run through Inception.
  with tf.name_scope('inputs'):
    real_data, num_classes = _get_real_data(
        FLAGS.num_images_generated, FLAGS.dataset_dir)
    generated_data = _get_generated_data(
        FLAGS.num_images_generated, FLAGS.conditional_eval, num_classes)

  # Compute Frechet Inception Distance.
  if FLAGS.eval_frechet_inception_distance:
    fid = util.get_frechet_inception_distance(
        real_data, generated_data, FLAGS.num_images_generated,
        FLAGS.num_inception_images)
    tf.summary.scalar('frechet_inception_distance', fid)

  # Compute normal Inception scores.
  if FLAGS.eval_real_images:
    inc_score = util.get_inception_scores(
        real_data, FLAGS.num_images_generated, FLAGS.num_inception_images)
  else:
    inc_score = util.get_inception_scores(
        generated_data, FLAGS.num_images_generated, FLAGS.num_inception_images)
  tf.summary.scalar('inception_score', inc_score)

  # If conditional, display an image grid of different classes.
  if FLAGS.conditional_eval and not FLAGS.eval_real_images:
    reshaped_imgs = util.get_image_grid(
        generated_data, FLAGS.num_images_generated, num_classes,
        FLAGS.num_images_per_class)
    tf.summary.image('generated_data', reshaped_imgs, max_outputs=1)

  # Create ops that write images to disk.
  image_write_ops = None
  if FLAGS.conditional_eval and FLAGS.write_to_disk:
    reshaped_imgs = util.get_image_grid(
        generated_data, FLAGS.num_images_generated, num_classes,
        FLAGS.num_images_per_class)
    uint8_images = data_provider.float_image_to_uint8(reshaped_imgs)
    image_write_ops = tf.write_file(
        '%s/%s'% (FLAGS.eval_dir, 'conditional_cifar10.png'),
        tf.image.encode_png(uint8_images[0]))
  elif FLAGS.num_images_generated >= 100 and FLAGS.write_to_disk:
    reshaped_imgs = tfgan.eval.image_reshaper(
        generated_data[:100], num_cols=FLAGS.num_images_per_class)
    uint8_images = data_provider.float_image_to_uint8(reshaped_imgs)
    image_write_ops = tf.write_file(
        '%s/%s' % (FLAGS.eval_dir, 'unconditional_cifar10.png'),
        tf.image.encode_png(uint8_images[0]))

  # For unit testing, use `run_eval_loop=False`.
  if not run_eval_loop: return
  tf.contrib.training.evaluate_repeatedly(
      FLAGS.checkpoint_dir,
      master=FLAGS.master,
      hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
             tf.contrib.training.StopAfterNEvalsHook(1)],
      eval_ops=image_write_ops,
      max_number_of_evaluations=FLAGS.max_number_of_evaluations)
Code Example #3
File: conditional_eval.py Project: CoolSheng/models
def main(_, run_eval_loop=True):
  with tf.name_scope('inputs'):
    noise, one_hot_labels = _get_generator_inputs(
        FLAGS.num_images_per_class, NUM_CLASSES, FLAGS.noise_dims)

  # Generate images.
  with tf.variable_scope('Generator'):  # Same scope as in train job.
    images = networks.conditional_generator((noise, one_hot_labels))

  # Visualize images.
  reshaped_img = tfgan.eval.image_reshaper(
      images, num_cols=FLAGS.num_images_per_class)
  tf.summary.image('generated_images', reshaped_img, max_outputs=1)

  # Calculate evaluation metrics.
  tf.summary.scalar('MNIST_Classifier_score',
                    util.mnist_score(images, FLAGS.classifier_filename))
  tf.summary.scalar('MNIST_Cross_entropy',
                    util.mnist_cross_entropy(
                        images, one_hot_labels, FLAGS.classifier_filename))

  # Write images to disk.
  image_write_ops = tf.write_file(
      '%s/%s'% (FLAGS.eval_dir, 'conditional_gan.png'),
      tf.image.encode_png(data_provider.float_image_to_uint8(reshaped_img[0])))

  # For unit testing, use `run_eval_loop=False`.
  if not run_eval_loop: return
  tf.contrib.training.evaluate_repeatedly(
      FLAGS.checkpoint_dir,
      hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
             tf.contrib.training.StopAfterNEvalsHook(1)],
      eval_ops=image_write_ops,
      max_number_of_evaluations=FLAGS.max_number_of_evaluations)
Code Example #4
File: text_encoder.py Project: zeyu-h/tensor2tensor
  def decode(self, ids):
    """Transform a sequence of int ids into an image file.

    Args:
      ids: list of integers to be converted.

    Returns:
      Path to the temporary file where the image was saved.

    Raises:
      ValueError: if the ids are not of the appropriate size.
    """
    _, tmp_file_path = tempfile.mkstemp()
    length = self._height * self._width * self._channels
    if len(ids) != length:
      raise ValueError("Length of ids (%d) must be height (%d) x width (%d) x "
                       "channels (%d); %d != %d.\n Ids: %s"
                       % (len(ids), self._height, self._width, self._channels,
                          len(ids), length, " ".join([str(i) for i in ids])))
    with tf.Graph().as_default():
      raw = tf.constant(ids, dtype=tf.uint8)
      img = tf.reshape(raw, [self._height, self._width, self._channels])
      png = tf.image.encode_png(img)
      op = tf.write_file(tmp_file_path, png)
      with tf.Session() as sess:
        sess.run(op)
    return tmp_file_path
Code Example #5
File: io_ops_test.py Project: brchiu/tensorflow
 def testWriteFile(self):
     cases = ["", "Some contents"]
     for contents in cases:
         contents = tf.compat.as_bytes(contents)
         temp = tempfile.NamedTemporaryFile(prefix="WriteFileTest", dir=self.get_temp_dir())
         with self.test_session() as sess:
             w = tf.write_file(temp.name, contents)
             sess.run(w)
             file_contents = open(temp.name, "rb").read()
             self.assertEqual(file_contents, contents)
Code Example #6
File: io_ops_test.py Project: BloodD/tensorflow
 def testWriteFile(self):
   cases = ['', 'Some contents']
   for contents in cases:
     contents = tf.compat.as_bytes(contents)
     with tempfile.NamedTemporaryFile(prefix='WriteFileTest',
                                      dir=self.get_temp_dir(),
                                      delete=False) as temp:
       pass
     with self.test_session() as sess:
       w = tf.write_file(temp.name, contents)
       sess.run(w)
       with open(temp.name, 'rb') as f:
         file_contents = f.read()
       self.assertEqual(file_contents, contents)
     os.remove(temp.name)
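
The two `testWriteFile` variants above boil down to a small reusable pattern: build the write op, run it in a session, and the file appears on disk. A minimal self-contained sketch (TensorFlow 1.x assumed; in 2.x the eager `tf.io.write_file` needs no session):

import os
import tempfile
import tensorflow as tf  # assumes TensorFlow 1.x

fd, path = tempfile.mkstemp(prefix="WriteFileDemo")
os.close(fd)

write_op = tf.write_file(path, tf.constant(b"Some contents"))
with tf.Session() as sess:
    sess.run(write_op)  # the file is only written when the op executes

with open(path, "rb") as f:
    assert f.read() == b"Some contents"
os.remove(path)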
Code Example #7
    def decode(self, ids, strip_extraneous=False):
        """Transform a sequence of int ids into an image file.

    Args:
      ids: list of integers to be converted.
      strip_extraneous: unused

    Returns:
      Path to the temporary file where the image was saved.

    Raises:
      ValueError: if the ids are not of the appropriate size.
    """
        del strip_extraneous
        _, tmp_file_path = tempfile.mkstemp("_decode.png")
        if self._height is None or self._width is None:
            size = int(math.sqrt(len(ids) / self._channels))
            length = size * size * self._channels
        else:
            size = None
            length = self._height * self._width * self._channels
        if len(ids) != length:
            # %s for height/width: they may be None when inferred from len(ids).
            raise ValueError(
                "Length of ids (%d) must be height (%s) x width (%s) x "
                "channels (%d); %d != %d.\n Ids: %s" %
                (len(ids), self._height, self._width, self._channels, len(ids),
                 length, " ".join([str(i) for i in ids])))
        with tf.Graph().as_default():
            raw = tf.constant(ids, dtype=tf.uint8)
            if size is None:
                img = tf.reshape(raw,
                                 [self._height, self._width, self._channels])
            else:
                img = tf.reshape(raw, [size, size, self._channels])
            png = tf.image.encode_png(img)
            op = tf.write_file(tmp_file_path, png)
            with tf.Session() as sess:
                sess.run(op)
        return tmp_file_path
Code Example #8
File: main.py Project: prateekj117/IStegGAN
    def test(self, saver, files_list, batch_size, path):
        self.load_chkp(saver, path)
        for step in range(1000):
            print("Epoch:\n", step)
            p = 1
            covers, secrets = get_img_batch(files_list, batch_size, p)
            hiding_output_op, reveal_output_op, summary, total_loss, cover_loss, secret_loss = self.sess.run(
                [self.hiding_output_op, self.reveal_output_op, self.summary_op, self.loss_op, self.cover_loss_op,
                 self.secret_loss_op],
                feed_dict={"input_prep:0": secrets, "input_hide:0": covers})
            self.writer.add_summary(summary)

            # hiding_output_op = self.get_tensor_to_img_op(hiding_output_op)

            for k in range(batch_size):
                im = tf.reshape(tf.cast(hiding_output_op[k], tf.uint8), [224, 224, 3])
                images_encode = tf.image.encode_jpeg(im)
                fname = tf.constant('%s.jpeg' % self.i)
                self.i += 1
                fwrite = tf.write_file(fname, images_encode)
                self.sess.run(fwrite)  # execute the write; the op was built but never run in the original

            for k in range(batch_size):
                im = Image.fromarray(np.uint8((hiding_output_op[k]) * 255))
                im.save('./Wnet/container/%s.jpg' % self.i)
                self.i += 1

            for k in range(batch_size):
                im = Image.fromarray(np.uint8((reveal_output_op[k]) * 255))
                im.save('./Wnet/secret/%s.jpg' % self.j)
                self.j += 1

            print("total loss at step %s: %s" % (step, total_loss))
            print("cover loss at step %s: %s" % (step, cover_loss))
            print("secret loss at step %s: %s" % (step, secret_loss))
Code Example #9
def main(_, run_eval_loop=True):
  # Fetch real images.
  with tf.name_scope('inputs'):
    real_images, _, _ = data_provider.provide_data(
        'train', FLAGS.num_images_generated, FLAGS.dataset_dir)

  image_write_ops = None
  if FLAGS.eval_real_images:
    tf.summary.scalar('MNIST_Classifier_score',
                      util.mnist_score(real_images, FLAGS.classifier_filename))
  else:
    # In order for variables to load, use the same variable scope as in the
    # train job.
    with tf.variable_scope('Generator'):
      images = networks.unconditional_generator(
          tf.random_normal([FLAGS.num_images_generated, FLAGS.noise_dims]),
          is_training=False)
    tf.summary.scalar('MNIST_Frechet_distance',
                      util.mnist_frechet_distance(
                          real_images, images, FLAGS.classifier_filename))
    tf.summary.scalar('MNIST_Classifier_score',
                      util.mnist_score(images, FLAGS.classifier_filename))
    if FLAGS.num_images_generated >= 100 and FLAGS.write_to_disk:
      reshaped_images = tfgan.eval.image_reshaper(
          images[:100, ...], num_cols=10)
      uint8_images = data_provider.float_image_to_uint8(reshaped_images)
      image_write_ops = tf.write_file(
          '%s/%s'% (FLAGS.eval_dir, 'unconditional_gan.png'),
          tf.image.encode_png(uint8_images[0]))

  # For unit testing, use `run_eval_loop=False`.
  if not run_eval_loop: return
  tf.contrib.training.evaluate_repeatedly(
      FLAGS.checkpoint_dir,
      hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
             tf.contrib.training.StopAfterNEvalsHook(1)],
      eval_ops=image_write_ops,
      max_number_of_evaluations=FLAGS.max_number_of_evaluations)
Code Example #10
def main(_, run_eval_loop=True):
    with tf.name_scope('inputs'):
        noise, one_hot_labels = _get_generator_inputs(
            FLAGS.num_images_per_class, NUM_CLASSES, FLAGS.noise_dims)

    # Generate images.
    with tf.variable_scope('Generator'):  # Same scope as in train job.
        images = networks.conditional_generator(
            (noise, one_hot_labels), is_training=False)

    # Visualize images.
    reshaped_img = tfgan.eval.image_reshaper(
        images, num_cols=FLAGS.num_images_per_class)
    tf.summary.image('generated_images', reshaped_img, max_outputs=1)

    # Calculate evaluation metrics.
    tf.summary.scalar('MNIST_Classifier_score',
                      util.mnist_score(images, FLAGS.classifier_filename))
    tf.summary.scalar('MNIST_Cross_entropy',
                      util.mnist_cross_entropy(
                          images, one_hot_labels, FLAGS.classifier_filename))

    # Write images to disk.
    image_write_ops = None
    if FLAGS.write_to_disk:
        image_write_ops = tf.write_file(
            '%s/%s' % (FLAGS.eval_dir, 'conditional_gan.png'),
            tf.image.encode_png(data_provider.float_image_to_uint8(
                reshaped_img[0])))

    # For unit testing, use `run_eval_loop=False`.
    if not run_eval_loop: return
    tf.contrib.training.evaluate_repeatedly(
        FLAGS.checkpoint_dir,
        hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
               tf.contrib.training.StopAfterNEvalsHook(1)],
        eval_ops=image_write_ops,
        max_number_of_evaluations=FLAGS.max_number_of_evaluations)
Code Example #11
def log_batch_images(img_batch, log_dir, step, sess):
    if step == 1:
        step = 100

    if step % 100 == 0:
        step = int(step / 100) * 100

        train_pic_dir = os.path.join(log_dir, 'training_pics')
        if not os.path.exists(train_pic_dir):
            os.makedirs(train_pic_dir)

        train_pic_dir = os.path.join(train_pic_dir,
                                     'batches_in_step_' + str(step))
        if not os.path.exists(train_pic_dir):
            os.makedirs(train_pic_dir)

        for idx, item in enumerate(img_batch[0]):
            # Note: the encode/write ops are created anew for every image,
            # which grows the graph on each call; acceptable for occasional logging.
            write = tf.image.encode_png(item)
            write = tf.write_file(
                os.path.join(
                    train_pic_dir, 'trainee_' + str(step) + '_' + str(idx) +
                    '_' + str(img_batch[1][idx])) + '.png', write)
            sess.run(write)
Code Example #12
def write_image(image, file_name):
    '''
    Writes the given image to file.

    Arguments:
        image       -- The image to write to file.
        file_name   -- The full file name for the file to write.

    Returns:
        The write file output node.
    '''

    target_shape = [
        tf.shape(image)[1],
        tf.shape(image)[2],
        int(image.shape[3])
    ]
    target_shape[BATCH_STACK_AXIS] = -1

    converted_image = tf.reshape(convert_image(image), target_shape)
    raw_image = ENCODER_FUNCTION(converted_image)

    return tf.write_file(file_name, raw_image)
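
A hedged driver for `write_image` above. In the source, BATCH_STACK_AXIS, ENCODER_FUNCTION, and convert_image are module-level definitions not shown here; the stand-ins below are assumptions chosen so the sketch runs, not the project's actual values:

import tensorflow as tf  # assumes TensorFlow 1.x

# Stand-ins (assumptions, not from the source project):
BATCH_STACK_AXIS = 0                                   # stack batch images vertically
ENCODER_FUNCTION = tf.image.encode_png
convert_image = lambda img: tf.saturate_cast(img * 255, tf.uint8)

batch = tf.random_uniform([2, 32, 32, 3])              # NHWC float batch in [0, 1]
write_op = write_image(batch, 'stacked.png')           # both images land in one PNG
with tf.Session() as sess:
    sess.run(write_op)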
Code Example #13
def test(sess, test_references, run_keys=[], feed_dict_keys=[]):
    global IMAGE_HEIGHT, IMAGE_WIDTH
    model_loss, prediction = run_keys
    input_layer, ground_truth = feed_dict_keys
    total_loss = 0
    for test_reference in test_references:
        depth_filename = "/Users/georgestoica/Desktop/Research/Mini-Project/ds_depth/{}.png".format(
            test_reference)
        input_filename = "/Users/georgestoica/Desktop/Research/Mini-Project/ds_image/{}.png".format(
            test_reference)
        depth = np.reshape(
            cv2.imread(depth_filename,
                       cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH),
            (1, IMAGE_HEIGHT, IMAGE_WIDTH, 1))
        input_img = np.reshape(
            cv2.imread(input_filename,
                       cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH),
            (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3))

        loss, pred = sess.run([model_loss, prediction],
                              feed_dict={
                                  input_layer: input_img,
                                  ground_truth: depth
                              })
        # Encode image. Note: these ops are rebuilt on every loop iteration,
        # which grows the graph; hoisting them out with a placeholder is cheaper.
        pred_rs = tf.reshape(pred, [94, 311, 1])
        pred_u16 = tf.image.convert_image_dtype(pred_rs, tf.uint16)
        pred_enc = tf.image.encode_png(pred_u16)
        depth_fpath = tf.constant(
            "/Users/georgestoica/Desktop/Research/Mini-Project/pred_depth/{}.png"
            .format(test_reference))
        depth_write = tf.write_file(depth_fpath, pred_enc)
        dummy_ = sess.run(depth_write)

        total_loss += loss
    avg_loss = float(total_loss) / float(len(test_references))
    print("The average test loss is: " + str(avg_loss))
Code Example #14
def make_image(name, var, image_dims):
    prod = np.prod(image_dims)
    grid = form_image_grid(tf.reshape(var, [BATCH_SIZE, prod]), [GRID_ROWS,
        GRID_COLS], image_dims, 1)
    s_grid = tf.squeeze(grid, axis=0)

    # This reproduces the code in: tensorflow/core/kernels/summary_image_op.cc
    im_min = tf.reduce_min(s_grid)
    im_max = tf.reduce_max(s_grid)

    kZeroThreshold = tf.constant(1e-6)
    max_val = tf.maximum(tf.abs(im_min), tf.abs(im_max))

    offset = tf.cond(
            im_min < tf.constant(0.0),
            lambda: tf.constant(128.0),
            lambda: tf.constant(0.0)
            )
    scale = tf.cond(
            im_min < tf.constant(0.0),
            lambda: tf.cond(
                max_val < kZeroThreshold,
                lambda: tf.constant(0.0),
                lambda: tf.div(127.0, max_val)
                ),
            lambda: tf.cond(
                im_max < kZeroThreshold,
                lambda: tf.constant(0.0),
                lambda: tf.div(255.0, im_max)
                )
            )
    s_grid = tf.cast(tf.add(tf.multiply(s_grid, scale), offset), tf.uint8)
    enc = tf.image.encode_jpeg(s_grid)

    fwrite = tf.write_file(name, enc)
    return fwrite
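
For intuition about the offset/scale logic above (an arithmetic reading of the code, mirroring summary_image_op.cc): if the grid contains negative values, say spanning [-1, 1], then offset = 128 and scale = 127 / max_val = 127, so -1 maps to 1 and 1 maps to 255; if the grid is non-negative, say [0, 1], then offset = 0 and scale = 255 / im_max = 255, covering the full uint8 range.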

Code Example #15
#-----------------------------------------------------------------------------------

#creating augmented images----------------------------------------------------------
types = ('newImagesForAugment\*.jpg', 'newImagesForAugment\*.jpeg')  # Windows-style glob patterns
for files in types:
    IMAGES_NAME.extend(glob.glob(files))

# One session reused for all the augmentation writes below
sess1 = tf.Session()
for singleImage in IMAGES_NAME:
    img = pt.imread(singleImage)
    tf_img = tf.convert_to_tensor(img)
    brght_img = tf.image.flip_left_right(tf_img)
    fileToSave = tf.image.encode_jpeg(brght_img)
    fname = tf.constant(singleImage.split('.')[0] + 'output2.jpg')
    fwrite = tf.write_file(fname, fileToSave)
    result = sess1.run(fwrite)
    brght_img = tf.image.transpose_image(tf_img)
    fileToSave = tf.image.encode_jpeg(brght_img)
    fname = tf.constant(singleImage.split('.')[0] + 'transposed.jpg')
    fwrite = tf.write_file(fname, fileToSave)
    result = sess1.run(fwrite)
    brght_img = tf.image.rot90(tf_img)
    fileToSave = tf.image.encode_jpeg(brght_img)
    fname = tf.constant(singleImage.split('.')[0] + 'rot90.jpg')
    fwrite = tf.write_file(fname, fileToSave)
    result = sess1.run(fwrite)
    brght_img = tf.image.rot90(tf_img, k=3)
    fileToSave = tf.image.encode_jpeg(brght_img)
    fname = tf.constant(singleImage.split('.')[0] + 'rot270.jpg')  # was 'rot90.jpg', which overwrote the k=1 rotation
    fwrite = tf.write_file(fname, fileToSave)
    result = sess1.run(fwrite)  # final write was built but never executed in the original
Code Example #16
File: Run_ColorNorm.py Project: leigaoyi/spcn
def run_batch_colornorm(filenames,
                        nstains,
                        lamb,
                        output_direc,
                        img_level,
                        background_correction=True,
                        config=None):

    if config is None:
        config = tf.ConfigProto(log_device_placement=False)

    g_1 = tf.Graph()
    with g_1.as_default():
        Wis1 = tf.placeholder(tf.float32)
        Img1 = tf.placeholder(tf.float32, shape=(None, None, 3))
        src_i_0 = tf.placeholder(tf.float32)

        s = tf.shape(Img1)
        Img_vecd = tf.reshape(tf.minimum(Img1, src_i_0), [s[0] * s[1], s[2]])
        V = tf.log(src_i_0 + 1.0) - tf.log(Img_vecd + 1.0)
        Wi_inv = tf.transpose(tf.py_func(np.linalg.pinv, [Wis1], tf.float32))
        Hiv1 = tf.nn.relu(tf.matmul(V, Wi_inv))

        Wit1 = tf.placeholder(tf.float32)
        Hiv2 = tf.placeholder(tf.float32)
        sav_name = tf.placeholder(tf.string)
        tar_i_0 = tf.placeholder(tf.float32)
        normfac = tf.placeholder(tf.float32)
        shape = tf.placeholder(tf.int32)

        Hsonorm = Hiv2 * normfac
        source_norm = tf.cast(
            tar_i_0 * tf.exp(
                (-1) * tf.reshape(tf.matmul(Hsonorm, Wit1), shape)), tf.uint8)
        enc = tf.image.encode_png(source_norm)
        fwrite = tf.write_file(sav_name, enc)

    session1 = tf.Session(graph=g_1, config=config)

    file_no = 0
    print "To be normalized:", filenames[1:], "using", filenames[0]
    for filename in filenames:

        display_separator()

        if background_correction:
            correc = "back-correc"
        else:
            correc = "no-back-correc"

        base_t = os.path.basename(filenames[0])  #target.svs
        fname_t = os.path.splitext(base_t)[0]  #target
        base_s = os.path.basename(filename)  #source.svs
        fname_s = os.path.splitext(base_s)[0]  #source
        f_form = os.path.splitext(base_s)[1]  #.svs
        s = output_direc + base_s.replace(
            ".", "_") + " (using " + base_t.replace(
                ".", "_") + " " + correc + ").png"
        # s=output_direc+base_s.replace(".", "_")+" (no-norm using "+base_t.replace(".", "_")+").png"
        #s=output_direc+fname_s+"_normalized.png"

        tic = time.time()
        print

        I = openslide.open_slide(filename)
        if img_level >= I.level_count:
            print "Level", img_level, "unavailable for image, proceeding with level 0"
            level = 0
        else:
            level = img_level
        xdim, ydim = I.level_dimensions[level]
        ds = I.level_downsamples[level]

        if file_no == 0:
            print "Target Stain Separation in progress:", filename, str(
                xdim) + str("x") + str(ydim)
        else:
            print "Source Stain Separation in progress:", filename, str(
                xdim) + str("x") + str(ydim)
        print "\t \t \t \t \t \t \t \t \t \t Time: 0"

        #parameters for W estimation
        num_patches = 20
        patchsize = 1000  #length of side of square

        i0_default = np.array([255., 255., 255.], dtype=np.float32)

        Wi, i0 = Wfast(I, nstains, lamb, num_patches, patchsize, level,
                       background_correction)
        if i0 is None:
            print "No white background detected"
            i0 = i0_default

        if not background_correction:
            print "Background correction disabled, default background intensity assumed"
            i0 = i0_default

        if Wi is None:
            print "Color Basis Matrix Estimation failed...image normalization skipped"
            continue
        print "W estimated",
        print "\t \t \t \t \t \t Time since processing started:", round(
            time.time() - tic, 3)
        Wi = Wi.astype(np.float32)

        if file_no == 0:
            print "Target Color Basis Matrix:"
            print Wi
            Wi_target = np.transpose(Wi)
            tar_i0 = i0
            print "Target Image Background white intensity:", i0
        else:
            print "Source Color Basis Matrix:"
            print Wi
            print "Source Image Background white intensity:", i0

        _max = 2000

        print
        if (xdim * ydim) <= (_max * _max):
            print "Small image processing..."
            img = np.asarray(I.read_region((0, 0), level, (xdim, ydim)),
                             dtype=np.float32)[:, :, :3]

            Hiv = session1.run(Hiv1,
                               feed_dict={
                                   Img1: img,
                                   Wis1: Wi,
                                   src_i_0: i0
                               })
            # Hta_Rmax = np.percentile(Hiv,q=99.,axis=0)
            H_Rmax = np.ones((nstains, ), dtype=np.float32)
            for i in range(nstains):
                t = Hiv[:, i]
                H_Rmax[i] = np.percentile(t[t > 0], q=99., axis=0)

            if file_no == 0:
                file_no += 1
                Hta_Rmax = np.copy(H_Rmax)
                print "Target H calculated",
                print "\t \t \t \t \t \t \t Total Time:", round(
                    time.time() - tic, 3)
                display_separator()
                continue

            print "Color Normalization in progress..."

            norm_fac = np.divide(Hta_Rmax, H_Rmax).astype(np.float32)
            session1.run(fwrite,
                         feed_dict={
                             shape: np.array(img.shape),
                             Wit1: Wi_target,
                             Hiv2: Hiv,
                             sav_name: s,
                             tar_i_0: tar_i0,
                             normfac: norm_fac
                         })

            print "File written to:", s
            print "\t \t \t \t \t \t \t \t \t \t Total Time:", round(
                time.time() - tic, 3)
            display_separator()

        else:
            _maxtf = 3000
            x_max = xdim
            y_max = min(max(int(_maxtf * _maxtf / x_max), 1), ydim)
            print "Large image processing..."
            if file_no == 0:
                Hivt = np.memmap('H_target',
                                 dtype='float32',
                                 mode='w+',
                                 shape=(xdim * ydim, 2))
            else:
                Hivs = np.memmap('H_source',
                                 dtype='float32',
                                 mode='w+',
                                 shape=(xdim * ydim, 2))
                sourcenorm = np.memmap('wsi',
                                       dtype='uint8',
                                       mode='w+',
                                       shape=(ydim, xdim, 3))
            x_tl = range(0, xdim, x_max)
            y_tl = range(0, ydim, y_max)
            print "WSI divided into", str(len(x_tl)) + "x" + str(len(y_tl))
            count = 0
            print "Patch-wise H calculation in progress..."
            ind = 0
            perc = []
            for x in x_tl:
                for y in y_tl:
                    count += 1
                    xx = min(x_max, xdim - x)
                    yy = min(y_max, ydim - y)
                    print "Processing:", count, "		patch size", str(
                        xx) + "x" + str(yy),
                    print "\t \t Time since processing started:", round(
                        time.time() - tic, 3)
                    img = np.asarray(I.read_region((int(ds * x), int(ds * y)),
                                                   level, (xx, yy)),
                                     dtype=np.float32)[:, :, :3]

                    Hiv = session1.run(Hiv1,
                                       feed_dict={
                                           Img1: img,
                                           Wis1: Wi,
                                           src_i_0: i0
                                       })
                    if file_no == 0:
                        Hivt[ind:ind + len(Hiv), :] = Hiv
                        _Hta_Rmax = np.ones((nstains, ), dtype=np.float32)
                        for i in range(nstains):
                            t = Hiv[:, i]
                            _Hta_Rmax[i] = np.percentile(t[t > 0],
                                                         q=99.,
                                                         axis=0)
                        perc.append([_Hta_Rmax[0], _Hta_Rmax[1]])
                        ind += len(Hiv)
                        continue
                    else:
                        Hivs[ind:ind + len(Hiv), :] = Hiv
                        _Hso_Rmax = np.ones((nstains, ), dtype=np.float32)
                        for i in range(nstains):
                            t = Hiv[:, i]
                            _Hso_Rmax[i] = np.percentile(t[t > 0],
                                                         q=99.,
                                                         axis=0)
                        perc.append([_Hso_Rmax[0], _Hso_Rmax[1]])
                        ind += len(Hiv)

            if file_no == 0:
                print "Target H calculated",
                Hta_Rmax = np.percentile(np.array(perc), 50, axis=0)
                file_no += 1
                del Hivt
                print "\t \t \t \t \t Time since processing started:", round(
                    time.time() - tic, 3)
                ind = 0
                continue

            print "Source H calculated",
            print "\t \t \t \t \t Time since processing started:", round(
                time.time() - tic, 3)
            Hso_Rmax = np.percentile(np.array(perc), 50, axis=0)
            print "H Percentile calculated",
            print "\t \t \t \t Time since processing started:", round(
                time.time() - tic, 3)

            _normfac = np.divide(Hta_Rmax, Hso_Rmax).astype(np.float32)

            print "Color Normalization in progress..."
            count = 0
            ind = 0
            np_max = 1000

            x_max = xdim
            y_max = min(max(int(np_max * np_max / x_max), 1), ydim)
            x_tl = range(0, xdim, x_max)
            y_tl = range(0, ydim, y_max)
            print "Patch-wise color normalization in progress..."
            total = len(x_tl) * len(y_tl)

            prev_progress = 0
            for x in x_tl:
                for y in y_tl:
                    count += 1
                    xx = min(x_max, xdim - x)
                    yy = min(y_max, ydim - y)
                    pix = xx * yy
                    sh = np.array([yy, xx, 3])

                    #Back projection into spatial intensity space (Inverse Beer-Lambert space)

                    sourcenorm[y:y + yy, x:x + xx, :3] = session1.run(
                        source_norm,
                        feed_dict={
                            Hiv2: np.array(Hivs[ind:ind + pix, :]),
                            Wit1: Wi_target,
                            normfac: _normfac,
                            shape: sh,
                            tar_i_0: tar_i0
                        })

                    ind += pix
                    percent = 5 * int(count * 20 / total)  #nearest 5 percent
                    if percent > prev_progress and percent < 100:
                        print str(percent) + " percent complete...",
                        print "\t \t \t \t \t Time since processing started:", round(
                            time.time() - tic, 3)
                        prev_progress = percent
            print "Color Normalization complete!",
            print "\t \t \t \t Time since processing started:", round(
                time.time() - tic, 3)

            p = time.time() - tic
            s = output_direc + base_s.replace(
                ".", "_") + " (using " + base_t.replace(
                    ".", "_") + " " + correc + ").png"
            print "Saving normalized image..."
            cv2.imwrite(s, cv2.cvtColor(sourcenorm, cv2.COLOR_RGB2BGR))
            del sourcenorm
            print "File written to:", s
            print "\t \t \t \t \t \t \t \t \t Total Time:", round(
                time.time() - tic, 3)
            display_separator()

        file_no += 1
        if os.path.exists("H_target"):
            os.remove("H_target")
        if os.path.exists("H_source"):
            os.remove("H_source")
        if os.path.exists("wsi"):
            os.remove("wsi")

    session1.close()
Code Example #17
def save_image(filename, image):
    image = tf.image.encode_png(image)
    return tf.write_file(filename, image)
Code Example #18
def save_image(filename, image):
    """Saves an image to a PNG file."""
    image = quantize_image(image)
    string = tf.image.encode_png(image)
    return tf.write_file(filename, string)
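
`quantize_image` above is defined elsewhere in the source project. A sketch of a plausible stand-in plus a driver, so the helper can be exercised end to end (the stand-in is an assumption and presumes the float image lives in [0, 1]):

import tensorflow as tf  # assumes TensorFlow 1.x

def quantize_image(image):
    # Assumed stand-in: scale a [0, 1] float image to [0, 255] and
    # saturate-cast to uint8 so encode_png accepts it.
    return tf.saturate_cast(tf.round(image * 255), tf.uint8)

recon = tf.random_uniform([64, 64, 3])  # illustrative float image
write_op = save_image("reconstruction.png", recon)
with tf.Session() as sess:
    sess.run(write_op)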
Code Example #19
#AutoA_eval = tf.split(AutoA_eval,1)
#AutoB_eval = tf.split(AutoB_eval,1)
#AutoA_eval.set_shape([288,512,3])
#AutoB_eval.set_shape([288,512,3])
encode_imA = tf.image.encode_jpeg(tf.cast(255 * AutoA_eval[0], tf.uint8))
encode_imB = tf.image.encode_jpeg(tf.cast(255 * AutoB_eval[0], tf.uint8))

encode_imAA = tf.image.encode_jpeg(tf.cast(255 * AutoA[0], tf.uint8))
encode_imBB = tf.image.encode_jpeg(tf.cast(255 * AutoB[0], tf.uint8))

name_A = tf.placeholder(tf.string)
name_B = tf.placeholder(tf.string)
name_AA = tf.placeholder(tf.string)
name_BB = tf.placeholder(tf.string)

fwriteA = tf.write_file(name_A, encode_imA)
fwriteB = tf.write_file(name_B, encode_imB)
fwriteAA = tf.write_file(name_AA, encode_imAA)
fwriteBB = tf.write_file(name_BB, encode_imBB)

diffA = tf.reduce_mean(tf.square(imA - AutoA))
diffB = tf.reduce_mean(tf.square(imB - AutoB))

kf = tf.placeholder(tf.float32)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    optimizeA = tf.train.AdamOptimizer(kf).minimize(diffA)
    optimizeB = tf.train.AdamOptimizer(kf).minimize(diffB)

#Option for GTX 970 on home PC. Design flaw in that card makes last bit of VRAM Slow
gpu_opt = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
Code Example #20
def _get_write_image_ops(eval_dir, filename, images):
    """Create Ops that write images to disk."""
    return tf.write_file(
        '%s/%s' % (eval_dir, filename),
        tf.image.encode_png(data_provider.float_image_to_uint8(images)))
Code Example #21
#test -> the image should match the class name
print(all_image_paths[2])
print(classNames[imagesClassIdList[2]])

#load the image into a tensor (matrix) -> just like in JA :P
img_raw = tf.read_file(all_image_paths[2])

img_tensor = tf.image.decode_jpeg(img_raw,channels=3)

#crop the image -> every annotation of our image has "bounding boxes" to which
#the images should be cropped
cropped_image = tf.image.crop_to_bounding_box(img_tensor,2,2,100,100)

#test write
saveImage = tf.image.encode_jpeg(cropped_image)
writer = tf.write_file('test.jpg', saveImage)


print(img_tensor.shape) 

#all good, but nothing has actually executed
#TF operations are lazily evaluated here: read_file and
#decode_jpeg have not run yet; they only execute once a session is declared and run


with tf.Session() as sess:   
    img = sess.run(img_tensor)
    img2 = sess.run(cropped_image)
    sess.run(writer)
    print(img.shape)
    print(img2.shape)
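
For comparison, the same pipeline under TensorFlow 2.x runs eagerly: the read, crop, and write execute immediately and no session is needed. A sketch under that assumption, reusing the path list from above:

import tensorflow as tf  # assumes TensorFlow 2.x (eager execution)

img_raw = tf.io.read_file(all_image_paths[2])
img_tensor = tf.io.decode_jpeg(img_raw, channels=3)
cropped = tf.image.crop_to_bounding_box(img_tensor, 2, 2, 100, 100)
tf.io.write_file('test.jpg', tf.io.encode_jpeg(cropped))  # happens immediately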
Code Example #22
imsp = tf.shape(im)
impng = tf.reshape(im, [imsp[0], imsp[1] * imsp[2], 1])
impng = tf.cast(
    tf.clip_by_value(impng + 0.5, 0.0, 2.0) / 2.0 * (2**16 - 1), tf.uint16)  # maps [-0.5, 1.5] -> [0, 65535]
impng = tf.image.encode_png(impng)
nzpng = tf.reshape(nz, [imsp[0], imsp[1] * imsp[2], 1])
nzpng = tf.cast(
    tf.clip_by_value(nzpng + 0.5, 0.0, 2.0) / 2.0 * (2**16 - 1), tf.uint16)
nzpng = tf.image.encode_png(nzpng)
dzpng = tf.reshape(dz, [imsp[0], imsp[1] * imsp[2], 1])
dzpng = tf.cast(
    tf.clip_by_value(dzpng + 0.5, 0.0, 2.0) / 2.0 * (2**16 - 1), tf.uint16)
dzpng = tf.image.encode_png(dzpng)

encoded = [impng, nzpng, dzpng]
fwrites = [tf.write_file(fnms[i], encoded[i]) for i in range(len(fnms))]

#########################################################################
# Start TF session (respecting OMP_NUM_THREADS) & load model
nthr = os.getenv('OMP_NUM_THREADS')
if nthr is None:
    sess = tf.Session()
else:
    sess = tf.Session(config=tf.ConfigProto(
        intra_op_parallelism_threads=int(nthr)))
sess.run(tf.global_variables_initializer())

# Load model
wts = np.load(mfile)
ph = tf.placeholder(tf.float32)
for k in wts.keys():
Code Example #23
File: infogan_eval.py Project: Toyben/models
def _get_write_image_ops(eval_dir, filename, images):
  """Create Ops that write images to disk."""
  return tf.write_file(
      '%s/%s'% (eval_dir, filename),
      tf.image.encode_png(data_provider.float_image_to_uint8(images)))
Code Example #24
File: bls2017.py Project: michaelshiyu/compression
def save_image(filename, image):
  """Saves an image to a PNG file."""
  image = quantize_image(image)
  string = tf.image.encode_png(image)
  return tf.write_file(filename, string)
Code Example #25
import tensorflow as tf
import cv2

imageObj = cv2.imread('input.jpg')
imageObj = imageObj[:, :, ::-1]
image = tf.convert_to_tensor(imageObj, dtype=tf.float32)
input_layer = tf.reshape(image, [-1, 720, 1280, 3])
pool1 = tf.layers.average_pooling2d(inputs=input_layer,
                                    pool_size=[2, 2],
                                    strides=2)
pool2 = tf.layers.average_pooling2d(inputs=pool1, pool_size=[2, 2], strides=2)
pool3 = tf.layers.average_pooling2d(inputs=pool2, pool_size=[2, 2], strides=2)
pool4 = tf.layers.average_pooling2d(inputs=pool3, pool_size=[2, 2], strides=2)

# Four 2x2 average pools downsample 16x; resizing back to 720x1280 yields a strong blur.
resized = tf.image.resize_images(pool4, [720, 1280])
output_image = tf.image.encode_jpeg(tf.cast(resized[0], tf.uint8))

file_name = tf.constant('./Output_image.jpeg')
with tf.Session():
    tf.write_file(file_name, output_image).run()
Code Example #26
def main():
    if (not args.use_gpu):
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

    # load and build graph
    with tf.Graph().as_default():
        model_input_path = tf.placeholder(tf.string, [])
        model_output_path = tf.placeholder(tf.string, [])

        image = tf.read_file(model_input_path)
        image = [tf.image.decode_png(image, channels=3, dtype=tf.uint8)]
        image = tf.cast(image, tf.float32)

        with tf.gfile.GFile(args.model_name, 'rb') as f:
            model_graph_def = tf.GraphDef()
            model_graph_def.ParseFromString(f.read())

        # add a workaround to support frozen models having input scale as a placeholder
        model_output = None
        if (model_output is None):
            try:
                model_input_scale = tf.constant(4, dtype=tf.float32)
                model_output = tf.import_graph_def(
                    model_graph_def,
                    name='model',
                    input_map={
                        'sr_input:0': image,
                        'sr_input_scale:0': model_input_scale
                    },
                    return_elements=['sr_output:0'])[0]
            except:
                model_output = None
        if (model_output is None):
            try:
                model_output = tf.import_graph_def(
                    model_graph_def,
                    name='model',
                    input_map={'sr_input:0': image},
                    return_elements=['sr_output:0'])[0]
            except:
                model_output = None

        model_output = model_output[0, :, :, :]
        model_output = tf.round(model_output)
        model_output = tf.clip_by_value(model_output, 0, 255)
        model_output = tf.cast(model_output, tf.uint8)

        image = tf.image.encode_png(model_output)
        write_op = tf.write_file(model_output_path, image)

        init = tf.global_variables_initializer()

        sess = tf.Session(config=tf.ConfigProto(log_device_placement=False,
                                                allow_soft_placement=True))
        sess.run(init)

    # get image path list
    image_path_list = []
    for root, subdirs, files in os.walk(args.input_path):
        for filename in files:
            if (filename.lower().endswith('.png')):
                input_path = os.path.join(args.input_path, filename)
                output_path = os.path.join(args.output_path, filename)

                image_path_list.append([input_path, output_path])
    print('Found %d images' % (len(image_path_list)))

    # iterate
    for input_path, output_path in image_path_list:
        print('- %s -> %s' % (input_path, output_path))
        sess.run([write_op],
                 feed_dict={
                     model_input_path: input_path,
                     model_output_path: output_path
                 })

    print('Done')
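
The pattern worth noting in this example: both file paths are tf.string placeholders, so the graph is built once and reused for every image, with only the feed_dict changing per file. A stripped-down sketch of that pattern (TensorFlow 1.x assumed; file names illustrative):

import tensorflow as tf  # assumes TensorFlow 1.x

in_path = tf.placeholder(tf.string, [])
out_path = tf.placeholder(tf.string, [])
img = tf.image.decode_png(tf.read_file(in_path), channels=3)
write_op = tf.write_file(out_path, tf.image.encode_png(img))

with tf.Session() as sess:
    # One compiled graph, many files; only the fed paths change.
    for src, dst in [("a.png", "a_copy.png"), ("b.png", "b_copy.png")]:
        sess.run(write_op, feed_dict={in_path: src, out_path: dst})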
Code Example #27
File: textured.py Project: won21kr/dirt
def main():

    # Build the scene geometry, which is just an axis-aligned cube centred at the origin in world space
    cube_vertices_object = []
    cube_uvs = []
    cube_faces = []

    def add_quad(vertices, uvs):
        index = len(cube_vertices_object)
        cube_faces.extend([[index + 2, index + 1, index],
                           [index, index + 3, index + 2]])
        cube_vertices_object.extend(vertices)
        cube_uvs.extend(uvs)

    add_quad(vertices=[[-1, -1, 1], [1, -1, 1], [1, 1, 1], [-1, 1, 1]],
             uvs=[[0.1, 0.9], [0.9, 0.9], [0.9, 0.1], [0.1, 0.1]])  # front
    add_quad(vertices=[[-1, -1, -1], [1, -1, -1], [1, 1, -1], [-1, 1, -1]],
             uvs=[[1, 1], [0, 1], [0, 0], [1, 0]])  # back

    add_quad(vertices=[[1, 1, 1], [1, 1, -1], [1, -1, -1], [1, -1, 1]],
             uvs=[[0.3, 0.25], [0.6, 0.25], [0.6, 0.55], [0.3, 0.55]])  # right
    add_quad(vertices=[[-1, 1, 1], [-1, 1, -1], [-1, -1, -1], [-1, -1, 1]],
             uvs=[[0.4, 0.4], [0.5, 0.4], [0.5, 0.5], [0.4, 0.5]])  # left

    add_quad(vertices=[[-1, 1, -1], [1, 1, -1], [1, 1, 1], [-1, 1, 1]],
             uvs=[[0, 0], [2, 0], [2, 2], [0, 2]])  # top
    add_quad(vertices=[[-1, -1, -1], [1, -1, -1], [1, -1, 1], [-1, -1, 1]],
             uvs=[[0, 0], [2, 0], [2, 2], [0, 2]])  # bottom

    cube_vertices_object = np.asarray(cube_vertices_object, np.float32)
    cube_uvs = np.asarray(cube_uvs, np.float32)

    # Load the texture image
    texture = tf.cast(
        tf.image.decode_jpeg(
            tf.read_file(os.path.dirname(__file__) + '/cat.jpg')),
        tf.float32) / 255.

    # Convert vertices to homogeneous coordinates
    cube_vertices_object = tf.concat(
        [cube_vertices_object,
         tf.ones_like(cube_vertices_object[:, -1:])],
        axis=1)

    # Transform vertices from object to world space, by rotating around the vertical axis
    cube_vertices_world = tf.matmul(cube_vertices_object,
                                    matrices.rodrigues([0., 0.6, 0.]))

    # Calculate face normals
    cube_normals_world = lighting.vertex_normals(cube_vertices_world,
                                                 cube_faces)

    # Transform vertices from world to camera space; note that the camera points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation([0., -2.,
                              -3.2]),  # translate it away from the camera
        matrices.rodrigues([-0.5, 0., 0.])  # tilt the view downwards
    )
    cube_vertices_camera = tf.matmul(cube_vertices_world, view_matrix)

    # Transform vertices from camera to clip space
    projection_matrix = matrices.perspective_projection(
        near=0.1, far=20., right=0.1, aspect=float(frame_height) / frame_width)
    cube_vertices_clip = tf.matmul(cube_vertices_camera, projection_matrix)

    # The following function is applied to the G-buffer, which is a multi-channel image containing all the vertex attributes. It
    # uses this to calculate the shading (texture and lighting) at each pixel, hence their final intensities
    def shader_fn(gbuffer, texture, light_direction):

        # Unpack the different attributes from the G-buffer
        mask = gbuffer[:, :, :1]
        uvs = gbuffer[:, :, 1:3]
        normals = gbuffer[:, :, 3:]

        # Sample the texture at locations corresponding to each pixel; this defines the unlit material color at each point
        unlit_colors = sample_texture(
            texture, uvs_to_pixel_indices(uvs,
                                          tf.shape(texture)[:2]))

        # Calculate a simple grey ambient lighting component
        ambient_contribution = unlit_colors * [0.4, 0.4, 0.4]

        # Calculate a diffuse (Lambertian) lighting component
        diffuse_contribution = lighting.diffuse_directional(
            tf.reshape(normals, [-1, 3]),
            tf.reshape(unlit_colors, [-1, 3]),
            light_direction,
            light_color=[0.6, 0.6, 0.6],
            double_sided=True)
        diffuse_contribution = tf.reshape(diffuse_contribution,
                                          [frame_height, frame_width, 3])

        # The final pixel intensities inside the shape are given by combining the ambient and diffuse components;
        # outside the shape, they are set to a uniform background color
        pixels = (diffuse_contribution +
                  ambient_contribution) * mask + [0., 0., 0.3] * (1. - mask)

        return pixels

    # Render the G-buffer channels (mask, UVs, and normals at each pixel), then perform the deferred shading calculation
    # In general, any tensor required by shader_fn and wrt which we need derivatives should be included in shader_additional_inputs;
    # although in this example they are constant, we pass the texture and lighting direction through this route as an illustration
    light_direction = tf.linalg.l2_normalize([1., -0.3, -0.5])
    pixels = dirt.rasterise_deferred(
        vertices=cube_vertices_clip,
        vertex_attributes=tf.concat(
            [
                tf.ones_like(cube_vertices_object[:, :1]),  # mask
                cube_uvs,  # texture coordinates
                cube_normals_world  # normals
            ],
            axis=1),
        faces=cube_faces,
        background_attributes=tf.zeros([frame_height, frame_width, 6]),
        shader_fn=shader_fn,
        shader_additional_inputs=[texture, light_direction])

    save_pixels = tf.write_file(
        'textured.jpg', tf.image.encode_jpeg(tf.cast(pixels * 255, tf.uint8)))

    session = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
        allow_growth=True)))
    with session.as_default():

        save_pixels.run()
Code Example #28
def val(args):
    ## set hyperparameters
    img_mean = np.array((104.00698793, 116.66876762, 122.67891434),
                        dtype=np.float32)
    tf.set_random_seed(args.random_seed)

    ## load data
    image_list, label_list, png_list = read_labeled_image_list(
        args.data_dir,
        is_val=True,
        valid_image_store_path=args.valid_image_store_path)
    num_val = len(image_list)
    image_name = tf.placeholder(dtype=tf.string)
    label_name = tf.placeholder(dtype=tf.string)
    png_name = tf.placeholder(dtype=tf.string)
    image_batch, label_batch = get_validate_data(image_name, label_name,
                                                 img_mean)

    print("data load completed!")

    ## load model
    g_net = choose_generator(args.g_name, image_batch)
    raw_output = g_net.terminals[-1]
    predict_batch = g_net.topredict(raw_output, tf.shape(label_batch)[1:3])
    predict_img = tf.write_file(
        png_name,
        tf.image.encode_png(
            tf.cast(tf.squeeze(predict_batch, axis=0), dtype=tf.uint8)))

    labels, logits = convert_to_calculateloss(raw_output, label_batch,
                                              args.num_classes)
    pre_labels = tf.argmax(logits, 1)

    print("Model load completed!")

    iou, iou_op = tf.metrics.mean_iou(labels,
                                      pre_labels,
                                      args.num_classes,
                                      name='iou')
    acc, acc_op = tf.metrics.accuracy(labels, pre_labels)
    m_op = tf.group(iou_op, acc_op)

    image = tf.py_func(inv_preprocess,
                       [image_batch, args.save_num_images, img_mean], tf.uint8)
    label = tf.py_func(decode_labels, [
        label_batch,
    ], tf.uint8)
    pred = tf.py_func(decode_labels, [
        predict_batch,
    ], tf.uint8)
    tf.summary.image(name='img_collection_val',
                     tensor=tf.concat([image, label, pred], 2))
    tf.summary.scalar(name='iou_val', tensor=iou)
    tf.summary.scalar(name='acc_val', tensor=acc)
    sum_op = tf.summary.merge_all()
    sum_writer = tf.summary.FileWriter(args.log_dir, max_queue=5)

    sess = tf.Session()
    global_init = tf.global_variables_initializer()
    local_init = tf.local_variables_initializer()
    sess.run(global_init)
    sess.run(local_init)

    saver = tf.train.Saver(var_list=tf.global_variables())
    _ = load_weight(args.restore_from, saver, sess)

    if not os.path.exists(args.valid_image_store_path):
        os.makedirs(args.valid_image_store_path)

    print("validation begining")

    for step in range(num_val):
        it = time.time()
        feed = {
            image_name: image_list[step],
            label_name: label_list[step],
            png_name: png_list[step]
        }
        _, _, iou_val = sess.run([m_op, predict_img, iou], feed)
        if step % 50 == 0 or step == num_val - 1:
            summ = sess.run(sum_op, feed)
            sum_writer.add_summary(summ, step)
            print("step:{},time:{},iou:{}".format(step,
                                                  time.time() - it, iou_val))

    print("end......")
Code Example #29
    if prev is None:
        lines, depth = generator(
            tf.concat([image, tf.zeros((1, IMG_HEIGHT, IMG_WIDTH, 6))],
                      axis=-1))
    else:
        warped = tf.contrib.image.dense_image_warp(prev, flow)
        warped_depth = tf.contrib.image.dense_image_warp(prev_depth, flow)

        lines, depth = generator(
            tf.concat([image, warped, warped_depth], axis=-1))

    prev = lines
    prev_depth = depth

    def prepare(img):
        img = tf.image.resize_images(img,
                                     size=[IMG_HEIGHT, IMG_WIDTH],
                                     align_corners=True,
                                     method=tf.image.ResizeMethod.BICUBIC)
        img = img * 0.5 + 0.5
        img = tf.squeeze(img)
        img = tf.image.convert_image_dtype(img, dtype=tf.uint8, saturate=True)
        img = tf.image.encode_png(img)
        return img

    lines = prepare(lines)
    depth = prepare(depth)
    tf.write_file(os.path.join(output_dir, "lines", file), lines)
    tf.write_file(os.path.join(output_dir, "depth", file), depth)
Code Example #30
D_loss_summary = tf.summary.histogram("D_loss", D_loss)
G_loss_summary = tf.summary.histogram("G_loss", G_loss)

D_train = tf.train.AdamOptimizer(learning_rate).minimize(-D_loss,
                                                         var_list=D_var_list)
G_train = tf.train.AdamOptimizer(learning_rate).minimize(-G_loss,
                                                         var_list=G_var_list)

samples = Generator(Y)
samples_img = tf.reshape(tf.cast(samples * 128, tf.uint8), [-1, 28, 28, 1])
img = tf.image.encode_jpeg(tf.reshape(samples_img, [28, 28, 1]),
                           format='grayscale')
temp_name = tf.constant("./testimages/") + E + tf.constant(
    "_") + J + tf.constant(".jpeg")
fsave = tf.write_file(temp_name, img)

sess = tf.Session()
summary_merge = tf.summary.merge_all()
writer = tf.summary.FileWriter("./logs")
writer.add_graph(sess.graph)

sess.run(tf.global_variables_initializer())

print('Learning Started')

total_batch = int(mnist.train.num_examples / batch_size)

for epoch in range(training_epochs):
    if True:  # (epoch + 1) % 10 == 0 or epoch == 0:
        flag = 1
Code Example #31
def save_dataset(dataset, shape, subdir_name):

    #dataset = dataset['train']
    #valid_data = dataset['valid']
    #test_data  = dataset['test']
    print(dataset)
    #print(valid_data)
    #sys.exit(0)

    # create TensorFlow Iterator object
    iterator = Iterator.from_structure(dataset.output_types,
                                       dataset.output_shapes)

    next_element = iterator.get_next()  #features, labels = iterator.get_next()

    # create two initialization ops to switch between the datasets
    train_init_op = iterator.make_initializer(dataset)
    #valid_init_op = iterator.make_initializer(valid_data)

    # 3) Calculate bottleneck in TF
    height, width, color = shape
    x = tf.placeholder(tf.float32, [height, width, 3], name='Placeholder-x')

    #x = x * tf.constant(255.0)

    #resized_input_tensor = tf.reshape(x, [-1, height, width, 3])

    # num_features = 2048, height x width = 224 x 224 pixels
    #assert height, width == hub.get_expected_image_size(module)
    #bottleneck_tensor = module(resized_input_tensor)  # Features with shape [batch_size, num_features]
    #print('bottleneck_tensor:', bottleneck_tensor)

    #bottleneck_data = dict()
    #bottleneck_data['train'] = {'images':[], 'labels':[]}
    #bottleneck_data['valid'] = {'images':[], 'labels':[]}
    #bottleneck_data['test'] =  {'images':[], 'labels':[]}

    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())

        # initialize the iterator on the training data
        sess.run(train_init_op)  # switch to train dataset
        i = 0
        # get each element of the training dataset until the end is reached
        while True:
            i += 1
            try:
                print('i = ', i)
                item = sess.run(next_element)
                #jpeg_image = tf.image.encode_jpeg(item)

                #op = tf.image.encode_jpeg(item, format='rgb', quality=100)
                op = tf.image.encode_jpeg(item)

                #data_np = sess.run(op, feed_dict={ x: item })
                #print(data_np)
                image = op.eval()
                #print(image)
                fname = tf.constant('{0}/augment_{1}.jpg'.format(
                    subdir_name, i))
                wr = tf.write_file(fname, image)
                sess.run(wr)

                #with open('01.jpg', 'wb') as fp:
                #	fp.write(image)

                #tf.write_file(fname, data_np * tf.constant(255.0))
                #print("tf.write_file('1.jpg', jpeg_image)")

                #img, label = elem
                #feature_vectors = bottleneck_tensor.eval(feed_dict={ x : batch[0] })
                #label = elem[1]
                #images = list(map(list, feature_vectors))
                #labels = list(map(list, batch[1]))
                #bottleneck_data['train']['images'] += images
                #bottleneck_data['train']['labels'] += labels
                #print(labels)
            except tf.errors.OutOfRangeError:
                print("End of training dataset.")
                break

    return 0
    """
Code Example #32
def main():
    if not args.use_gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

    # load the frozen model and build the inference graph
    with tf.Graph().as_default():
        model_input_path = tf.placeholder(tf.string, [])
        model_output_path = tf.placeholder(tf.string, [])

        image = tf.read_file(model_input_path)
        image = tf.image.decode_png(image, channels=3, dtype=tf.uint8)
        image = tf.expand_dims(tf.cast(image, tf.float32), axis=0)  # add batch dim

        with tf.gfile.GFile("4pp_eusr_pirm.pb", 'rb') as f:
            model_graph_def = tf.GraphDef()
            model_graph_def.ParseFromString(f.read())

        model_output = tf.import_graph_def(model_graph_def,
                                           name='model',
                                           input_map={'sr_input:0': image},
                                           return_elements=['sr_output:0'])[0]
        model_output = model_output[0, :, :, :]  # drop the batch dimension
        model_output = tf.round(model_output)
        model_output = tf.clip_by_value(model_output, 0, 255)
        model_output = tf.cast(model_output, tf.uint8)
        image = tf.image.encode_png(model_output)
        # Running `write_op` executes the whole pipeline:
        # read -> decode -> super-resolve -> encode -> write.
        write_op = tf.write_file(model_output_path, image)

        init = tf.global_variables_initializer()
        sess = tf.Session(config=tf.ConfigProto(log_device_placement=False,
                                                allow_soft_placement=True))
        sess.run(init)

    # get image path list
    image_path_list = []
    for root, subdirs, files in os.walk(args.input_path):
        for filename in files:
            if filename.lower().endswith('.png'):
                # join against `root` so files in subdirectories resolve correctly
                input_path = os.path.join(root, filename)
                output_path = os.path.join(args.output_path, filename)
                image_path_list.append([input_path, output_path])
    print('Found %d images' % (len(image_path_list)))

    # iterate
    for input_path, output_path in image_path_list:
        print('- %s -> %s' % (input_path, output_path))
        sess.run([write_op],
                 feed_dict={
                     model_input_path: input_path,
                     model_output_path: output_path
                 })
    print('Done')
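The load-and-splice step above is the standard frozen-graph pattern: parse a serialized GraphDef, then import it into the current graph with `input_map`. A minimal sketch of just that step, using the tensor names from the example ('sr_input:0' and 'sr_output:0'):

def load_frozen_model(pb_path, input_tensor):
    # Parse the serialized GraphDef from disk.
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(pb_path, 'rb') as f:
        graph_def.ParseFromString(f.read())
    # Splice the frozen graph into the current graph, feeding `input_tensor`
    # into 'sr_input:0' and returning the 'sr_output:0' tensor.
    return tf.import_graph_def(graph_def,
                               name='model',
                               input_map={'sr_input:0': input_tensor},
                               return_elements=['sr_output:0'])[0]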
Code example #33
0
File: autoencoder.py Project: brunoklaus/NN2018
def autoencoder_run(_):
    
    g1 = tf.Graph()
    

    with g1.as_default():
        with tf.device("/cpu:0"):
            ds_dict, init_dict, nxt_dict = read_images()

   
        with tf.device("/gpu:0"):  
            #Define config
            tfconfig = tf.ConfigProto(allow_soft_placement=True)
            tfconfig.gpu_options.per_process_gpu_memory_fraction = 0.5     
            
            #PLACEHOLDERS
            lr = tf.placeholder_with_default(tf.cast(FLAGS.learning_rate,tf.float32), shape=[], name="learning_rate")
            mom = tf.placeholder_with_default(tf.cast(FLAGS.mom1,tf.float32), shape=[], name="momentum")
            IMG = tf.placeholder(dtype=tf.float32,shape= (None,32,32,3),name="placeholder/IMG")
            ZCA = tf.placeholder(dtype=tf.float32,shape= (None,32,32,3),name="placeholder/ZCA")
            
            # BUILD GRAPH
            with tf.variable_scope("CNN") as scope:
                loss, train_op = build_training_graph(IMG, ZCA, lr, mom)
                scope.reuse_variables()
                # Build eval graph (reusing the training variables)
                losses_eval_train, sample = build_eval_graph(IMG, ZCA)
                losses_eval_test, _ = build_eval_graph(IMG, ZCA)
                latent_space = build_emb_graph(IMG, ZCA)
                latent_space = tf.reshape(tensor=latent_space, shape=[FLAGS.eval_batch_size, -1])

            #Create FileWriter
            if not FLAGS.log_dir:
                writer_train = None
                writer_test = None
            else:
                writer_train = tf.summary.FileWriter(FLAGS.log_dir + "/train", g1)
                writer_test = tf.summary.FileWriter(FLAGS.log_dir + "/test", g1)
            #FileWriter for embedding
            if FLAGS.autoencoder_mode == "embedding":
                writer_emb = tf.summary.FileWriter(FLAGS.log_dir + "/emb", g1)
            else:
                writer_emb = None
                        
                        
                        
            with tf.train.MonitoredTrainingSession(
                    checkpoint_dir=FLAGS.train_dir,
                    hooks=[],
                    config=tfconfig) as mon_sess:
            
                if FLAGS.autoencoder_mode == "embedding":
                    
                    #Get embedded rep
                    mon_sess.run(init_dict["AE_emb"].initializer)
                    bs = FLAGS.eval_batch_size
                    
                    labels = np.zeros((NUM_EXAMPLES_TRAIN))
                    zca = np.zeros((NUM_EXAMPLES_TRAIN,32*32*3))
                    image = np.zeros((NUM_EXAMPLES_TRAIN,32*32*3))
                    
                    
                    
                    for i in range(NUM_EXAMPLES_TRAIN//bs):
                        nxt = mon_sess.run(nxt_dict["AE_emb"])
                        assert (nxt["id"][0] == i * bs)
                        labels[i * bs : (i+1)*bs] = np.reshape(np.argmax(nxt["label"],axis=1),(-1))
                        zca[i * bs : (i+1)*bs,:] = np.reshape(nxt["zca"],(bs,-1))
                        image[i * bs : (i+1)*bs,:] = np.reshape(nxt["image"],(bs,-1))
                        feed_dict = { IMG:nxt["image"],
                                      ZCA:nxt["zca"]}
                        emb = mon_sess.run(latent_space,feed_dict=feed_dict)
                        if i == 0:
                            all_emb = np.zeros((NUM_EXAMPLES_TRAIN,emb.shape[1]))
                        all_emb[i * bs : (i+1)*bs,:] = emb
                    
                    
                    convert_images_and_labels_and_emb(image, labels, zca, all_emb, os.path.join(FLAGS.data_dir, FLAGS.emb_name+'.tfrecords'))
                    
                    return True
            
            
                for ep in range(FLAGS.num_epochs):
                    print("EPOCH:{}".format(ep))
                    
                    #Adjust decay if necessary
                    if ep < FLAGS.epoch_decay_start:
                        feed_dict = {lr: FLAGS.learning_rate, mom: FLAGS.mom1}
                        print("MOMENTUM:{},lr:".format(FLAGS.mom1,FLAGS.learning_rate))
                    else:
                        decayed_lr = ((FLAGS.num_epochs - ep) / float(
                            FLAGS.num_epochs - FLAGS.epoch_decay_start)) * FLAGS.learning_rate
                        feed_dict = {lr: decayed_lr, mom: FLAGS.mom2}
                    #Initialize loss,time and iterator
                    sum_loss = 0
                    start = time.time()
                    mon_sess.run(init_dict["AE_train"].initializer)
                    #Run training examples
                    for i in range(FLAGS.num_iter_per_epoch):
                        nxt = mon_sess.run(nxt_dict["AE_train"])
                        feed_dict[IMG] = nxt["image"]
                        feed_dict[ZCA] = nxt["zca"]
                        _, batch_loss  = mon_sess.run([train_op, loss],
                                                    feed_dict=feed_dict)
                        sum_loss += batch_loss
                    #Print elapsed time
                    end = time.time()
                    print("Epoch:", ep, "CE_loss_train:", sum_loss / FLAGS.num_iter_per_epoch,
                           "elapsed_time:", end - start)

                    
                    # EVAL procedure
                    if (ep + 1) % FLAGS.eval_freq == 0 or ep + 1 == FLAGS.num_epochs: 
                         
                        #BEGIN: Get Sample
                        sample_img = mon_sess.run([sample], feed_dict=feed_dict)
                        with tf.Graph().as_default():
                            tf.Session().run([tf.write_file("sample.png",tf.image.encode_png(sample_img[0]))])
                            print("saved sample")
                        #END: Get Sample   
                        #EVAL TRAIN
                        classifier_eval(initializer = init_dict["AE_eval_train"].initializer,
                                         nxt_op = nxt_dict["AE_eval_train"],
                                         losses_eval = losses_eval_train,
                                         writer=writer_train,
                                         mon_sess = mon_sess,
                                         IMG=IMG,ZCA=ZCA,description="train")
                        #EVAL TEST
                        classifier_eval(initializer = init_dict["AE_eval_test"].initializer,
                                     nxt_op = nxt_dict["AE_eval_test"],
                                     losses_eval = losses_eval_test,
                                     writer=writer_test,
                                     mon_sess = mon_sess,
                                     IMG=IMG,ZCA=ZCA,description="test")
                        
                        # END: Eval test data
                    # END EVAL
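The learning-rate schedule above is constant until `epoch_decay_start`, then decays linearly to zero at `num_epochs`. A standalone sketch with illustrative defaults (placeholder values, not the project's FLAGS):

def decayed_lr(ep, num_epochs=120, epoch_decay_start=80, learning_rate=3e-4):
    # Constant phase, then a linear ramp down to 0 at num_epochs.
    if ep < epoch_decay_start:
        return learning_rate
    return ((num_epochs - ep) / float(num_epochs - epoch_decay_start)) * learning_rate

# e.g. decayed_lr(100) == 0.5 * 3e-4 with the placeholder defaults above.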
Code example #34
0
def save_image(filename, image):
    image = quantize_image(image)
    string = tf.image.encode_png(image)
    return tf.write_file(filename, string)
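A hypothetical usage of `save_image` (assuming `quantize_image`, defined elsewhere in the same project, converts the float tensor to uint8 as `tf.image.encode_png` requires):

image = tf.random_uniform([64, 64, 3])  # placeholder float image in [0, 1)
save_op = save_image('out.png', image)  # builds the op; nothing is written yet
with tf.Session() as sess:
    sess.run(save_op)                   # executing the op writes the file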
Code example #35
0
File: test_vmnet.py Project: idearibosome/tf-vmnet
def main(unused_argv):
  # initialize
  FLAGS.vmnet_intermediate_outputs = True
  os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.cuda_device
  scale_list = [int(s) for s in FLAGS.scales.split(',')]
  tf.logging.set_verbosity(tf.logging.INFO)
  
  # image reading session
  tf_image_read_graph = tf.Graph()
  with tf_image_read_graph.as_default():
    tf_image_read_path = tf.placeholder(tf.string, [])
    
    tf_image = tf.read_file(tf_image_read_path)
    tf_image = tf.image.decode_png(tf_image, channels=3, dtype=tf.uint8)
    
    tf_image_read = tf_image

    tf_image_read_init = tf.global_variables_initializer()
    tf_image_read_session = tf.Session(config=tf.ConfigProto(
        device_count={'GPU': 0}
    ))
    tf_image_read_session.run(tf_image_read_init)

  # image saving session
  tf_image_save_graph = tf.Graph()
  with tf_image_save_graph.as_default():
    tf_image_save_path = tf.placeholder(tf.string, [])
    tf_image_save_image = tf.placeholder(tf.float32, [None, None, 3])
    
    tf_image = tf_image_save_image
    tf_image = tf.round(tf_image)
    tf_image = tf.clip_by_value(tf_image, 0, 255)
    tf_image = tf.cast(tf_image, tf.uint8)
    
    tf_image_png = tf.image.encode_png(tf_image)
    tf_image_save_op = tf.write_file(tf_image_save_path, tf_image_png)

    tf_image_save_init = tf.global_variables_initializer()
    tf_image_save_session = tf.Session(config=tf.ConfigProto(
        device_count={'GPU': 0}
    ))
    tf_image_save_session.run(tf_image_save_init)

  # model
  model = MODEL_MODULE.create_model()
  model.prepare(is_training=False, global_step=FLAGS.restore_global_step)

  # model > restore
  model.restore(ckpt_path=FLAGS.restore_path, target=FLAGS.restore_target)
  tf.logging.info('restored the model')
  
  # get image path list
  image_list = [f for f in os.listdir(FLAGS.input_path) if f.lower().endswith('.png')]
  tf.logging.info('found %d images' % (len(image_list)))

  # iterate
  running_time_list = []
  num_total_outputs = FLAGS.vmnet_recursions // FLAGS.vmnet_recursion_frequency
  for scale in scale_list:
    for image_name in image_list:
      tf.logging.info('- x%d: %s' % (scale, image_name))
      input_image_path = os.path.join(FLAGS.input_path, image_name)
      input_image = tf_image_read_session.run(tf_image_read, feed_dict={tf_image_read_path: input_image_path})
      t1 = time.perf_counter()
      output_images = model.upscale(input_list=[input_image], scale=scale)
      t2 = time.perf_counter()
      running_time = (t2 - t1)

      output_image_ensemble = np.zeros_like(output_images[0][0])
      ensemble_factor_total = 0.0
      
      for i in range(num_total_outputs):
        num_recursions = (i + 1) * FLAGS.vmnet_recursion_frequency
        output_image = output_images[i][0]

        ensemble_factor = 1.0 / (2.0 ** (num_total_outputs-num_recursions))
        output_image_ensemble = output_image_ensemble + (output_image * ensemble_factor)
        ensemble_factor_total += ensemble_factor
      
      output_image = output_image_ensemble / ensemble_factor_total

      output_image_path = os.path.join(FLAGS.output_path, 'x%d' % (scale), os.path.splitext(image_name)[0]+'.png')
      tf_image_save_session.run(tf_image_save_op, feed_dict={tf_image_save_path:output_image_path, tf_image_save_image:output_image})

      running_time_list.append(running_time)

  # finalize
  tf.logging.info('finished')
  tf.logging.info('%.6f sec' % (np.mean(running_time_list)))
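Note how the ensemble is weighted: each intermediate output gets a factor of 1 / 2^(num_total_outputs - num_recursions), so deeper recursions contribute exponentially more, and dividing by the factor sum normalizes the blend. With illustrative values num_total_outputs = 4 and vmnet_recursion_frequency = 1:

weights = [1.0 / (2.0 ** (4 - r)) for r in range(1, 5)]
print(weights)       # [0.125, 0.25, 0.5, 1.0] -- the deepest recursion dominates
print(sum(weights))  # 1.875, the normalizing denominator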
Code example #36
0
# Build the augmentation pipeline once, outside the loop; re-creating the ops
# and a new Session for every file would keep growing the graph.
image_ph = tf.placeholder(tf.uint8, shape=[None, None, 3])
filename_ph = tf.placeholder(tf.string, [])

sat_image = tf.image.random_saturation(image_ph, lower=0.5, upper=1)
contrast_image = tf.image.random_contrast(sat_image, lower=0.5, upper=1)
brght_img = tf.image.random_brightness(contrast_image, max_delta=0.2)
jpg = tf.image.random_jpeg_quality(brght_img,
                                   min_jpeg_quality=50,
                                   max_jpeg_quality=100)
# Encode the fully augmented image (`jpg`), not the intermediate `brght_img`.
enc = tf.image.encode_jpeg(jpg)
fwrite = tf.write_file(filename_ph, enc)

cur_number = 0
with tf.Session() as sess:
    for file in lst:
        cur_class = file.split('_')[0]
        cur_light = file.split('_')[1]
        cur_source_file = file.split('_')[2]
        cur_frame = file.split('_')[7]
        filename = path_to_augment + "aug_" + cur_class + "_" + cur_light + "_" + video_number + "_" + str(
            cur_number
        ) + "_" + "from_" + cur_source_file + "_" + cur_frame + ".jpg"
        print("Create file:", filename)
        image = cv2.imread(str(path_to_source) + str(file))
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # cv2 loads BGR; TF expects RGB

        sess.run(fwrite, feed_dict={image_ph: rgb, filename_ph: filename})
        cur_number += 1

os.system("rm -rf {0}".format(path_to_source))
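For reference, the same augmentation chain in TF2 eager style drops the placeholders and Session entirely (a sketch; `rgb` and `filename` are as in the loop above):

img = tf.image.random_saturation(rgb, lower=0.5, upper=1)
img = tf.image.random_contrast(img, lower=0.5, upper=1)
img = tf.image.random_brightness(img, max_delta=0.2)
img = tf.image.random_jpeg_quality(img, min_jpeg_quality=50, max_jpeg_quality=100)
tf.io.write_file(filename, tf.io.encode_jpeg(img))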
Code example #37
0
def main(_, run_eval_loop=True):
    # Fetch and generate images to run through Inception.
    with tf.name_scope('inputs'):
        real_data, num_classes = _get_real_data(FLAGS.num_images_generated,
                                                FLAGS.dataset_dir)
        generated_data = _get_generated_data(FLAGS.num_images_generated,
                                             FLAGS.conditional_eval,
                                             num_classes)

    # Compute Frechet Inception Distance.
    if FLAGS.eval_frechet_inception_distance:
        fid = util.get_frechet_inception_distance(real_data, generated_data,
                                                  FLAGS.num_images_generated,
                                                  FLAGS.num_inception_images)
        tf.summary.scalar('frechet_inception_distance', fid)

    # Compute normal Inception scores.
    if FLAGS.eval_real_images:
        inc_score = util.get_inception_scores(real_data,
                                              FLAGS.num_images_generated,
                                              FLAGS.num_inception_images)
    else:
        inc_score = util.get_inception_scores(generated_data,
                                              FLAGS.num_images_generated,
                                              FLAGS.num_inception_images)
    tf.summary.scalar('inception_score', inc_score)

    # If conditional, display an image grid of different classes.
    if FLAGS.conditional_eval and not FLAGS.eval_real_images:
        reshaped_imgs = util.get_image_grid(generated_data,
                                            FLAGS.num_images_generated,
                                            num_classes,
                                            FLAGS.num_images_per_class)
        tf.summary.image('generated_data', reshaped_imgs, max_outputs=1)

    # Create ops that write images to disk.
    image_write_ops = None
    if FLAGS.conditional_eval and FLAGS.write_to_disk:
        reshaped_imgs = util.get_image_grid(generated_data,
                                            FLAGS.num_images_generated,
                                            num_classes,
                                            FLAGS.num_images_per_class)
        uint8_images = data_provider.float_image_to_uint8(reshaped_imgs)
        image_write_ops = tf.write_file(
            '%s/%s' % (FLAGS.eval_dir, 'conditional_cifar10.png'),
            tf.image.encode_png(uint8_images[0]))
    else:
        if FLAGS.num_images_generated >= 100 and FLAGS.write_to_disk:
            reshaped_imgs = tfgan.eval.image_reshaper(
                generated_data[:100], num_cols=FLAGS.num_images_per_class)
            uint8_images = data_provider.float_image_to_uint8(reshaped_imgs)
            image_write_ops = tf.write_file(
                '%s/%s' % (FLAGS.eval_dir, 'unconditional_cifar10.png'),
                tf.image.encode_png(uint8_images[0]))

    # For unit testing, use `run_eval_loop=False`.
    if not run_eval_loop: return
    tf.contrib.training.evaluate_repeatedly(
        FLAGS.checkpoint_dir,
        master=FLAGS.master,
        hooks=[
            tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
            tf.contrib.training.StopAfterNEvalsHook(1)
        ],
        eval_ops=image_write_ops,
        max_number_of_evaluations=FLAGS.max_number_of_evaluations)
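The `float_image_to_uint8` helper reflects that TFGAN-style pipelines keep images in [-1, 1]; converting back for PNG encoding typically looks like the sketch below (an assumption about the helper's behavior, not its verbatim source):

def float_image_to_uint8(image):
    # Map [-1, 1] floats to [0, 255] and cast so encode_png accepts it.
    return tf.cast((image + 1.0) * 127.5, tf.uint8)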
Code example #38
0
    swap_memory=True)

# Stack the coarse and fine sample sequences produced by the loop.
out_coarse = out_ta_coarse.stack()
out_fine = out_ta_fine.stack()

out = combine_signal(out_coarse, out_fine)

out = tf.squeeze(out, axis=-2)

aud = tf.cast(out, dtype=tf.float32)
aud = aud / 2**15  # scale 16-bit integer sample values into [-1.0, 1.0]

encoded_audio_data = tf.contrib.ffmpeg.encode_audio(
    aud, file_format="wav", samples_per_second=sample_rate)
write_file_op = tf.write_file("sample.wav", encoded_audio_data)

# Create a Saver for restoring the trained WaveRNN weights.
saver = tf.train.Saver()

# Run the graph: initialize, restore the weights, then synthesize audio.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Restore the latest checkpoint, if one exists.
    ckpt = tf.train.get_checkpoint_state(
        os.path.dirname('checkpoints_wavernn/wavernn'))
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)

    #