def run(dataset_dir):
    """Runs the download and conversion operation.

    Args:
      dataset_dir: The dataset directory where the dataset is stored.
    """
    if not tf.gfile.Exists(dataset_dir):
        tf.gfile.MakeDirs(dataset_dir)

    dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)

    # First, process the training data:
    #with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
    filenames = []
    for i in range(_NUM_TRAIN_FILES):
        filenames.append(
            os.path.join(dataset_dir, 'cifar-10-batches-py',
                         'data_batch_%d' % (i + 1)))  # 1-indexed.
    _add_to_tfrecord(filenames, 'train', dataset_dir)

    # Next, process the testing data:
    #with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
    filenames = []
    filenames.append(
        os.path.join(dataset_dir, 'cifar-10-batches-py', 'test_batch'))
    _add_to_tfrecord(filenames, 'test', dataset_dir)

    # Finally, write the labels file:
    labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
    dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

    _clean_up_temporary_files(dataset_dir)
    print('\nFinished converting the Cifar10 dataset!')
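For reference, assuming `_CLASS_NAMES` holds the usual ten CIFAR-10 categories, the labels_to_class_names mapping written above is simply the following (an illustration only, not part of the original script):

# Sketch: the standard CIFAR-10 class list is assumed here.
labels_to_class_names = {
    0: 'airplane', 1: 'automobile', 2: 'bird', 3: 'cat', 4: 'deer',
    5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck'
}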
Example #2
def run(dataset_dir):
  """Runs the download and conversion operation.

  Args:
    dataset_dir: The dataset directory where the dataset is stored.
  """
  if not tf.gfile.Exists(dataset_dir):
    tf.gfile.MakeDirs(dataset_dir)

  if _dataset_exists(dataset_dir):
    print('Dataset files already exist. Exiting without re-creating them.')
    return

  dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
  photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)
  class_names_to_ids = dict(zip(class_names, range(len(class_names))))

  # Divide into train and validation:
  random.seed(_RANDOM_SEED)
  random.shuffle(photo_filenames)
  training_filenames = photo_filenames[_NUM_VALIDATION:]
  validation_filenames = photo_filenames[:_NUM_VALIDATION]

  # First, convert the training and validation sets.
  _convert_dataset('train', training_filenames, class_names_to_ids,
                   dataset_dir)
  _convert_dataset('validation', validation_filenames, class_names_to_ids,
                   dataset_dir)

  # Finally, write the labels file:
  labels_to_class_names = dict(zip(range(len(class_names)), class_names))
  dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

  _clean_up_temporary_files(dataset_dir)
  print('\nFinished converting the Flowers dataset!')
Example #3
    def download_pretrain():
        url = 'http://download.tensorflow.org/models/inception_v1_2016_08_28.tar.gz'

        if not tf.gfile.Exists(checkpoint_dir):
            tf.gfile.MakeDirs(checkpoint_dir)

        dataset_utils.download_and_uncompress_tarball(url, checkpoint_dir)
def run(dataset_dir):
    """Runs the download and conversion operation.
  
    Args:
      dataset_dir: The dataset directory where the dataset is stored.
    """
    if not tf.gfile.Exists(dataset_dir):
        tf.gfile.MakeDirs(dataset_dir)

    if _dataset_exists(dataset_dir):
        print('Dataset files already exist. Exiting without re-creating them.')
        return

    dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
    photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)
    class_names_to_ids = dict(zip(class_names, range(len(class_names))))

    # Divide into train and validation:
    random.seed(_RANDOM_SEED)
    random.shuffle(photo_filenames)
    training_filenames = photo_filenames[_NUM_VALIDATION:]
    validation_filenames = photo_filenames[:_NUM_VALIDATION]

    # First, convert the training and validation sets.
    _convert_dataset('train', training_filenames, class_names_to_ids,
                     dataset_dir)
    _convert_dataset('validation', validation_filenames, class_names_to_ids,
                     dataset_dir)

    # Finally, write the labels file:
    labels_to_class_names = dict(zip(range(len(class_names)), class_names))
    dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

    _clean_up_temporary_files(dataset_dir)
    print('\nFinished converting the Flowers dataset!')
def _get_filenames_and_classes(dataset_dir):
    """Returns a list of filenames and inferred class names.

    Args:
      dataset_dir: A directory containing a set of subdirectories representing
        class names. Each subdirectory should contain PNG or JPG encoded images.

    Returns:
      A list of image file paths under `dataset_dir`, and the sorted list of
      subdirectory names, which represent the class names.
    """

    dataset_utils.download_and_uncompress_tarball(_DATA_URL, _RAWIMG_)
    flower_root = os.path.join(dataset_dir, 'products')
    directories = []
    class_names = []
    for filename in os.listdir(flower_root):
        path = os.path.join(flower_root, filename)
        if os.path.isdir(path):
            directories.append(path)
            class_names.append(filename)

    photo_filenames = []
    for directory in directories:
        for filename in os.listdir(directory):
            path = os.path.join(directory, filename)
            photo_filenames.append(path)

    return photo_filenames, sorted(class_names)
Example #6
def makeDataSet_Flowers():
    from datasets import dataset_utils
    url = 'http://download.tensorflow.org/data/flowers.tar.gz'

    if not tf.gfile.Exists(flower_data_dir):
        tf.gfile.MakeDirs(flower_data_dir)

    dataset_utils.download_and_uncompress_tarball(url, flower_data_dir)
Example #7
def download_pretrained_model(url, checkpoint_dir):
    """Download pretrained inception model and store it in checkpoint_dir.

    Parameters:
        url: The url containing the compressed model.
        checkpoint_dir: The directory to save the model.
    """
    if not tf.gfile.Exists(checkpoint_dir):
        tf.gfile.MakeDirs(checkpoint_dir)
    dataset_utils.download_and_uncompress_tarball(url, checkpoint_dir)
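A minimal usage sketch for the helper above; the URL is the Inception V1 tarball used elsewhere in these examples, and the target directory is an assumption:

# Hypothetical call: fetch the Inception V1 checkpoint into /tmp/checkpoints.
download_pretrained_model(
    "http://download.tensorflow.org/models/inception_v1_2016_08_28.tar.gz",
    "/tmp/checkpoints")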
def run(dataset_dir, dataset):
    """Runs the download and conversion operation.

    Args:
      dataset_dir: The dataset directory where the dataset is stored.
      dataset: The name of the dataset to convert, e.g. 'cifar10' or 'cifar100'.
    """
    if not tf.gfile.Exists(dataset_dir):
        tf.gfile.MakeDirs(dataset_dir)

    training_filename = _get_output_filename(dataset_dir,
                                             'train',
                                             dataset=dataset)
    testing_filename = _get_output_filename(dataset_dir,
                                            'test',
                                            dataset=dataset)

    if tf.gfile.Exists(training_filename) and tf.gfile.Exists(
            testing_filename):
        print('Dataset files already exist. Exiting without re-creating them.')
        return

    dataset_utils.download_and_uncompress_tarball(_DATA_URL[dataset],
                                                  dataset_dir)

    # First, process the training data:
    with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
        offset = 0
        for i in range(_NUM_TRAIN_FILES[dataset]):
            filename = os.path.join(
                dataset_dir, _DATA_DIR[dataset],
                _batch_name('train', offset=i, dataset=dataset))
            offset = _add_to_tfrecord(filename, tfrecord_writer, dataset,
                                      offset)

    # Next, process the testing data:
    with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
        filename = os.path.join(dataset_dir, _DATA_DIR[dataset],
                                _batch_name('test', offset=0, dataset=dataset))
        _add_to_tfrecord(filename, tfrecord_writer, dataset)

    # Finally, write the labels file:
    labels_to_class_names = dict(enumerate(_CLASS_NAMES[dataset]))
    dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

    if dataset == 'cifar100':
        coarse_labels_to_class_names = dict(enumerate(_COARSE_CLASS_NAMES))
        dataset_utils.write_label_file(coarse_labels_to_class_names,
                                       dataset_dir,
                                       filename=_COARSE_LABELS_FILENAME)

    _clean_up_temporary_files(dataset_dir, dataset)
    print('\nFinished converting the %s dataset!' % dataset)
def run(dataset_dir):
  """Runs the download and conversion operation.

  Args:
    dataset_dir: The dataset directory where the dataset is stored.
  """
  if not tf.gfile.Exists(dataset_dir):
    tf.gfile.MakeDirs(dataset_dir)

  if _dataset_exists(dataset_dir):
    print('Dataset files already exist. Exiting without re-creating them.')
    return

  dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
  """
  I would like to remove all '._*' this kind MAC OS created files.
  """
  DIR='/tensorflow/models/research/slim/transfer_learn/birds/images'

  for path, subdirs,files in os.walk(DIR):
    for name in files:
      #FilePath=os.path.join(os.getcwd(), path[32:], name)
      FilePath=os.path.join(path, name)
      #print(os.getcwd())
      print(path)
      print(name)
      print(FilePath)
      if name[:2] == '._':
        os.remove(FilePath)
        print("Deleted!")

  photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)
  class_names_to_ids = dict(zip(class_names, range(len(class_names))))

  # Divide into train and validation:
  random.seed(_RANDOM_SEED)
  random.shuffle(photo_filenames)
  training_filenames = photo_filenames[_NUM_VALIDATION:]
  validation_filenames = photo_filenames[:_NUM_VALIDATION]

  # First, convert the training and validation sets.
  _convert_dataset('train', training_filenames, class_names_to_ids,
                   dataset_dir)
  _convert_dataset('validation', validation_filenames, class_names_to_ids,
                   dataset_dir)

  # Finally, write the labels file:
  labels_to_class_names = dict(zip(range(len(class_names)), class_names))
  dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

  _clean_up_temporary_files(dataset_dir)
  print('\nFinished converting the birds dataset!')
def run(dataset_dir):
    """Runs the download and conversion operation.

    Args:
      dataset_dir: The dataset directory where the dataset is stored.
    """
    if not tf.gfile.Exists(dataset_dir):
        tf.gfile.MakeDirs(dataset_dir)

    if _dataset_exists(dataset_dir):
        print('Dataset files already exist. Exiting without re-creating them.')
        return

    dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
def run(dataset_dir, dataset):
  """Runs the download and conversion operation.

  Args:
    dataset_dir: The dataset directory where the dataset is stored.
    dataset: The name of the dataset to convert, e.g. 'cifar10' or 'cifar100'.
  """
  if not tf.gfile.Exists(dataset_dir):
    tf.gfile.MakeDirs(dataset_dir)

  training_filename = _get_output_filename(dataset_dir, 'train',
                                           dataset=dataset)
  testing_filename = _get_output_filename(dataset_dir, 'test',
                                          dataset=dataset)

  if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
    print('Dataset files already exist. Exiting without re-creating them.')
    return

  dataset_utils.download_and_uncompress_tarball(_DATA_URL[dataset], dataset_dir)

  # First, process the training data:
  with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
    offset = 0
    for i in range(_NUM_TRAIN_FILES[dataset]):
      filename = os.path.join(dataset_dir,
                              _DATA_DIR[dataset],
                              _batch_name('train', offset=i, dataset=dataset))
      offset = _add_to_tfrecord(filename, tfrecord_writer, dataset, offset)

  # Next, process the testing data:
  with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
    filename = os.path.join(dataset_dir,
                            _DATA_DIR[dataset],
                            _batch_name('test', offset=0, dataset=dataset))
    _add_to_tfrecord(filename, tfrecord_writer, dataset)

  # Finally, write the labels file:
  labels_to_class_names = dict(enumerate(_CLASS_NAMES[dataset]))
  dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

  if dataset == 'cifar100':
    coarse_labels_to_class_names = dict(enumerate(_COARSE_CLASS_NAMES))
    dataset_utils.write_label_file(coarse_labels_to_class_names, dataset_dir,
                                   filename=_COARSE_LABELS_FILENAME)
    

  _clean_up_temporary_files(dataset_dir, dataset)
  print('\nFinished converting the %s dataset!' % dataset)
Example #12
    def __init__(self):
        url = "http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz"
        checkpoints_dir = '/tmp/checkpoints'
        checkpoints_filename = 'inception_resnet_v2_2016_08_30.ckpt'

        if not tf.gfile.Exists(checkpoints_dir):
            tf.gfile.MakeDirs(checkpoints_dir)

        if not tf.gfile.Exists(join(checkpoints_dir, checkpoints_filename)):
            dataset_utils.download_and_uncompress_tarball(url,
                checkpoints_dir)

        self.checkpoints_filename = join(
                checkpoints_dir,
                checkpoints_filename)

        self.model_name = 'InceptionResnetV2'
Example #13
    def __init__(self):
        self.cv_bridge = CvBridge()

        self.model_path = rospy.get_param('~model_path', '/tmp/')
        self.model_file = self.model_path + 'inception_v1.ckpt'

        if (not os.path.exists(self.model_file)):
            rospy.logwarn(
                "Model files not present:\n\t{}\nWe will download them from tensorflow."
                .format(self.model_file))
            from datasets import dataset_utils
            url = "http://download.tensorflow.org/models/inception_v1_2016_08_28.tar.gz"
            dataset_utils.download_and_uncompress_tarball(url, self.model_path)

        self.names = imagenet.create_readable_names_for_imagenet_labels()

        s = rospy.Service('recognize', Roi, self.recognize)
def run(args):
    """Runs the download and conversion operation.

    Args:
      args: Unused; the dataset directory is taken from FLAGS.dataset_dir.
    """
    dataset_dir = FLAGS.dataset_dir

    if not tf.gfile.Exists(dataset_dir):
        tf.gfile.MakeDirs(dataset_dir)

    training_filename = _get_output_filename(dataset_dir, 'train')
    testing_filename = _get_output_filename(dataset_dir, 'test')

    # if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
    #     print('Dataset files already exist. Exiting without re-creating them.')
    #     return

    dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)

    # First, process the training data:
    with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
        offset = 0

        filename = os.path.join(dataset_dir,
                                'cifar-100-python', 'train')
        offset = _add_to_tfrecord(filename, tfrecord_writer, offset)

    # Next, process the testing data:
    with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
        filename = os.path.join(dataset_dir,
                                'cifar-100-python',
                                'test')
        _add_to_tfrecord(filename, tfrecord_writer)

    # Finally, write the labels file:
    labels_to_class_names = dict(zip(range(len(fine_labels_human)), fine_labels_human))
    dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

    labels_to_class_names = dict(zip(range(len(coarse_labels_human)), coarse_labels_human))
    # dataset_utils.write_label_file(labels_to_class_names, dataset_dir, filename='labels-coarse.txt')

    # _clean_up_temporary_files(dataset_dir)
    print('\nFinished converting the Cifar100 dataset!')
Example #15
def run(dataset_dir):
    """Runs the download and conversion(n.  转变, 换位, 改宗) operation.
  operation(n.  操作; 经营; 运转; 营运; 手术)

  Args:
    dataset_dir: The dataset directory where the dataset is stored.
  """
    if not tf.gfile.Exists(dataset_dir):
        tf.gfile.MakeDirs(dataset_dir)

    training_filename = _get_output_filename(dataset_dir, 'train')
    testing_filename = _get_output_filename(dataset_dir, 'test')
    # dataset_dir/cifar10_{train/test}.tfrecord

    if tf.gfile.Exists(training_filename) and tf.gfile.Exists(
            testing_filename):
        print('Dataset files already exist. Exiting without re-creating them.')
        return

    dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
    # Download and extract the tarball.

    # First, process the training data:
    with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
        offset = 0
        for i in range(_NUM_TRAIN_FILES):
            filename = os.path.join(dataset_dir, 'cifar-10-batches-py',
                                    'data_batch_%d' % (i + 1))  # 1-indexed.
            offset = _add_to_tfrecord(filename, tfrecord_writer, offset)

    # Next, process the testing data:
    with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
        filename = os.path.join(dataset_dir, 'cifar-10-batches-py',
                                'test_batch')
        _add_to_tfrecord(filename, tfrecord_writer)

    # Finally, write the labels file:
    labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
    dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

    # Clean up the temporary files.
    _clean_up_temporary_files(dataset_dir)
    print('\nFinished converting the Cifar10 dataset!')
Example #16
def run(dataset_dir):
  """Runs the download and conversion operation.

  Args:
    dataset_dir: The dataset directory where the dataset is stored.
  """
  if not tf.gfile.Exists(dataset_dir):
    tf.gfile.MakeDirs(dataset_dir)

  training_filename = _get_output_filename(dataset_dir, 'train')
  testing_filename = _get_output_filename(dataset_dir, 'test')

  if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
    print('Dataset files already exist. Exiting without re-creating them.')
    return

  dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)

  # First, process the training data:
  with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
    offset = 0
    for i in range(_NUM_TRAIN_FILES):
      filename = os.path.join(dataset_dir,
                              'cifar-10-batches-py',
                              'data_batch_%d' % (i + 1))  # 1-indexed.
      offset = _add_to_tfrecord(filename, tfrecord_writer, offset)

  # Next, process the testing data:
  with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
    filename = os.path.join(dataset_dir,
                            'cifar-10-batches-py',
                            'test_batch')
    _add_to_tfrecord(filename, tfrecord_writer)

  # Finally, write the labels file:
  labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
  dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

  _clean_up_temporary_files(dataset_dir)
  print('\nFinished converting the Cifar10 dataset!')
Example #17
    def configure_inception_resnet(self):

        try:
            from nets import inception
            from datasets import dataset_utils
        except ImportError:
            import sys
            print(
                "Make sure you have installed tensorflow/models and it's accessible in the environment"
            )
            print("export PYTHONPATH=/home/ubuntu/models/slim")
            sys.exit()

        image_size = inception.inception_resnet_v2.default_image_size

        self.crop_generator = deepprofiler.imaging.cropping.SingleImageCropGenerator(
            self.config, self.dset)
        # Setup pretrained model
        network_input = crop_transform(self.raw_crops, image_size)
        url = self.config["profiling"]["url"]
        checkpoint = self.config["profiling"]["checkpoint"]
        if not os.path.isfile(checkpoint):
            dataset_utils.download_and_uncompress_tarball(
                url, os.path.dirname(checkpoint))
        with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
            _, self.endpoints = inception.inception_resnet_v2(
                network_input, num_classes=1001, is_training=False)
        init_fn = slim.assign_from_checkpoint_fn(checkpoint,
                                                 slim.get_model_variables())

        # Session configuration
        configuration = tf.ConfigProto()
        configuration.gpu_options.allow_growth = True
        configuration.gpu_options.visible_device_list = self.config[
            "profiling"]["gpu"]

        self.sess = tf.Session(config=configuration)
        init_fn(self.sess)
        self.crop_generator.start(self.sess)
    dump_conv2d(name=name + '/Branch_2/Conv2d_0b_3x3')
    dump_conv2d(name=name + '/Branch_2/Conv2d_1a_3x3')


def dump_block8(name='Repeat_2/block8_1'):
    dump_conv2d(name=name + '/Branch_0/Conv2d_1x1')
    dump_conv2d(name=name + '/Branch_1/Conv2d_0a_1x1')
    dump_conv2d(name=name + '/Branch_1/Conv2d_0b_1x3')
    dump_conv2d(name=name + '/Branch_1/Conv2d_0c_3x1')
    dump_conv2d_nobn(name=name + '/Conv2d_1x1')


if not tf.gfile.Exists(checkpoints_dir +
                       'inception_resnet_v2_2016_08_30.ckpt'):
    tf.gfile.MakeDirs(checkpoints_dir)
    dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)

with tf.Graph().as_default():

    # Create model architecture

    from scipy import misc
    img = misc.imread('lena_299.png')
    print(img.shape)

    inputs = np.ones((1, 299, 299, 3), dtype=np.float32)
    inputs[0, 0, 0, 0] = -1
    #inputs[0] = img
    print(inputs.mean())
    print(inputs.std())
    inputs = tf.pack(inputs)
def main():
    if not tf.gfile.Exists(flowers_data_dir):
        tf.gfile.MakeDirs(flowers_data_dir)

    dataset_utils.download_and_uncompress_tarball(url, flowers_data_dir)
Example #20
def run(base_dir, ext="jpg", store_results='', smart=False):
    if smart:
        raise NotImplementedError

    using_gpu = tf.test.is_gpu_available()
    if using_gpu:
        logger.info("Running on GPU")
    else:
        from tensorflow.python.framework import test_util as tftest_util
        assert tftest_util.IsMklEnabled(
        ), "This tensorflow is not compiled with MKL. Abort."
        logger.warn("Running on CPU")

    results = []

    # Download and uncompress model
    checkpoint_url = "http://download.tensorflow.org/models/mobilenet_v1_1.0_224_2017_06_14.tar.gz"
    checkpoints_dir = s3dexp.config.CKPT_DIR
    checkpoint_path = os.path.join(checkpoints_dir,
                                   'mobilenet_v1_1.0_224.ckpt')

    if not tf.gfile.Exists(checkpoints_dir):
        tf.gfile.MakeDirs(checkpoints_dir)
        dataset_utils.download_and_uncompress_tarball(checkpoint_url,
                                                      checkpoints_dir)

    with tf.Graph().as_default():
        logger.info("Creating compute graph ...")
        ########################################
        # Select the model
        ########################################
        network_fn = nets_factory.get_network_fn('mobilenet_v1',
                                                 num_classes=1001,
                                                 is_training=False)
        image_size = mobilenet_v1.mobilenet_v1.default_image_size

        ########################################
        # Define input and preprocessing tensors
        ########################################
        # crucial to specify dtype=tf.uint8. Otherwise we will get wrong predictions.
        inputs = tf.placeholder(dtype=tf.uint8,
                                shape=(None, image_size, image_size, 3))
        preprocessing_fn = get_preprocessing('mobilenet_v1')
        processed_images = tf.map_fn(
            lambda x: preprocessing_fn(x, image_size, image_size),
            inputs,
            dtype=tf.float32)

        ########################################
        # Create the compute graph
        ########################################
        logits, _ = network_fn(processed_images)
        probabilities = tf.nn.softmax(logits)

        # https://github.com/tensorflow/tensorflow/issues/4196
        # https://www.tensorflow.org/programmers_guide/using_gpu
        config = tf.ConfigProto()
        # config.gpu_options.allow_growth = True
        # config.gpu_options.per_process_gpu_memory_fraction = 0.4
        with tf.Session(config=config) as sess:
            logger.info("Loading checkpoint from %s" % checkpoint_path)
            saver = tf.train.Saver()
            saver.restore(sess, checkpoint_path)

            logger.info("Warm up with a fake image")
            fakeimages = np.random.randint(0,
                                           256,
                                           size=(1, image_size, image_size, 3),
                                           dtype=np.uint8)
            _ = sess.run(probabilities, feed_dict={inputs: fakeimages})

            ########################################
            # walk through directory and inference
            ########################################
            for path in recursive_glob(base_dir, "*.{}".format(ext)):
                tic = time.time()

                if not smart:
                    # 0. read from disk
                    with open(path, 'rb') as f:
                        buf = f.read()
                    read_time = time.time() - tic

                    # 1. image decode
                    arr = cv2.imdecode(np.frombuffer(buf, np.int8),
                                       cv2.IMREAD_COLOR)
                    decode_time = time.time() - tic
                else:
                    raise NotImplementedError

                h, w = arr.shape[:2]

                # 2. Run inference
                # resize
                arr_resized = cv2.resize(arr, (image_size, image_size),
                                         interpolation=cv2.INTER_AREA)
                images = np.expand_dims(arr_resized, 0)
                _ = sess.run(probabilities, feed_dict={inputs: images})

                all_time = time.time() - tic

                logger.debug(
                    "Read {:.1f} ms, Decode {:.1f}, Total {:.1f}. {}".format(
                        read_time * 1000, decode_time * 1000, all_time * 1000,
                        path))

                results.append({
                    'path': path,
                    'read_ms': read_time * 1000,
                    'decode_ms': decode_time * 1000,
                    'total_ms': all_time * 1000,
                    'size': len(buf),
                    'height': h,
                    'width': w
                })

    if store_results:
        logger.info("Writing {} results to DB".format(len(results)))
        dbsess = dbutils.get_session()
        for r in results:
            keys_dict = {
                'path': r['path'],
                'basename': os.path.basename(r['path']),
                'expname': 'mobilenet_inference',
                'device': 'gpu' if using_gpu else 'cpu',
                'disk': 'smart' if smart else 'hdd'
            }

            dbutils.insert_or_update_one(dbsess,
                                         dbmodles.AppExp,
                                         keys_dict=keys_dict,
                                         vals_dict={
                                             'read_ms': r['read_ms'],
                                             'decode_ms': r['decode_ms'],
                                             'total_ms': r['total_ms'],
                                             'size': r['size'],
                                             'height': r['height'],
                                             'width': r['width']
                                         })
        dbsess.commit()
        dbsess.close()
Example #21
def profile(config, dset):
    # Variables and cropping comp. graph
    num_channels = len(config["image_set"]["channels"])
    num_classes = dset.numberOfClasses()
    input_vars = learning.training.input_graph(config)
    images = input_vars["labeled_crops"][0]
    labels = tf.one_hot(input_vars["labeled_crops"][1], num_classes)

    # Setup pretrained model
    crop_shape = input_vars["shapes"]["crops"][0]
    raw_crops = tf.placeholder(tf.float32,
                               shape=(None, crop_shape[0], crop_shape[1],
                                      crop_shape[2]))
    network_input = crop_transform(raw_crops)
    url = config["profiling"]["url"]
    checkpoint = config["profiling"]["checkpoint"]
    if not os.path.isfile(checkpoint):
        dataset_utils.download_and_uncompress_tarball(
            url, os.path.dirname(checkpoint))
    with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
        _, endpoints = inception.inception_resnet_v2(network_input,
                                                     num_classes=1001,
                                                     is_training=False)
    init_fn = slim.assign_from_checkpoint_fn(checkpoint,
                                             slim.get_model_variables())

    # Session configuration
    configuration = tf.ConfigProto()
    configuration.gpu_options.allow_growth = True
    configuration.gpu_options.visible_device_list = config["profiling"]["gpu"]

    sess = tf.Session(config=configuration)
    init_fn(sess)

    def check(meta):
        output_file = config["profiling"]["output_dir"] + "/{}_{}_{}.npz"
        output_file = output_file.format(meta["Metadata_Plate"],
                                         meta["Metadata_Well"],
                                         meta["Metadata_Site"])

        # Check if features were computed before
        if os.path.isfile(output_file):
            print("Already done:", output_file)
            return False
        else:
            return True

    # Function to process a single image
    def extract_features(key, image_array, meta):
        output_file = config["profiling"]["output_dir"] + "/{}_{}_{}.npz"
        output_file = output_file.format(meta["Metadata_Plate"],
                                         meta["Metadata_Well"],
                                         meta["Metadata_Site"])

        # Prepare image and crop locations
        batch_size = config["training"]["minibatch"]
        image_key, image_names = dset.getImagePaths(meta)
        locations = [
            learning.cropping.getLocations(image_key, config, randomize=False)
        ]
        if len(locations[0]) == 0:
            print("Empty locations set:", str(key))
            return
        # Pad last batch with empty locations
        pads = batch_size - len(locations[0]) % batch_size
        zero_pads = np.zeros(shape=(pads, 2), dtype=np.int32)
        pad_data = pandas.DataFrame(columns=locations[0].columns,
                                    data=zero_pads)
        locations[0] = pandas.concat((locations[0], pad_data))

        # Prepare boxes, indices, labels and push the image to the queue
        labels_data = [meta[config["training"]["label_field"]]]
        boxes, box_ind, labels_data = learning.cropping.prepareBoxes(
            locations, labels_data, config)
        images_data = np.reshape(image_array, input_vars["shapes"]["batch"])

        sess.run(
            input_vars["enqueue_op"], {
                input_vars["image_ph"]: images_data,
                input_vars["boxes_ph"]: boxes,
                input_vars["box_ind_ph"]: box_ind,
                input_vars["labels_ph"]: labels_data
            })

        # Collect crops from the queue
        items = sess.run(input_vars["queue"].size())
        #TODO: move the channels to the last axis
        data = np.zeros(shape=(num_channels, len(locations[0]), num_features))
        b = 0
        start = tic()
        while items >= batch_size:
            # Compute features in a batch of crops
            crops = sess.run(images)
            feats = sess.run(endpoints['PreLogitsFlatten'],
                             feed_dict={raw_crops: crops})
            # TODO: move the channels to the last axis using np.moveaxis
            feats = np.reshape(feats, (num_channels, batch_size, num_features))
            data[:, b * batch_size:(b + 1) * batch_size, :] = feats
            items = sess.run(input_vars["queue"].size())
            b += 1

        # Save features
        # TODO: save data with channels in the last axis
        np.savez_compressed(output_file, f=data[:, :-pads, :])
        toc(image_key + " (" + str(data.shape[1] - pads) + ") cells", start)

    dset.scan(extract_features, frame="all", check=check)
    print("Profiling: done")
Example #22
import tensorflow as tf
from datasets import dataset_utils

url = "http://download.tensorflow.org/data/flowers.tar.gz"
flowers_data_dir = '/home/michael/Desktop/learning-tf/flower'

if not tf.gfile.Exists(flowers_data_dir):
    tf.gfile.MakeDirs(flowers_data_dir)

dataset_utils.download_and_uncompress_tarball(url, flowers_data_dir)
Example #23
def build_graph(cluster, image_url, return_list):
    prob_list = return_list
    num_workers = cluster.num_tasks('worker')
    
    # default picture for testing
    if image_url == None:
        image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/7/7e/Bow_bow.jpg/800px-Bow_bow.jpg"
    image_string = urllib.urlopen(image_url).read()
    #image_string = tf.read_file("/home/philiptkd/Downloads/Dependency_Tree.png") # I lost internet
    image_size = inception.inception_v1_dist.default_image_size
    
    # shared done list, ready list, and image
    with tf.device("/job:ps/task:0"):
        done_list = tf.get_variable("done_list", [num_workers+1], tf.int32, tf.zeros_initializer)
        ready_list = tf.get_variable("ready_list", [num_workers], tf.int32, tf.zeros_initializer)
    with tf.device("/job:worker/task:0"):
        # image
        image = tf.image.decode_jpeg(image_string, channels=3)
        processed_image = inception_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)
        processed_images  = tf.expand_dims(processed_image, 0)
        shared_image = tf.Variable(processed_images, name="shared_image") 

    #download the inception v1 checkpoint if we need to 
    url = "http://download.tensorflow.org/models/inception_v1_2016_08_28.tar.gz"
    checkpoints_dir = '/tmp/checkpoints'
    if not tf.gfile.Exists(checkpoints_dir):
        tf.gfile.MakeDirs(checkpoints_dir)
    if not tf.gfile.Exists(checkpoints_dir+'/inception_v1_2016_08_28.tar.gz'):
        dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)
    # end download

    server = tf.train.Server(cluster, job_name="ps", task_index=0)
    sess = tf.Session(target=server.target)

    # Create the model, use the default arg scope to configure the batch norm parameters.
    with slim.arg_scope(inception.inception_v1_dist_arg_scope()):
        logits, _ = inception.inception_v1_dist(shared_image, num_workers, num_classes=1001, is_training=False, reuse=tf.AUTO_REUSE)
        probabilities = tf.nn.softmax(logits)

    # initialization function that uses saved parameters
    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'inception_v1.ckpt'),
        slim.get_model_variables('InceptionV1'))
    sess.run(tf.initialize_variables([done_list, ready_list, shared_image])) # initialize variables that aren't model parameters
    init_fn(sess)
    
    # wait for workers to acknowledge variables have been initialized
    while sess.run(tf.reduce_sum(ready_list)) < num_workers:
        pass

    # do the thing
    print("before getting probs")
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    np_image, probabilities = sess.run([shared_image, probabilities], options=run_options, run_metadata=run_metadata)
    print("after getting probs")

    # see who did what
    for device in run_metadata.step_stats.dev_stats:
        print(device.device)
        for node in device.node_stats:
            print("  ", node.node_name)

    # indicate that the ps task is done
    sess.run(tf.scatter_update(done_list, [0], 1))
   
    # wait until all tasks are done
    num_done = 1
    while num_done < num_workers+1:
        num_done = sess.run(tf.reduce_sum(done_list)) 

    sess.close()

    probabilities = probabilities[0, 0:]
    sorted_inds = [i[0] for i in sorted(enumerate(-probabilities), key=lambda x:x[1])]

    names = imagenet.create_readable_names_for_imagenet_labels()
    for i in range(5):
        index = sorted_inds[i]
        probability = 'Probability %0.2f%% => [%s]' % (probabilities[index] * 100, names[index])
        prob_list.append(probability)
        print(probability)
Example #24
p.add_argument('--image_width', type=int, default=None, help='Target image width after resizing. If None original image width is used.')

FLAGS = p.parse_args()

if __name__ == '__main__':
  # check required input arguments
  if not FLAGS.project_name:
    raise ValueError('You must supply a dataset name with --project_name')
  if not FLAGS.dataset_name:
    raise ValueError('You must supply a dataset name with --dataset_name')

  # check dataset name and image directory
  if FLAGS.dataset_name == 'flowers' and not FLAGS.image_dir:
    # download flowers dataset
    image_dir = os.path.join('./image_dir', 'flower_photos')
    dataset_utils.download_and_uncompress_tarball(FLOWERS_DATA_URL, './image_dir')
  else:
    if not FLAGS.image_dir:
      raise ValueError('You must supply an image directory with --image_dir')
    image_dir = FLAGS.image_dir

  # set project_dir and convert images to tfrecord
  project_dir = os.path.join(FLAGS.project_dir, FLAGS.project_name)
  convert_dataset.convert_img_to_tfrecord(project_dir,
                          FLAGS.dataset_name,
                          FLAGS.dataset_dir,
                          image_dir,
                          FLAGS.train_percentage,
                          FLAGS.validation_percentage,
                          FLAGS.test_percentage,
                          FLAGS.image_height,
import tensorflow as tf
import os
from datasets import dataset_utils

url_inception_v1 = "http://download.tensorflow.org/models/inception_v1_2016_08_28.tar.gz"
url_vgg = "http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz"
checkpoints_dir = '/tmp/checkpoints'

if not tf.gfile.Exists(checkpoints_dir):
    tf.gfile.MakeDirs(checkpoints_dir)

if not tf.gfile.Exists(os.path.join(checkpoints_dir, 'inception_v1.ckpt')):
    dataset_utils.download_and_uncompress_tarball(url_inception_v1, checkpoints_dir)
if not tf.gfile.Exists(os.path.join(checkpoints_dir, 'vgg_16.ckpt')):
    dataset_utils.download_and_uncompress_tarball(url_vgg, checkpoints_dir)
"""
This file downloads and decompresses a checkpoint file containing
the latest inception_resnet_v2 weights.

"""
import sys
import os

dir = os.path.dirname(__file__)
slimFolderPath = os.path.join(dir, '../../../Dependencies/models/slim')

sys.path.append(slimFolderPath)

from datasets import dataset_utils
import tensorflow as tf

# model checkpoint
url = "http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz"

# relative download directory
checkpointsFolderPath = os.path.join(dir, 'checkpoints/')

if not tf.gfile.Exists(checkpointsFolderPath):
    tf.gfile.MakeDirs(checkpointsFolderPath)

dataset_utils.download_and_uncompress_tarball(url, checkpointsFolderPath)
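Once the tarball above is extracted, the checkpoint is typically restored with TF-Slim, mirroring the pattern used in the other examples here; the slim/nets imports, placeholder shape, and session setup below are a sketch, not part of the original file:

# Sketch of restoring the downloaded weights (assumes the TF-Slim `nets` package is importable).
import tensorflow.contrib.slim as slim
from nets import inception

ckpt_path = os.path.join(checkpointsFolderPath, 'inception_resnet_v2_2016_08_30.ckpt')
images = tf.placeholder(tf.float32, [None, 299, 299, 3])
with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
    logits, _ = inception.inception_resnet_v2(images, num_classes=1001, is_training=False)
init_fn = slim.assign_from_checkpoint_fn(ckpt_path, slim.get_model_variables())
with tf.Session() as sess:
    init_fn(sess)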
Example #27
def download_model(url, checkpoints_dir):
    if not tf.gfile.Exists(checkpoints_dir):
        tf.gfile.MakeDirs(checkpoints_dir)

    dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)
def write_weights(path):
    checkpoints_dir = os.path.join(path, 'checkpoints', 'NASNet-A_Large_331')
    print('checkpoints_dir', checkpoints_dir)
    weights_dir = os.path.join(path, 'weights', 'NASNet-A_Large_331')
    print('weights_dir', weights_dir)

    # download model
    file_checkpoint = os.path.join(checkpoints_dir, 'model.ckpt.index')
    if not tf.gfile.Exists(file_checkpoint):
        tf.gfile.MakeDirs(checkpoints_dir)
        dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)

    file_checkpoint = os.path.join(checkpoints_dir, 'model.ckpt')

    with tf.Graph().as_default():
        # Create model architecture

        image_size = 331
        print('image_size', image_size)
        inputs_np = np.ones((1, image_size, image_size, 3), dtype=np.float32)
        #inputs_np = np.load(weights_dir + '/input.npy')
        print('input', inputs_np.shape)

        inputs = tf.constant(inputs_np, dtype=tf.float32)

        with slim.arg_scope(nasnet_large_arg_scope()):
            logits, _ = nasnet.build_nasnet_large(inputs, num_classes=1001, is_training=False)

        with tf.Session() as sess:
            # Initialize model
            init_fn = slim.assign_from_checkpoint_fn(file_checkpoint, slim.get_model_variables())
            init_fn(sess)

            # Display model variables
            for v in slim.get_model_variables():
                print('name = {}, shape = {}'.format(v.name, v.get_shape()))

            # Create graph
            os.system("rm -rf logs")
            os.system("mkdir -p logs")

            writer = tf.summary.FileWriter('logs', graph=tf.get_default_graph())

            # conv0
            dump_conv2d(sess=sess, path=weights_dir, name='conv0')
            dump_bn(sess=sess, path=weights_dir, name='conv0_bn')

            # cell_stem
            dump_cell_stem_0(sess=sess, path=weights_dir, name='cell_stem_0')
            dump_cell_stem_1(sess=sess, path=weights_dir, name='cell_stem_1')

            dump_first_cell(sess=sess, path=weights_dir, name='cell_0')
            dump_normal_cell(sess=sess, path=weights_dir, name='cell_1')
            dump_normal_cell(sess=sess, path=weights_dir, name='cell_2')
            dump_normal_cell(sess=sess, path=weights_dir, name='cell_3')
            dump_normal_cell(sess=sess, path=weights_dir, name='cell_4')
            dump_normal_cell(sess=sess, path=weights_dir, name='cell_5')

            dump_reduction_cell(sess=sess, path=weights_dir, name='reduction_cell_0')

            dump_first_cell(sess=sess, path=weights_dir, name='cell_6')
            dump_normal_cell(sess=sess, path=weights_dir, name='cell_7')
            dump_normal_cell(sess=sess, path=weights_dir, name='cell_8')
            dump_normal_cell(sess=sess, path=weights_dir, name='cell_9')
            dump_normal_cell(sess=sess, path=weights_dir, name='cell_10')
            dump_normal_cell(sess=sess, path=weights_dir, name='cell_11')

            dump_reduction_cell(sess=sess, path=weights_dir, name='reduction_cell_1')

            dump_first_cell(sess=sess, path=weights_dir, name='cell_12')
            dump_normal_cell(sess=sess, path=weights_dir, name='cell_13')
            dump_normal_cell(sess=sess, path=weights_dir, name='cell_14')
            dump_normal_cell(sess=sess, path=weights_dir, name='cell_15')
            dump_normal_cell(sess=sess, path=weights_dir, name='cell_16')
            dump_normal_cell(sess=sess, path=weights_dir, name='cell_17')

            dump_final_layer(sess, weights_dir, name='final_layer')
Example #29
def run(base_dir,
        ext="jpg",
        store_results='',
        smart=False,
        batch_size=8,
        num_parallel_calls=None,
        etl_only=False):
    # adjust default parameters
    if not num_parallel_calls:
        num_parallel_calls = batch_size

    # GPU or CPU?
    using_gpu = tf.test.is_gpu_available()
    if using_gpu:
        logger.info("Running on GPU")
    else:
        from tensorflow.python.framework import test_util as tftest_util
        assert tftest_util.IsMklEnabled(
        ), "This tensorflow is not compiled with MKL. Abort."
        logger.warn("Running on CPU")

    # Download and uncompress model
    checkpoint_url = "http://download.tensorflow.org/models/mobilenet_v1_1.0_224_2017_06_14.tar.gz"
    checkpoints_dir = s3dexp.config.CKPT_DIR
    checkpoint_path = os.path.join(checkpoints_dir,
                                   'mobilenet_v1_1.0_224.ckpt')
    if not tf.gfile.Exists(checkpoints_dir):
        tf.gfile.MakeDirs(checkpoints_dir)
        dataset_utils.download_and_uncompress_tarball(checkpoint_url,
                                                      checkpoints_dir)

    # Prepare the `load_and_preprocess_fn` function to be passed into Dataset.map
    # NOTE: in graph mode, this function takes in a tensor and adds operators to the graph.
    if not smart:

        def load_and_preprocess_fn(path):  # path is tensor
            # 0. read from disk
            raw = tf.read_file(path)
            # 1. image decode
            image = tf.image.decode_jpeg(
                raw, channels=3
            )  # tf.image.decoe_image() doesn't return shape, causing error  https://stackoverflow.com/questions/44942729/tensorflowvalueerror-images-contains-no-shape
            # 2. resize
            image_resize = tf.image.resize_images(image,
                                                  (image_size, image_size))
            return image_resize  # Tensor
    else:
        # TODO use our smart storage here
        raise NotImplementedError

        def load_and_preprocess_fn(path):
            def smart_fn(path):
                # this pure Python function will actually be called many times, by multiple threads if num_parallel_calls > 1
                logger.debug("Enter smart_fn. Path {}".format(path))
                # TODO replace with real smart storage logic
                fakeimage = np.random.randint(0,
                                              256,
                                              size=(image_size, image_size, 3),
                                              dtype=np.uint8)
                logger.debug("Exit smart_fn")
                return fakeimage

            out_op = tf.py_func(smart_fn, [path], tf.uint8)
            out_op.set_shape([image_size, image_size,
                              3])  # must explicitly set shape to avoid error
            return out_op

    results = []

    with tf.Graph().as_default():
        logger.info("Creating compute graph ...")
        ########################################
        # Select the model
        ########################################
        network_fn = nets_factory.get_network_fn('mobilenet_v1',
                                                 num_classes=1001,
                                                 is_training=False)
        image_size = mobilenet_v1.mobilenet_v1.default_image_size

        ########################################
        # Create a tf.data.Dataset with batching
        ########################################
        all_paths = list(recursive_glob(base_dir, "*.{}".format(ext)))
        logger.info("Found {} paths".format(len(all_paths)))
        path_ds = tf.data.Dataset.from_tensor_slices(all_paths)
        image_ds = path_ds.map(
            load_and_preprocess_fn,
            num_parallel_calls=num_parallel_calls).batch(batch_size)
        # create iterator
        iterator = image_ds.make_initializable_iterator()
        batch_of_images = iterator.get_next()

        ########################################
        # Define input and preprocessing tensors
        ########################################
        preprocessing_fn = get_preprocessing('mobilenet_v1')
        processed_images = tf.map_fn(
            lambda x: preprocessing_fn(x, image_size, image_size),
            batch_of_images,
            dtype=tf.float32)

        ########################################
        # Create the compute graph
        ########################################
        logits, _ = network_fn(processed_images)
        probabilities = tf.nn.softmax(logits)

        config = tf.ConfigProto()
        with tf.Session(config=config) as sess:
            logger.info("Loading checkpoint from %s" % checkpoint_path)
            saver = tf.train.Saver()
            saver.restore(sess, checkpoint_path)

            # initialize Dataset iterator
            sess.run(iterator.initializer)

            logger.info("Warm up with a fake batch")
            fakeimages = np.random.randint(0,
                                           256,
                                           size=(batch_size, image_size,
                                                 image_size,
                                                 3)).astype(np.float32)
            _ = sess.run(probabilities,
                         feed_dict={processed_images: fakeimages})

            try:
                count_image = 0
                count_batch = 0
                elapsed = 0.
                tic = time.time()
                while True:
                    if etl_only:
                        res = sess.run(batch_of_images)
                    else:
                        res = sess.run(probabilities)

                    toc = time.time()
                    logger.debug(
                        "Batch {}, batch size {}, elapsed {:.1f}".format(
                            count_batch, res.shape[0],
                            1000 * (toc - tic - elapsed)))

                    if res.shape[0] < batch_size:
                        # discard last batch
                        continue
                    else:
                        elapsed = toc - tic
                        count_batch += 1
                        count_image += batch_size

            except tf.errors.OutOfRangeError:
                pass
            finally:
                logger.info(
                    "Ran {} batches, {} images, batch size {}, avg ms/image {:.2f}"
                    .format(count_batch, count_image, batch_size,
                            elapsed * 1000 / count_image))
from datasets import dataset_utils
from src.utils import helper

import argparse
import os

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Default argument')
    parser.add_argument('-c',
                        dest="config_filename", type=str, required=True,
                        help='the config file name must be provide')
    args = parser.parse_args()

    arg_config = helper.parse_config_file(args.config_filename)

    url = "http://download.tensorflow.org/data/flowers.tar.gz"
    flowers_data_dir = '/data/flowers'

    if not tf.gfile.Exists(flowers_data_dir):
        tf.gfile.MakeDirs(flowers_data_dir)

    # dataset_utils.download_and_uncompress_tarball(url, flowers_data_dir)

    checkpoints_dir = os.path.join('/data/pretain_model', arg_config.PRETAIN_MODEL)

    if not tf.gfile.Exists(checkpoints_dir):
        tf.gfile.MakeDirs(checkpoints_dir)

    dataset_utils.download_and_uncompress_tarball(arg_config.PRETAIN_MODEL_URL, checkpoints_dir)

Example #31
# Base url
TF_MODELS_URL = "http://download.tensorflow.org/models/"
# Modify this path for a different CNN
INCEPTION_V3_URL = TF_MODELS_URL + "inception_v3_2016_08_28.tar.gz"
INCEPTION_V4_URL = TF_MODELS_URL + "inception_v4_2016_09_09.tar.gz"
# Directory to save model checkpoints
MODELS_DIR = "models/cnn"
INCEPTION_V3_CKPT_PATH = MODELS_DIR + "/inception_v3.ckpt"
INCEPTION_V4_CKPT_PATH = MODELS_DIR + "/inception_v4.ckpt"
# Make the model directory if it does not exist
if not tf.gfile.Exists(MODELS_DIR):
    tf.gfile.MakeDirs(MODELS_DIR)

# Download the appropriate model if we haven't already done so
if not os.path.exists(INCEPTION_V3_CKPT_PATH):
    dataset_utils.download_and_uncompress_tarball(INCEPTION_V3_URL, MODELS_DIR)

if not os.path.exists(INCEPTION_V4_CKPT_PATH):
    dataset_utils.download_and_uncompress_tarball(INCEPTION_V4_URL, MODELS_DIR)


# Takes the name of an image and, optionally, which network to use for predictions.
# Currently the only options for the net are Inception V3 and Inception V4.
# Plots the raw image and displays the top-10 class predictions.
def predict(image, version='V3'):
    tf.reset_default_graph()
    # Process the image
    raw_image, processed_image = process_image(image)
    class_names = imagenet.create_readable_names_for_imagenet_labels()
    # Create a placeholder for the images
    X = tf.placeholder(tf.float32, [None, 299, 299, 3], name="X")
Example #32
def write_weights(path, nas_type):
    checkpoints_dir = os.path.join(
        path, 'checkpoints',
        'NASNet-A_Large_331' if nas_type == 'large' else 'NASNet-A_Mobile_224')
    print('checkpoints_dir', checkpoints_dir)
    weights_dir = os.path.join(
        path, 'weights',
        'NASNet-A_Large_331' if nas_type == 'large' else 'NASNet-A_Mobile_224')
    print('weights_dir', weights_dir)

    # download model
    file_checkpoint = os.path.join(checkpoints_dir, 'model.ckpt.index')
    if not tf.gfile.Exists(file_checkpoint):
        tf.gfile.MakeDirs(checkpoints_dir)
        dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)

    file_checkpoint = os.path.join(checkpoints_dir, 'model.ckpt')

    with tf.Graph().as_default():
        # Create model architecture

        image_size = 224 if nas_type == 'mobile' else 331
        print('image_size', image_size)
        num_classes = 1001
        inputs_np = np.ones((1, image_size, image_size, 3), dtype=np.float32)
        #inputs_np = np.load(weights_dir + '/input.npy')
        print('input', inputs_np.shape)

        inputs = tf.constant(inputs_np, dtype=tf.float32)

        with slim.arg_scope(nasnet_mobile_arg_scope() if nas_type ==
                            'mobile' else nasnet_large_arg_scope()):
            build_nasnet = getattr(
                nasnet, 'build_nasnet_mobile'
                if nas_type == 'mobile' else 'build_nasnet_large')
            logits, _ = build_nasnet(inputs,
                                     num_classes=num_classes,
                                     is_training=False)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            # Initialize model
            init_fn = slim.assign_from_checkpoint_fn(
                file_checkpoint, slim.get_model_variables())
            init_fn(sess)

            # Display model variables
            for v in slim.get_model_variables():
                print('name = {}, shape = {}'.format(v.name, v.get_shape()))

            # Create graph
            os.system("rm -rf logs")
            os.system("mkdir -p logs")

            writer = tf.summary.FileWriter('logs',
                                           graph=tf.get_default_graph())

            # conv0
            dump_conv2d(sess=sess, path=weights_dir, name='conv0')
            dump_bn(sess=sess, path=weights_dir, name='conv0_bn')

            # cell_stem
            dump_cell_stem_0(sess=sess, path=weights_dir, name='cell_stem_0')
            dump_cell_stem_1(sess=sess, path=weights_dir, name='cell_stem_1')

            num_normal_cells = 4 if nas_type == 'mobile' else 6
            cell_id = 0
            for i in range(3):
                dump_first_cell(sess=sess,
                                path=weights_dir,
                                name='cell_' + str(cell_id))
                cell_id += 1
                for _ in range(num_normal_cells - 1):
                    dump_normal_cell(sess=sess,
                                     path=weights_dir,
                                     name='cell_' + str(cell_id))
                    cell_id += 1
                if i < 2:
                    dump_reduction_cell(sess=sess,
                                        path=weights_dir,
                                        name='reduction_cell_' + str(i))
                else:
                    dump_final_layer(sess, weights_dir, name='final_layer')
from datasets import dataset_utils
import tensorflow as tf
from urllib.request import urlopen
from nets import vgg
from preprocessing import vgg_preprocessing
import os

# One tip: the example code below must be run after downloading and extracting the vgg-16 model. Downloading it on
# every run is a drawback, but inserting code like the following lets you test without downloading it in advance.
vgg_url = "http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz"
# Location where the vgg-16 model was downloaded and extracted (vgg_16.ckpt must exist at this path)
target_dir = '/Users/gamgoon/git/learning.tensorflow/checkpoints'
if not tf.gfile.Exists(target_dir):
    tf.gfile.MakeDirs(target_dir)
dataset_utils.download_and_uncompress_tarball(vgg_url, target_dir)

# target_dir = '/Users/gamgoon/git/learning.tensorflow/checkpoints'
url = ("http://54.68.5.226/car.jpg")
im_as_string = urlopen(url).read()
image = tf.image.decode_jpeg(im_as_string, channels=3)

image_size = vgg.vgg_16.default_image_size

processed_im = vgg_preprocessing.preprocess_image(image,
                                                  image_size,
                                                  image_size,
                                                  is_training=False)
processed_images = tf.expand_dims(processed_im, 0)

with slim.arg_scope(vgg.vgg_arg_scope()):