Code example #1
def run(dataset_dir):

    if not tf.gfile.Exists(dataset_dir):
        tf.gfile.MakeDirs(dataset_dir)

    if _dataset_exists(dataset_dir):
        print('Dataset files already exist. Exiting without re-creating them.')
        return

    dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
    photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)
    class_names_to_ids = dict(zip(class_names, range(len(class_names))))

    # Divide into train and validation sets:
    random.seed(_RANDOM_SEED)
    random.shuffle(photo_filenames)
    training_filenames = photo_filenames[_NUM_VALIDATION:]
    validation_filenames = photo_filenames[:_NUM_VALIDATION]

    _convert_dataset('train', training_filenames, class_names_to_ids,
                     dataset_dir)
    _convert_dataset('validation', validation_filenames, class_names_to_ids,
                     dataset_dir)

    labels_to_class_names = dict(zip(range(len(class_names)), class_names))
    dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

    _clean_up_temporary_files(dataset_dir)
    print('\nFinished converting the Flowers dataset!')
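
A typical way to wire run() to the command line, mirroring the TF-Slim download_and_convert scripts (the flag definition and main() below are an assumed sketch, not part of the example):

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('dataset_dir', None,
                           'Directory where the converted dataset is written.')

def main(_):
    # run() is the function defined in the example above.
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    run(FLAGS.dataset_dir)

if __name__ == '__main__':
    tf.app.run()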
Code example #2
def run(dataset_dir):
    """Runs the download and conversion operation.
    Args:
      dataset_dir: The dataset directory where the dataset is stored.
    """
    if not tf.gfile.Exists(dataset_dir):
        tf.gfile.MakeDirs(dataset_dir)

    if _dataset_exists(dataset_dir):
        print('Dataset files already exist. Exiting without re-creating them.')
        return

    dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
    photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)
    class_names_to_ids = dict(zip(class_names, range(len(class_names))))

    # Divide into train and validation sets:
    random.seed(_RANDOM_SEED)
    random.shuffle(photo_filenames)
    training_filenames = photo_filenames[_NUM_VALIDATION:]
    validation_filenames = photo_filenames[:_NUM_VALIDATION]

    # First, convert the training and validation sets.
    _convert_dataset('train', training_filenames, class_names_to_ids,
                     dataset_dir)
    _convert_dataset('validation', validation_filenames, class_names_to_ids,
                     dataset_dir)

    # Finally, write the labels file:
    labels_to_class_names = dict(zip(range(len(class_names)), class_names))
    dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

    # _clean_up_temporary_files(dataset_dir)
    print('\nFinished converting the Flowers dataset!')
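
Both run() variants depend on a private _dataset_exists helper that is not shown. In the TF-Slim flowers script it simply checks that every expected TFRecord shard is already on disk; a sketch under that assumption (the shard count and filename pattern are assumptions):

_NUM_SHARDS = 5  # assumption: number of TFRecord shards per split

def _get_dataset_filename(dataset_dir, split_name, shard_id):
    output_filename = 'flowers_%s_%05d-of-%05d.tfrecord' % (
        split_name, shard_id, _NUM_SHARDS)
    return os.path.join(dataset_dir, output_filename)

def _dataset_exists(dataset_dir):
    for split_name in ['train', 'validation']:
        for shard_id in range(_NUM_SHARDS):
            output_filename = _get_dataset_filename(dataset_dir, split_name, shard_id)
            if not tf.gfile.Exists(output_filename):
                return False
    return True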
Code example #3
import os

import numpy as np
import tensorflow as tf

# Assumed TF-Slim module paths for the helpers used below:
from datasets import dataset_utils
from nets import inception_v3 as v3
from preprocessing import inception_preprocessing


def getvector(imagedir):
  slim = tf.contrib.slim

  batch_size = 3
  image_size = v3.inception_v3.default_image_size

  url = "http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz"
  checkpoints_dir = os.getcwd()

  if not tf.gfile.Exists(checkpoints_dir + '/inception_v3.ckpt'):
    dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)

  with tf.Graph().as_default():
    # imagedir = '/home/jiexun/Desktop/Siraj/ImageChallenge/Necessary/train/cat.0.jpg'
    image_string = tf.read_file(imagedir)
    image = tf.image.decode_jpeg(image_string, channels=3)
    
    # Trying to fix the resize (left disabled)
    '''
    print(image.shape)
    print(image)
    image = tf.cast(image, tf.float32)
    image = tf.image.resize_images(image, [299, 299])
    print(image.shape)
    print(image)
    image_jpg = tf.image.encode_jpeg(image)
    plt.imshow(image_jpg)
    '''
    # end of patch

    processed_image = inception_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)
    processed_images = tf.expand_dims(processed_image, 0)

    # Create the model, use the default arg scope to configure the batch norm parameters.
    print('Initializing the InceptionV3 model...')
    with slim.arg_scope(v3.inception_v3_arg_scope()):
      # The logits are only 1001-d; the 2048-d feature vector this function
      # returns lives in the pre-logits end point, shaped [1, 1, 1, 2048].
      _, end_points = v3.inception_v3(processed_images, num_classes=1001, is_training=False)
      vector = end_points['PreLogits']

    init_fn = slim.assign_from_checkpoint_fn(os.path.join(checkpoints_dir, 'inception_v3.ckpt'),
                                             slim.get_model_variables('InceptionV3'))
    with tf.Session() as sess:
      init_fn(sess)
      np_image, vector = sess.run([image, vector])

    # Flatten the [1, 1, 1, 2048] activations into a (1, 2048) feature matrix.
    a = np.reshape(vector, (1, 2048))

  return a
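
A hypothetical call site for getvector() (the image path is a placeholder):

features = getvector('train/cat.0.jpg')  # placeholder path
print(features.shape)  # (1, 2048): the flattened PreLogits activations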
Code example #4
def main(_):
  """Runs the download and conversion operation.

  Args:
    dataset_dir: The dataset directory where the dataset is stored.
  """
  dataset_dir = FLAGS.dataset_dir

  if not tf.gfile.Exists(dataset_dir):
    tf.gfile.MakeDirs(dataset_dir)

  training_filename = _get_output_filename(dataset_dir, 'train')
  testing_filename = _get_output_filename(dataset_dir, 'test')

  if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
    print('Dataset files already exist. Exiting without re-creating them.')
    return

  dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)

  # First, process the training data:
  with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
    offset = 0
    for i in range(_NUM_TRAIN_FILES):
      filename = os.path.join(dataset_dir,
                              'cifar-10-batches-py',
                              'data_batch_%d' % (i + 1))  # 1-indexed.
      offset = _add_to_tfrecord(filename, tfrecord_writer, offset)

  # Next, process the testing data:
  with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
    filename = os.path.join(dataset_dir,
                            'cifar-10-batches-py',
                            'test_batch')
    _add_to_tfrecord(filename, tfrecord_writer)

  # Finally, write the labels file:
  labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
  dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

  _clean_up_temporary_files(dataset_dir)
  print('\nFinished converting the Cifar10 dataset!')
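
The _get_output_filename helper used above is not shown; in the TF-Slim CIFAR-10 script it simply builds a per-split TFRecord path, roughly:

def _get_output_filename(dataset_dir, split_name):
    """Creates the output filename, e.g. <dataset_dir>/cifar10_train.tfrecord."""
    return '%s/cifar10_%s.tfrecord' % (dataset_dir, split_name)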
Code example #5
# Base url
TF_MODELS_URL = "http://download.tensorflow.org/models/"
# Modify this path for a different CNN
INCEPTION_V3_URL = TF_MODELS_URL + "inception_v3_2016_08_28.tar.gz"
INCEPTION_V4_URL = TF_MODELS_URL + "inception_v4_2016_09_09.tar.gz"
# Directory to save model checkpoints
MODELS_DIR = "models/cnn"
INCEPTION_V3_CKPT_PATH = MODELS_DIR + "/inception_v3.ckpt"
INCEPTION_V4_CKPT_PATH = MODELS_DIR + "/inception_v4.ckpt"
# Make the model directory if it does not exist
if not tf.gfile.Exists(MODELS_DIR):
    tf.gfile.MakeDirs(MODELS_DIR)
 
# Download each model if it hasn't already been downloaded
if not os.path.exists(INCEPTION_V3_CKPT_PATH):    
    dataset_utils.download_and_uncompress_tarball(INCEPTION_V3_URL, MODELS_DIR)
    
if not os.path.exists(INCEPTION_V4_CKPT_PATH):
    dataset_utils.download_and_uncompress_tarball(INCEPTION_V4_URL, MODELS_DIR)

# Processing images
import inception_preprocessing
# This can be modified depending on the model used and the training image dataset
def process_image(image):
    root_dir = "images/"
    filename = root_dir + image
    with open(filename, "rb") as f:
        image_str = f.read()
        
    if image.endswith('jpg'):
        raw_image = tf.image.decode_jpeg(image_str, channels=3)
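
The example is cut off after the JPEG branch. A minimal, hedged completion (the PNG branch and the final preprocessing call are assumptions about where the code was headed):

    elif image.endswith('png'):
        raw_image = tf.image.decode_png(image_str, channels=3)
    else:
        raise ValueError('Unsupported image format: %s' % image)

    # Resize, crop, and scale pixel values the way Inception expects.
    image_size = 299  # assumption: default input size for Inception V3/V4
    return inception_preprocessing.preprocess_image(
        raw_image, image_size, image_size, is_training=False)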
Code example #6
File: trainer.py  Project: Andrew62/dogcatcher
    print("DEBUG MODE")
    BATCH_SIZE = 2
    EPOCHS = 1
else:
    BATCH_SIZE = 16
    EPOCHS = 90


classes = util.pkl_load(workspace.class_pkl)
csv_files = workspace.csvs

if not tf.gfile.Exists(workspace.inception_cpkt):
    tf.gfile.MakeDirs(workspace.inception_cpkt)

if not os.path.exists(os.path.join(workspace.inception_cpkt, os.path.basename(workspace.inception_url))):
    dataset_utils.download_and_uncompress_tarball(workspace.inception_url,
                                                  workspace.inception_cpkt)


def get_init_fn():
    """Returns a function run by the chief worker to warm-start the training."""
    checkpoint_exclude_scopes = ["InceptionV1/Logits", "InceptionV1/AuxLogits"]

    exclusions = [scope.strip() for scope in checkpoint_exclude_scopes]

    variables_to_restore = []
    for var in slim.get_model_variables():
        excluded = False
        for exclusion in exclusions:
            if var.op.name.startswith(exclusion):
                excluded = True
                break
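
The excerpt stops inside the exclusion loop. In the TF-Slim fine-tuning walkthrough, get_init_fn() continues roughly as follows (the checkpoint filename is an assumption based on the InceptionV1 scopes above):

        if not excluded:
            variables_to_restore.append(var)

    return slim.assign_from_checkpoint_fn(
        os.path.join(workspace.inception_cpkt, 'inception_v1.ckpt'),
        variables_to_restore)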
Code example #7
File: trainer.py  Project: Andrew62/dogcatcher
    BATCH_SIZE = 2
    EPOCHS = 1
else:
    BATCH_SIZE = 16
    EPOCHS = 90

classes = util.pkl_load(workspace.class_pkl)
csv_files = workspace.csvs

if not tf.gfile.Exists(workspace.inception_cpkt):
    tf.gfile.MakeDirs(workspace.inception_cpkt)

if not os.path.exists(
        os.path.join(workspace.inception_cpkt,
                     os.path.basename(workspace.inception_url))):
    dataset_utils.download_and_uncompress_tarball(workspace.inception_url,
                                                  workspace.inception_cpkt)


def get_init_fn():
    """Returns a function run by the chief worker to warm-start the training."""
    checkpoint_exclude_scopes = ["InceptionV1/Logits", "InceptionV1/AuxLogits"]

    exclusions = [scope.strip() for scope in checkpoint_exclude_scopes]

    variables_to_restore = []
    for var in slim.get_model_variables():
        excluded = False
        for exclusion in exclusions:
            if var.op.name.startswith(exclusion):
                excluded = True
                break
Code example #8

import dataset_utils
import numpy as np
import os
import tensorflow as tf

url = "http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz"
checkpoints_dir = '/tmp/checkpoints'

if not tf.gfile.Exists(checkpoints_dir):
    tf.gfile.MakeDirs(checkpoints_dir)

dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)
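
After the tarball is unpacked, the checkpoint is typically restored with slim.assign_from_checkpoint_fn, as in the earlier examples (this sketch assumes the inception_resnet_v2 graph has already been built under its standard 'InceptionResnetV2' scope):

slim = tf.contrib.slim

init_fn = slim.assign_from_checkpoint_fn(
    os.path.join(checkpoints_dir, 'inception_resnet_v2_2016_08_30.ckpt'),
    slim.get_model_variables('InceptionResnetV2'))

with tf.Session() as sess:
    init_fn(sess)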