Example #1
def get_wmt_enfr_train_set(path):
    """Download the WMT en-fr training corpus to directory unless it's there."""
    filename = "training-giga-fren.tar"
    maybe_download_and_extract(filename, path, _WMT_ENFR_TRAIN_URL, extract=True)
    train_path = os.path.join(path, "giga-fren.release2.fixed")
    gunzip_file(train_path + ".fr.gz", train_path + ".fr")
    gunzip_file(train_path + ".en.gz", train_path + ".en")
    return train_path
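This snippet assumes a `gunzip_file` helper is in scope. A minimal sketch of such a helper under that assumption, using only the standard library (the real TensorLayer version may differ, e.g. in logging or error handling):

import gzip
import os
import shutil

def gunzip_file(gz_path, new_path):
    """Decompress gz_path into new_path unless new_path already exists (assumed behavior)."""
    if os.path.exists(new_path):
        return
    with gzip.open(gz_path, 'rb') as gz_file, open(new_path, 'wb') as new_file:
        shutil.copyfileobj(gz_file, new_file)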
Example #2
def load_cyclegan_dataset(filename='summer2winter_yosemite', path='raw_data'):
    """Load images from CycleGAN's database, see `this link <https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/>`__.

    Parameters
    ------------
    filename : str
        The dataset you want, see `this link <https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/>`__.
    path : str
        The path that the data is downloaded to; the default is ``raw_data/cyclegan``.

    Examples
    ---------
    >>> im_train_A, im_train_B, im_test_A, im_test_B = load_cyclegan_dataset(filename='summer2winter_yosemite')

    """
    path = os.path.join(path, 'cyclegan')
    url = 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/'

    if folder_exists(os.path.join(path, filename)) is False:
        logging.info("[*] {} is nonexistent in {}".format(filename, path))
        maybe_download_and_extract(filename + '.zip', path, url, extract=True)
        del_file(os.path.join(path, filename + '.zip'))

    def load_image_from_folder(path):
        path_imgs = load_file_list(path=path, regx='\\.jpg', printable=False)
        return visualize.read_images(path_imgs,
                                     path=path,
                                     n_threads=10,
                                     printable=False)

    im_train_A = load_image_from_folder(os.path.join(path, filename, "trainA"))
    im_train_B = load_image_from_folder(os.path.join(path, filename, "trainB"))
    im_test_A = load_image_from_folder(os.path.join(path, filename, "testA"))
    im_test_B = load_image_from_folder(os.path.join(path, filename, "testB"))

    def if_2d_to_3d(images):  # [h, w] --> [h, w, 3]
        for i, _v in enumerate(images):
            if len(images[i].shape) == 2:
                images[i] = images[i][:, :, np.newaxis]
                images[i] = np.tile(images[i], (1, 1, 3))
        return images

    im_train_A = if_2d_to_3d(im_train_A)
    im_train_B = if_2d_to_3d(im_train_B)
    im_test_A = if_2d_to_3d(im_test_A)
    im_test_B = if_2d_to_3d(im_test_B)

    return im_train_A, im_train_B, im_test_A, im_test_B
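The `if_2d_to_3d` helper above exists because grayscale JPEGs decode to 2-D `[h, w]` arrays while the rest of the pipeline expects `[h, w, 3]`. An equivalent channel expansion for a single image, shown as a minimal sketch on a dummy array:

import numpy as np

gray = np.zeros((256, 256), dtype=np.uint8)   # a 2-D grayscale image
rgb = np.stack([gray, gray, gray], axis=-1)   # (256, 256) -> (256, 256, 3), same effect as the tile above
assert rgb.shape == (256, 256, 3)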
Example #3
def load_mnist_labels(path, filename):
    # NOTE: `url` is a free variable resolved from the enclosing MNIST loader.
    filepath = maybe_download_and_extract(filename, path, url)
    # Read the labels in Yann LeCun's binary format (offset=8 skips the IDX
    # header: magic number and label count, two 32-bit integers).
    with gzip.open(filepath, 'rb') as f:
        data = np.frombuffer(f.read(), np.uint8, offset=8)
    # The labels are vectors of integers now, that's exactly what we want.
    return data
Example #4
def load_nietzsche_dataset(path='data'):
    """Load Nietzsche dataset.

    Parameters
    ----------
    path : str
        The path that the data is downloaded to; the default is ``data/nietzsche/``.

    Returns
    --------
    str
        The content.

    Examples
    --------
    See ``tutorial_generate_text.py``.

    >>> words = tl.files.load_nietzsche_dataset()
    >>> words = basic_clean_str(words)
    >>> words = words.split()

    """
    logging.info("Load or Download nietzsche dataset > {}".format(path))
    path = os.path.join(path, 'nietzsche')

    filename = "nietzsche.txt"
    url = 'https://s3.amazonaws.com/text-datasets/'
    filepath = maybe_download_and_extract(filename, path, url)

    with open(filepath, "r") as f:
        words = f.read()
        return words
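This corpus is usually consumed character by character. A minimal sketch of mapping the returned string to integer ids for character-level language modelling (names are illustrative, not part of `tl.files`):

text = load_nietzsche_dataset()
chars = sorted(set(text))                          # the character vocabulary
char_to_id = {c: i for i, c in enumerate(chars)}
ids = [char_to_id[c] for c in text]                # the whole corpus as ids
print('{} distinct characters'.format(len(chars)))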
Example #5
def load_cyclegan_dataset(filename='summer2winter_yosemite', path='data'):
    """Load images from CycleGAN's database, see `this link <https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/>`__.

    Parameters
    ------------
    filename : str
        The dataset you want, see `this link <https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/>`__.
    path : str
        The path that the data is downloaded to; the default is ``data/cyclegan``.

    Examples
    ---------
    >>> im_train_A, im_train_B, im_test_A, im_test_B = load_cyclegan_dataset(filename='summer2winter_yosemite')

    """
    path = os.path.join(path, 'cyclegan')
    url = 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/'

    if folder_exists(os.path.join(path, filename)) is False:
        logging.info("[*] {} is nonexistent in {}".format(filename, path))
        maybe_download_and_extract(filename + '.zip', path, url, extract=True)
        del_file(os.path.join(path, filename + '.zip'))

    def load_image_from_folder(path):
        path_imgs = load_file_list(path=path, regx='\\.jpg', printable=False)
        return visualize.read_images(path_imgs, path=path, n_threads=10, printable=False)

    im_train_A = load_image_from_folder(os.path.join(path, filename, "trainA"))
    im_train_B = load_image_from_folder(os.path.join(path, filename, "trainB"))
    im_test_A = load_image_from_folder(os.path.join(path, filename, "testA"))
    im_test_B = load_image_from_folder(os.path.join(path, filename, "testB"))

    def if_2d_to_3d(images):  # [h, w] --> [h, w, 3]
        for i, _v in enumerate(images):
            if len(images[i].shape) == 2:
                images[i] = images[i][:, :, np.newaxis]
                images[i] = np.tile(images[i], (1, 1, 3))
        return images

    im_train_A = if_2d_to_3d(im_train_A)
    im_train_B = if_2d_to_3d(im_train_B)
    im_test_A = if_2d_to_3d(im_test_A)
    im_test_B = if_2d_to_3d(im_test_B)

    return im_train_A, im_train_B, im_test_A, im_test_B
Example #6
def load_mnist_images(path, filename):
    filepath = maybe_download_and_extract(filename, path, url)

    logging.info(filepath)
    # Read the inputs in Yann LeCun's binary format.
    with gzip.open(filepath, 'rb') as f:
        data = np.frombuffer(f.read(), np.uint8, offset=16)
    # The inputs are vectors now, we reshape them to monochrome 2D images,
    # following the shape convention: (examples, channels, rows, columns)
    data = data.reshape(shape)
    # The inputs come as bytes, we convert them to float32 in range [0,1].
    # (Actually to range [0, 255/256], for compatibility to the version
    # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)
    return data / np.float32(256)
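Note that `url` and `shape` are free variables resolved from the enclosing MNIST loader: `offset=16` skips the IDX image header (magic number, image count, rows and columns as four 32-bit integers), and `shape` decides whether the vectors become e.g. `(-1, 784)` or `(-1, 28, 28, 1)`. A hypothetical call under those assumptions:

# Hypothetical values; in TensorLayer they come from the surrounding loader.
url = 'http://yann.lecun.com/exdb/mnist/'
shape = (-1, 28 * 28)  # flatten each image to a 784-vector
X_train = load_mnist_images(path='data/mnist', filename='train-images-idx3-ubyte.gz')
print(X_train.shape, X_train.dtype)  # expected: (60000, 784) float32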
Example #7
def load_matt_mahoney_text8_dataset(path='data'):
    """Load Matt Mahoney's dataset.

    Download a text file from Matt Mahoney's website
    if not present, and make sure it's the right size.
    Extract the first file enclosed in a zip file as a list of words.
    This dataset can be used for Word Embedding.

    Parameters
    ----------
    path : str
        The path that the data is downloaded to; the default is ``data/mm_test8/``.

    Returns
    --------
    list of str
        The raw text data e.g. [.... 'their', 'families', 'who', 'were', 'expelled', 'from', 'jerusalem', ...]

    Examples
    --------
    >>> words = tl.files.load_matt_mahoney_text8_dataset()
    >>> print('Data size', len(words))

    """
    path = os.path.join(path, 'mm_test8')
    logging.info("Load or Download matt_mahoney_text8 Dataset > {}".format(path))

    filename = 'text8.zip'
    url = 'http://mattmahoney.net/dc/'
    maybe_download_and_extract(filename, path, url, expected_bytes=31344016)

    with zipfile.ZipFile(os.path.join(path, filename)) as f:
        word_list = f.read(f.namelist()[0]).split()
        for idx, _ in enumerate(word_list):
            word_list[idx] = word_list[idx].decode()
    return word_list
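A common next step for word embedding is to keep only the most frequent words and map the rest to an `UNK` id; a minimal sketch under that convention (the ids and vocabulary size are illustrative):

import collections

words = load_matt_mahoney_text8_dataset()
vocabulary_size = 50000
counter = collections.Counter(words)
word_to_id = {'UNK': 0}                          # id 0 reserved for rare words
for word, _freq in counter.most_common(vocabulary_size - 1):
    word_to_id[word] = len(word_to_id)
data = [word_to_id.get(w, 0) for w in words]     # corpus as integer ids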
Example #8
def get_wmt_enfr_dev_set(path):
    """Download the WMT en-fr dev set (newstest2013) to directory unless it's there."""
    filename = "dev-v2.tgz"
    dev_file = maybe_download_and_extract(filename, path, _WMT_ENFR_DEV_URL, extract=False)
    dev_name = "newstest2013"
    dev_path = os.path.join(path, dev_name)
    if not (gfile.Exists(dev_path + ".fr") and gfile.Exists(dev_path + ".en")):
        logging.info("Extracting tgz file %s" % dev_file)
        with tarfile.open(dev_file, "r:gz") as dev_tar:
            fr_dev_file = dev_tar.getmember("dev/" + dev_name + ".fr")
            en_dev_file = dev_tar.getmember("dev/" + dev_name + ".en")
            fr_dev_file.name = dev_name + ".fr"  # Extract without "dev/" prefix.
            en_dev_file.name = dev_name + ".en"
            dev_tar.extract(fr_dev_file, path)
            dev_tar.extract(en_dev_file, path)
    return dev_path
Example #9
def load_ptb_dataset(path='data'):
    """Load Penn TreeBank (PTB) dataset.

    It is used in many LANGUAGE MODELING papers,
    including "Empirical Evaluation and Combination of Advanced Language
    Modeling Techniques", "Recurrent Neural Network Regularization".
    It consists of 929k training words, 73k validation words, and 82k test
    words. It has 10k words in its vocabulary.

    Parameters
    ----------
    path : str
        The path that the data is downloaded to; the default is ``data/ptb/``.

    Returns
    --------
    train_data, valid_data, test_data : list of int
        The training, validation and test data in integer format.
    vocab_size : int
        The vocabulary size.

    Examples
    --------
    >>> train_data, valid_data, test_data, vocab_size = tl.files.load_ptb_dataset()

    References
    ---------------
    - ``from tensorflow.models.rnn.ptb import reader``
    - `Manual download <http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz>`__

    Notes
    ------
    - If you want to get the raw data, see the source code.

    """
    path = os.path.join(path, 'ptb')
    logging.info("Load or Download Penn TreeBank (PTB) dataset > {}".format(path))

    # Maybe download and uncompress the tar, or load existing files
    filename = 'simple-examples.tgz'
    url = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/'
    maybe_download_and_extract(filename, path, url, extract=True)

    data_path = os.path.join(path, 'simple-examples', 'data')
    train_path = os.path.join(data_path, "ptb.train.txt")
    valid_path = os.path.join(data_path, "ptb.valid.txt")
    test_path = os.path.join(data_path, "ptb.test.txt")

    word_to_id = nlp.build_vocab(nlp.read_words(train_path))

    train_data = nlp.words_to_word_ids(nlp.read_words(train_path), word_to_id)
    valid_data = nlp.words_to_word_ids(nlp.read_words(valid_path), word_to_id)
    test_data = nlp.words_to_word_ids(nlp.read_words(test_path), word_to_id)
    vocab_size = len(word_to_id)

    # logging.info(nlp.read_words(train_path)) # ... 'according', 'to', 'mr.', '<unk>', '<eos>']
    # logging.info(train_data)                 # ...  214,         5,    23,    1,       2]
    # logging.info(word_to_id)                 # ... 'beyond': 1295, 'anti-nuclear': 9599, 'trouble': 1520, '<eos>': 2 ... }
    # logging.info(vocabulary)                 # 10000
    # exit()
    return train_data, valid_data, test_data, vocab_size
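Since the PTB splits come back as flat lists of word ids, language-model training usually reshapes them into shifted (input, target) pairs. A minimal sketch of that preparation (the sequence length is illustrative):

import numpy as np

train_data, valid_data, test_data, vocab_size = load_ptb_dataset()
seq_len = 20
n = (len(train_data) - 1) // seq_len
ids = np.array(train_data[:n * seq_len + 1])
inputs = ids[:-1].reshape(n, seq_len)   # w_0 ... w_{t-1}
targets = ids[1:].reshape(n, seq_len)   # w_1 ... w_t, i.e. inputs shifted by one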
Example #10
def load_imdb_dataset(
        path='data', nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113, start_char=1, oov_char=2,
        index_from=3
):
    """Load IMDB dataset.

    Parameters
    ----------
    path : str
        The path that the data is downloaded to; the default is ``data/imdb/``.
    nb_words : int
        Number of words to get.
    skip_top : int
        Top most frequent words to ignore (they will appear as oov_char value in the sequence data).
    maxlen : int
        Maximum sequence length. Any longer sequence will be dropped.
    test_split : float
        Fraction of the dataset to be used as test data.
    seed : int
        Seed for reproducible data shuffling.
    start_char : int
        The start of a sequence will be marked with this character. Set to 1 because 0 is usually the padding character.
    oov_char : int
        Words that were cut out because of the num_words or skip_top limit will be replaced with this character.
    index_from : int
        Index actual words with this index and higher.

    Examples
    --------
    >>> X_train, y_train, X_test, y_test = tl.files.load_imdb_dataset(
    ...                                 nb_words=20000, test_split=0.2)
    >>> print('X_train.shape', X_train.shape)
    ... (20000,)  [[1, 62, 74, ... 1033, 507, 27],[1, 60, 33, ... 13, 1053, 7]..]
    >>> print('y_train.shape', y_train.shape)
    ... (20000,)  [1 0 0 ..., 1 0 1]

    References
    -----------
    - `Modified from keras. <https://github.com/fchollet/keras/blob/master/keras/datasets/imdb.py>`__

    """
    path = os.path.join(path, 'imdb')

    filename = "imdb.pkl"
    url = 'https://s3.amazonaws.com/text-datasets/'
    maybe_download_and_extract(filename, path, url)

    if filename.endswith(".gz"):
        f = gzip.open(os.path.join(path, filename), 'rb')
    else:
        f = open(os.path.join(path, filename), 'rb')

    X, labels = cPickle.load(f)
    f.close()

    np.random.seed(seed)
    np.random.shuffle(X)
    np.random.seed(seed)
    np.random.shuffle(labels)

    if start_char is not None:
        X = [[start_char] + [w + index_from for w in x] for x in X]
    elif index_from:
        X = [[w + index_from for w in x] for x in X]

    if maxlen:
        new_X = []
        new_labels = []
        for x, y in zip(X, labels):
            if len(x) < maxlen:
                new_X.append(x)
                new_labels.append(y)
        X = new_X
        labels = new_labels
    if not X:
        raise Exception(
            'After filtering for sequences shorter than maxlen=' + str(maxlen) + ', no sequence was kept. '
            'Increase maxlen.'
        )
    if not nb_words:
        nb_words = max([max(x) for x in X])

    # by convention, use 2 as OOV word
    # reserve 'index_from' (=3 by default) characters: 0 (padding), 1 (start), 2 (OOV)
    if oov_char is not None:
        X = [[oov_char if (w >= nb_words or w < skip_top) else w for w in x] for x in X]
    else:
        nX = []
        for x in X:
            nx = []
            for w in x:
                if (w >= nb_words or w < skip_top):
                    nx.append(w)
            nX.append(nx)
        X = nX

    X_train = np.array(X[:int(len(X) * (1 - test_split))])
    y_train = np.array(labels[:int(len(X) * (1 - test_split))])

    X_test = np.array(X[int(len(X) * (1 - test_split)):])
    y_test = np.array(labels[int(len(X) * (1 - test_split)):])

    return X_train, y_train, X_test, y_test
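Because sequences keep their natural lengths, `X_train` here is an object array of Python lists. Models that expect a dense batch need padding first; a minimal sketch, assuming the docstring's convention that id 0 is the padding character:

import numpy as np

def pad_sequences_simple(seqs, maxlen, pad_value=0):
    """Left-align each sequence in a (len(seqs), maxlen) int array, padding with pad_value."""
    out = np.full((len(seqs), maxlen), pad_value, dtype=np.int64)
    for i, s in enumerate(seqs):
        s = s[:maxlen]                  # truncate overly long sequences
        out[i, :len(s)] = s
    return out

X_train_pad = pad_sequences_simple(X_train, maxlen=100)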
Example #11
def load_voc_dataset(path='data',
                     dataset='2012',
                     contain_classes_in_person=False):
    """Pascal VOC 2007/2012 Dataset.

    It has 20 objects:
    aeroplane, bicycle, bird, boat, bottle, bus, car, cat, chair, cow, diningtable, dog, horse, motorbike, person, pottedplant, sheep, sofa, train, tvmonitor
    and 3 additional classes for person: head, hand and foot.

    Parameters
    -----------
    path : str
        The path that the data is downloaded to; the default is ``data/VOC``.
    dataset : str
        The VOC dataset version, `2012`, `2007`, `2007test` or `2012test`. We usually train the model on `2007+2012` and test it on `2007test`.
    contain_classes_in_person : boolean
        Whether to include the head, hand and foot annotations; default is False.

    Returns
    ---------
    imgs_file_list : list of str
        Full paths of all images.
    imgs_semseg_file_list : list of str
        Full paths of all maps for semantic segmentation. Note that not all images have this map!
    imgs_insseg_file_list : list of str
        Full paths of all maps for instance segmentation. Note that not all images have this map!
    imgs_ann_file_list : list of str
        Full paths of all annotations for bounding box and object class; all images have these annotations.
    classes : list of str
        Classes in order.
    classes_in_person : list of str
        Classes in person.
    classes_dict : dictionary
        Class label to integer.
    n_objs_list : list of int
        Number of objects in all images in ``imgs_file_list`` in order.
    objs_info_list : list of str
        Darknet format for the annotation of all images in ``imgs_file_list`` in order. ``[class_id x_centre y_centre width height]`` in ratio format.
    objs_info_dicts : dictionary
        The annotation of all images in ``imgs_file_list``, ``{imgs_file_list : dictionary for annotation}``,
        format from `TensorFlow/Models/object-detection <https://github.com/tensorflow/models/blob/master/object_detection/create_pascal_tf_record.py>`__.

    Examples
    ----------
    >>> imgs_file_list, imgs_semseg_file_list, imgs_insseg_file_list, imgs_ann_file_list, \
    ...     classes, classes_in_person, classes_dict, \
    ...     n_objs_list, objs_info_list, objs_info_dicts = tl.files.load_voc_dataset(dataset="2012", contain_classes_in_person=False)
    >>> idx = 26
    >>> print(classes)
    ... ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
    >>> print(classes_dict)
    ... {'sheep': 16, 'horse': 12, 'bicycle': 1, 'bottle': 4, 'cow': 9, 'sofa': 17, 'car': 6, 'dog': 11, 'cat': 7, 'person': 14, 'train': 18, 'diningtable': 10, 'aeroplane': 0, 'bus': 5, 'pottedplant': 15, 'tvmonitor': 19, 'chair': 8, 'bird': 2, 'boat': 3, 'motorbike': 13}
    >>> print(imgs_file_list[idx])
    ... data/VOC/VOC2012/JPEGImages/2007_000423.jpg
    >>> print(n_objs_list[idx])
    ... 2
    >>> print(imgs_ann_file_list[idx])
    ... data/VOC/VOC2012/Annotations/2007_000423.xml
    >>> print(objs_info_list[idx])
    ... 14 0.173 0.461333333333 0.142 0.496
    ... 14 0.828 0.542666666667 0.188 0.594666666667
    >>> ann = tl.prepro.parse_darknet_ann_str_to_list(objs_info_list[idx])
    >>> print(ann)
    ... [[14, 0.173, 0.461333333333, 0.142, 0.496], [14, 0.828, 0.542666666667, 0.188, 0.594666666667]]
    >>> c, b = tl.prepro.parse_darknet_ann_list_to_cls_box(ann)
    >>> print(c, b)
    ... [14, 14] [[0.173, 0.461333333333, 0.142, 0.496], [0.828, 0.542666666667, 0.188, 0.594666666667]]

    References
    -------------
    - `Pascal VOC2012 Website <https://pjreddie.com/projects/pascal-voc-dataset-mirror/>`__.
    - `Pascal VOC2007 Website <https://pjreddie.com/projects/pascal-voc-dataset-mirror/>`__.

    """
    path = os.path.join(path, 'VOC')

    def _recursive_parse_xml_to_dict(xml):
        """Recursively parses XML contents to python dict.

        We assume that `object` tags are the only ones that can appear
        multiple times at the same level of a tree.

        Args:
            xml: xml tree obtained by parsing XML file contents using lxml.etree

        Returns:
            Python dictionary holding XML contents.

        """
        if len(xml) == 0:  # leaf node: no children, so return its text
            return {xml.tag: xml.text}
        result = {}
        for child in xml:
            child_result = _recursive_parse_xml_to_dict(child)
            if child.tag != 'object':
                result[child.tag] = child_result[child.tag]
            else:
                if child.tag not in result:
                    result[child.tag] = []
                result[child.tag].append(child_result[child.tag])
        return {xml.tag: result}

    import xml.etree.ElementTree as ET

    if dataset == "2012":
        url = "http://pjreddie.com/media/files/"
        tar_filename = "VOCtrainval_11-May-2012.tar"
        extracted_filename = "VOC2012"  #"VOCdevkit/VOC2012"
        logging.info("    [============= VOC 2012 =============]")
    elif dataset == "2012test":
        extracted_filename = "VOC2012test"  #"VOCdevkit/VOC2012"
        logging.info("    [============= VOC 2012 Test Set =============]")
        logging.info(
            "    \nNote: 2012test only has person annotations, so 2007test is highly recommended for testing!\n"
        )
        import time
        time.sleep(3)
        if os.path.isdir(os.path.join(path, extracted_filename)) is False:
            logging.info(
                "For VOC 2012 Test data - online registration required")
            logging.info(
                " Please download VOC2012test.tar from:  \n register: http://host.robots.ox.ac.uk:8080 \n voc2012 : http://host.robots.ox.ac.uk:8080/eval/challenges/voc2012/ \ndownload: http://host.robots.ox.ac.uk:8080/eval/downloads/VOC2012test.tar"
            )
            logging.info(
                " unzip VOC2012test.tar, rename the folder to VOC2012test and put it into %s"
                % path)
            exit()
        # # http://host.robots.ox.ac.uk:8080/eval/downloads/VOC2012test.tar
        # url = "http://host.robots.ox.ac.uk:8080/eval/downloads/"
        # tar_filename = "VOC2012test.tar"
    elif dataset == "2007":
        url = "http://pjreddie.com/media/files/"
        tar_filename = "VOCtrainval_06-Nov-2007.tar"
        extracted_filename = "VOC2007"
        logging.info("    [============= VOC 2007 =============]")
    elif dataset == "2007test":
        # http://host.robots.ox.ac.uk/pascal/VOC/voc2007/index.html#testdata
        # http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar
        url = "http://pjreddie.com/media/files/"
        tar_filename = "VOCtest_06-Nov-2007.tar"
        extracted_filename = "VOC2007test"
        logging.info("    [============= VOC 2007 Test Set =============]")
    else:
        raise Exception(
            "Please set the dataset arg to 2012, 2012test, 2007 or 2007test.")

    # download dataset
    if dataset != "2012test":
        from sys import platform as _platform
        if folder_exists(os.path.join(path, extracted_filename)) is False:
            logging.info("[VOC] {} is nonexistent in {}".format(
                extracted_filename, path))
            maybe_download_and_extract(tar_filename, path, url, extract=True)
            del_file(os.path.join(path, tar_filename))
            if dataset == "2012":
                if _platform == "win32":
                    os.system("move {}\\VOCdevkit\\VOC2012 {}\\VOC2012".format(path, path))
                else:
                    os.system("mv {}/VOCdevkit/VOC2012 {}/VOC2012".format(path, path))
            elif dataset == "2007":
                if _platform == "win32":
                    os.system("move {}\\VOCdevkit\\VOC2007 {}\\VOC2007".format(path, path))
                else:
                    os.system("mv {}/VOCdevkit/VOC2007 {}/VOC2007".format(path, path))
            elif dataset == "2007test":
                if _platform == "win32":
                    os.system("move {}\\VOCdevkit\\VOC2007 {}\\VOC2007test".format(path, path))
                else:
                    os.system("mv {}/VOCdevkit/VOC2007 {}/VOC2007test".format(path, path))
            del_folder(os.path.join(path, 'VOCdevkit'))
    # object classes(labels)  NOTE: YOU CAN CUSTOMIZE THIS LIST
    classes = [
        "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
        "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
        "pottedplant", "sheep", "sofa", "train", "tvmonitor"
    ]
    if contain_classes_in_person:
        classes_in_person = ["head", "hand", "foot"]
    else:
        classes_in_person = []

    classes += classes_in_person  # use extra 3 classes for person

    classes_dict = utils.list_string_to_dict(classes)
    logging.info("[VOC] object classes {}".format(classes_dict))

    # 1. image path list
    # folder_imgs = path+"/"+extracted_filename+"/JPEGImages/"
    folder_imgs = os.path.join(path, extracted_filename, "JPEGImages")
    imgs_file_list = load_file_list(path=folder_imgs,
                                    regx='\\.jpg',
                                    printable=False)
    logging.info("[VOC] {} images found".format(len(imgs_file_list)))

    imgs_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2]))  # 2007_000027.jpg --> 2007000027

    imgs_file_list = [os.path.join(folder_imgs, s) for s in imgs_file_list]
    # logging.info('IM',imgs_file_list[0::3333], imgs_file_list[-1])
    if dataset != "2012test":
        ##======== 2. semantic segmentation maps path list
        # folder_semseg = path+"/"+extracted_filename+"/SegmentationClass/"
        folder_semseg = os.path.join(path, extracted_filename,
                                     "SegmentationClass")
        imgs_semseg_file_list = load_file_list(path=folder_semseg,
                                               regx='\\.png',
                                               printable=False)
        logging.info("[VOC] {} maps for semantic segmentation found".format(
            len(imgs_semseg_file_list)))
        imgs_semseg_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2]))  # 2007_000032.png --> 2007000032
        imgs_semseg_file_list = [os.path.join(folder_semseg, s) for s in imgs_semseg_file_list]
        # logging.info('Semantic Seg IM',imgs_semseg_file_list[0::333], imgs_semseg_file_list[-1])
        ##======== 3. instance segmentation maps path list
        # folder_insseg = path+"/"+extracted_filename+"/SegmentationObject/"
        folder_insseg = os.path.join(path, extracted_filename,
                                     "SegmentationObject")
        imgs_insseg_file_list = load_file_list(path=folder_insseg,
                                               regx='\\.png',
                                               printable=False)
        logging.info("[VOC] {} maps for instance segmentation found".format(
            len(imgs_insseg_file_list)))
        imgs_insseg_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2]))  # 2007_000032.png --> 2007000032
        imgs_insseg_file_list = [os.path.join(folder_insseg, s) for s in imgs_insseg_file_list]
        # logging.info('Instance Seg IM',imgs_insseg_file_list[0::333], imgs_insseg_file_list[-1])
    else:
        imgs_semseg_file_list = []
        imgs_insseg_file_list = []
    # 4. annotations for bounding box and object class
    # folder_ann = path+"/"+extracted_filename+"/Annotations/"
    folder_ann = os.path.join(path, extracted_filename, "Annotations")
    imgs_ann_file_list = load_file_list(path=folder_ann,
                                        regx='\\.xml',
                                        printable=False)
    logging.info(
        "[VOC] {} XML annotation files for bounding box and object class found"
        .format(len(imgs_ann_file_list)))
    imgs_ann_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2]))  # 2007_000027.xml --> 2007000027
    imgs_ann_file_list = [os.path.join(folder_ann, s) for s in imgs_ann_file_list]
    # logging.info('ANN',imgs_ann_file_list[0::3333], imgs_ann_file_list[-1])

    if dataset == "2012test":  # remove unused images in JPEG folder
        imgs_file_list_new = []
        for ann in imgs_ann_file_list:
            ann = os.path.split(ann)[-1].split('.')[0]
            for im in imgs_file_list:
                if ann in im:
                    imgs_file_list_new.append(im)
                    break
        imgs_file_list = imgs_file_list_new
        logging.info("[VOC] keep %d images" % len(imgs_file_list_new))

    # parse XML annotations
    def convert(size, box):
        dw = 1. / size[0]
        dh = 1. / size[1]
        x = (box[0] + box[1]) / 2.0
        y = (box[2] + box[3]) / 2.0
        w = box[1] - box[0]
        h = box[3] - box[2]
        x = x * dw
        w = w * dw
        y = y * dh
        h = h * dh
        return x, y, w, h

    def convert_annotation(file_name):
        """Given VOC2012 XML Annotations, returns number of objects and info."""
        in_file = open(file_name)
        out_file = ""
        tree = ET.parse(in_file)
        root = tree.getroot()
        size = root.find('size')
        w = int(size.find('width').text)
        h = int(size.find('height').text)
        n_objs = 0

        for obj in root.iter('object'):
            if dataset != "2012test":
                difficult = obj.find('difficult').text
                cls = obj.find('name').text
                if cls not in classes or int(difficult) == 1:
                    continue
            else:
                cls = obj.find('name').text
                if cls not in classes:
                    continue
            cls_id = classes.index(cls)
            xmlbox = obj.find('bndbox')
            b = (float(xmlbox.find('xmin').text),
                 float(xmlbox.find('xmax').text),
                 float(xmlbox.find('ymin').text),
                 float(xmlbox.find('ymax').text))
            bb = convert((w, h), b)

            out_file += str(cls_id) + " " + " ".join([str(a)
                                                      for a in bb]) + '\n'
            n_objs += 1
            if cls == "person":  # person boxes may carry part annotations
                for part in obj.iter('part'):
                    cls = part.find('name').text
                    if cls not in classes_in_person:
                        continue
                    cls_id = classes.index(cls)
                    xmlbox = part.find('bndbox')
                    b = (float(xmlbox.find('xmin').text),
                         float(xmlbox.find('xmax').text),
                         float(xmlbox.find('ymin').text),
                         float(xmlbox.find('ymax').text))
                    bb = convert((w, h), b)
                    # out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
                    out_file += str(cls_id) + " " + " ".join(
                        [str(a) for a in bb]) + '\n'
                    n_objs += 1
        in_file.close()
        return n_objs, out_file

    logging.info("[VOC] Parsing xml annotations files")
    n_objs_list = []
    objs_info_list = []  # Darknet Format list of string
    objs_info_dicts = {}
    for idx, ann_file in enumerate(imgs_ann_file_list):
        n_objs, objs_info = convert_annotation(ann_file)
        n_objs_list.append(n_objs)
        objs_info_list.append(objs_info)
        with tf.gfile.GFile(ann_file, 'r') as fid:
            xml_str = fid.read()
        xml = etree.fromstring(xml_str)
        data = _recursive_parse_xml_to_dict(xml)['annotation']
        objs_info_dicts.update({imgs_file_list[idx]: data})

    return imgs_file_list, imgs_semseg_file_list, imgs_insseg_file_list, imgs_ann_file_list, classes, classes_in_person, classes_dict, n_objs_list, objs_info_list, objs_info_dicts
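As a sanity check on the `convert` helper above, which turns a VOC box `(xmin, xmax, ymin, ymax)` into a darknet-style normalized `(x_centre, y_centre, width, height)`: for a 500x375 image and a box spanning x 50..150 and y 100..300,

x, y, w, h = convert((500, 375), (50.0, 150.0, 100.0, 300.0))
print(x, y, w, h)  # 0.2 0.5333... 0.2 0.5333...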
Example #12
def load_flickr25k_dataset(tag='sky',
                           path="data",
                           n_threads=50,
                           printable=False):
    """Load Flickr25K dataset.

    Returns a list of images with a given tag from the Flickr25k dataset;
    it will download Flickr25k from `the official website <http://press.liacs.nl/mirflickr/mirdownload.html>`__
    the first time you use it.

    Parameters
    ------------
    tag : str or None
        What images to return.
            - If you want to get images with tag, use string like 'dog', 'red', see `Flickr Search <https://www.flickr.com/search/>`__.
            - If you want to get all images, set to ``None``.

    path : str
        The path that the data is downloaded to; the default is ``data/flickr25k/``.
    n_threads : int
        The number of threads used to read the images.
    printable : boolean
        Whether to print information when reading images; default is ``False``.

    Examples
    -----------
    Get images with tag of sky

    >>> images = tl.files.load_flickr25k_dataset(tag='sky')

    Get all images

    >>> images = tl.files.load_flickr25k_dataset(tag=None, n_threads=100, printable=True)

    """
    path = os.path.join(path, 'flickr25k')

    filename = 'mirflickr25k.zip'
    url = 'http://press.liacs.nl/mirflickr/mirflickr25k/'

    # download dataset
    if folder_exists(os.path.join(path, "mirflickr")) is False:
        logging.info("[*] Flickr25k is nonexistent in {}".format(path))
        maybe_download_and_extract(filename, path, url, extract=True)
        del_file(os.path.join(path, filename))

    # return images by the given tag.
    # 1. image path list
    folder_imgs = os.path.join(path, "mirflickr")
    path_imgs = load_file_list(path=folder_imgs,
                               regx='\\.jpg',
                               printable=False)
    path_imgs.sort(key=natural_keys)

    # 2. tag path list
    folder_tags = os.path.join(path, "mirflickr", "meta", "tags")
    path_tags = load_file_list(path=folder_tags,
                               regx='\\.txt',
                               printable=False)
    path_tags.sort(key=natural_keys)

    # 3. select images
    if tag is None:
        logging.info("[Flickr25k] reading all images")
    else:
        logging.info("[Flickr25k] reading images with tag: {}".format(tag))
    images_list = []
    for idx, _v in enumerate(path_tags):
        tags = read_file(os.path.join(folder_tags, path_tags[idx])).split('\n')
        # logging.info(idx+1, tags)
        if tag is None or tag in tags:
            images_list.append(path_imgs[idx])

    images = visualize.read_images(images_list,
                                   folder_imgs,
                                   n_threads=n_threads,
                                   printable=printable)
    return images
Example #13
def load_imdb_dataset(
        path='data', nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113, start_char=1, oov_char=2,
        index_from=3
):
    """Load IMDB dataset.

    Parameters
    ----------
    path : str
        The path that the data is downloaded to; the default is ``data/imdb/``.
    nb_words : int
        Number of words to get.
    skip_top : int
        Top most frequent words to ignore (they will appear as oov_char value in the sequence data).
    maxlen : int
        Maximum sequence length. Any longer sequence will be dropped.
    test_split : float
        Fraction of the dataset to be used as test data.
    seed : int
        Seed for reproducible data shuffling.
    start_char : int
        The start of a sequence will be marked with this character. Set to 1 because 0 is usually the padding character.
    oov_char : int
        Words that were cut out because of the num_words or skip_top limit will be replaced with this character.
    index_from : int
        Index actual words with this index and higher.

    Examples
    --------
    >>> X_train, y_train, X_test, y_test = tl.files.load_imdb_dataset(
    ...                                 nb_words=20000, test_split=0.2)
    >>> print('X_train.shape', X_train.shape)
    (20000,)  [[1, 62, 74, ... 1033, 507, 27],[1, 60, 33, ... 13, 1053, 7]..]
    >>> print('y_train.shape', y_train.shape)
    (20000,)  [1 0 0 ..., 1 0 1]

    References
    -----------
    - `Modified from keras. <https://github.com/fchollet/keras/blob/master/keras/datasets/imdb.py>`__

    """
    path = os.path.join(path, 'imdb')

    filename = "imdb.pkl"
    url = 'https://s3.amazonaws.com/text-datasets/'
    maybe_download_and_extract(filename, path, url)

    if filename.endswith(".gz"):
        f = gzip.open(os.path.join(path, filename), 'rb')
    else:
        f = open(os.path.join(path, filename), 'rb')

    X, labels = pickle.load(f)
    f.close()

    np.random.seed(seed)
    np.random.shuffle(X)
    np.random.seed(seed)
    np.random.shuffle(labels)

    if start_char is not None:
        X = [[start_char] + [w + index_from for w in x] for x in X]
    elif index_from:
        X = [[w + index_from for w in x] for x in X]

    if maxlen:
        new_X = []
        new_labels = []
        for x, y in zip(X, labels):
            if len(x) < maxlen:
                new_X.append(x)
                new_labels.append(y)
        X = new_X
        labels = new_labels
    if not X:
        raise Exception(
            'After filtering for sequences shorter than maxlen=' + str(maxlen) + ', no sequence was kept. '
            'Increase maxlen.'
        )
    if not nb_words:
        nb_words = max([max(x) for x in X])

    # by convention, use 2 as OOV word
    # reserve 'index_from' (=3 by default) characters: 0 (padding), 1 (start), 2 (OOV)
    if oov_char is not None:
        X = [[oov_char if (w >= nb_words or w < skip_top) else w for w in x] for x in X]
    else:
        nX = []
        for x in X:
            nx = []
            for w in x:
                if (w >= nb_words or w < skip_top):
                    nx.append(w)
            nX.append(nx)
        X = nX

    X_train = np.array(X[:int(len(X) * (1 - test_split))])
    y_train = np.array(labels[:int(len(X) * (1 - test_split))])

    X_test = np.array(X[int(len(X) * (1 - test_split)):])
    y_test = np.array(labels[int(len(X) * (1 - test_split)):])

    return X_train, y_train, X_test, y_test
Example #14
def load_flickr1M_dataset(tag='sky', size=10, path="data", n_threads=50, printable=False):
    """Load Flickr1M dataset.

    Returns a list of images with a given tag from the Flickr1M dataset;
    it will download Flickr1M from `the official website <http://press.liacs.nl/mirflickr/mirdownload.html>`__
    the first time you use it.

    Parameters
    ------------
    tag : str or None
        What images to return.
            - If you want to get images with tag, use string like 'dog', 'red', see `Flickr Search <https://www.flickr.com/search/>`__.
            - If you want to get all images, set to ``None``.

    size : int
        Integer between 1 and 10. 1 means 100k images, 5 means 500k images, 10 means all 1 million images. Default is 10.
    path : str
        The path that the data is downloaded to; the default is ``data/flickr1M/``.
    n_threads : int
        The number of threads used to read the images.
    printable : boolean
        Whether to print information when reading images; default is ``False``.

    Examples
    ----------
    Use 200k images

    >>> images = tl.files.load_flickr1M_dataset(tag='zebra', size=2)

    Use 1 Million images

    >>> images = tl.files.load_flickr1M_dataset(tag='zebra')

    """
    import shutil

    path = os.path.join(path, 'flickr1M')
    logging.info("[Flickr1M] using {}% of images = {}".format(size * 10, size * 100000))
    images_zip = [
        'images0.zip', 'images1.zip', 'images2.zip', 'images3.zip', 'images4.zip', 'images5.zip', 'images6.zip',
        'images7.zip', 'images8.zip', 'images9.zip'
    ]
    tag_zip = 'tags.zip'
    url = 'http://press.liacs.nl/mirflickr/mirflickr1m/'

    # download dataset
    for image_zip in images_zip[0:size]:
        image_folder = image_zip.split(".")[0]
        # logging.info(path+"/"+image_folder)
        if folder_exists(os.path.join(path, image_folder)) is False:
            # logging.info(image_zip)
            logging.info("[Flickr1M] {} is missing in {}".format(image_folder, path))
            maybe_download_and_extract(image_zip, path, url, extract=True)
            del_file(os.path.join(path, image_zip))
            # os.system("mv {} {}".format(os.path.join(path, 'images'), os.path.join(path, image_folder)))
            shutil.move(os.path.join(path, 'images'), os.path.join(path, image_folder))
        else:
            logging.info("[Flickr1M] {} exists in {}".format(image_folder, path))

    # download tag
    if folder_exists(os.path.join(path, "tags")) is False:
        logging.info("[Flickr1M] tag files are nonexistent in {}".format(path))
        maybe_download_and_extract(tag_zip, path, url, extract=True)
        del_file(os.path.join(path, tag_zip))
    else:
        logging.info("[Flickr1M] tags exists in {}".format(path))

    # 1. image path list
    images_list = []
    images_folder_list = []
    for i in range(0, size):
        images_folder_list += load_folder_list(path=os.path.join(path, 'images%d' % i))
    images_folder_list.sort(key=lambda s: int(os.path.basename(s)))  # folder/images/ddd

    for folder in images_folder_list[0:size * 10]:
        tmp = load_file_list(path=folder, regx='\\.jpg', printable=False)
        tmp.sort(key=lambda s: int(s.split('.')[-2]))  # ddd.jpg
        images_list.extend([os.path.join(folder, x) for x in tmp])

    # 2. tag path list
    tag_list = []
    tag_folder_list = load_folder_list(os.path.join(path, "tags"))

    # tag_folder_list.sort(key=lambda s: int(s.split("/")[-1]))  # folder/images/ddd
    tag_folder_list.sort(key=lambda s: int(os.path.basename(s)))

    for folder in tag_folder_list[0:size * 10]:
        tmp = load_file_list(path=folder, regx='\\.txt', printable=False)
        tmp.sort(key=lambda s: int(s.split('.')[-2]))  # ddd.txt
        tmp = [os.path.join(folder, s) for s in tmp]
        tag_list += tmp

    # 3. select images
    logging.info("[Flickr1M] searching tag: {}".format(tag))
    select_images_list = []
    for idx, _val in enumerate(tag_list):
        tags = read_file(tag_list[idx]).split('\n')
        if tag in tags:
            select_images_list.append(images_list[idx])

    logging.info("[Flickr1M] reading images with tag: {}".format(tag))
    images = visualize.read_images(select_images_list, '', n_threads=n_threads, printable=printable)
    return images
Example #15
def load_mpii_pose_dataset(path='data', is_16_pos_only=False):
    """Load MPII Human Pose Dataset.

    Parameters
    -----------
    path : str
        The path that the data is downloaded to.
    is_16_pos_only : boolean
        If True, only return people with exactly 16 pose keypoints (usually used for single-person pose estimation).

    Returns
    ----------
    img_train_list : list of str
        The image file paths of the training data.
    ann_train_list : list of dict
        The annotations of the training data.
    img_test_list : list of str
        The image file paths of the testing data.
    ann_test_list : list of dict
        The annotations of the testing data.

    Examples
    --------
    >>> import pprint
    >>> import tensorlayer as tl
    >>> img_train_list, ann_train_list, img_test_list, ann_test_list = tl.files.load_mpii_pose_dataset()
    >>> image = tl.vis.read_image(img_train_list[0])
    >>> tl.vis.draw_mpii_pose_to_image(image, ann_train_list[0], 'image.png')
    >>> pprint.pprint(ann_train_list[0])

    References
    -----------
    - `MPII Human Pose Dataset. CVPR 14 <http://human-pose.mpi-inf.mpg.de>`__
    - `MPII Human Pose Models. CVPR 16 <http://pose.mpi-inf.mpg.de>`__
    - `MPII Human Shape, Poselet Conditioned Pictorial Structures and etc <http://pose.mpi-inf.mpg.de/#related>`__
    - `MPII Keypoints and ID <http://human-pose.mpi-inf.mpg.de/#download>`__
    """
    path = os.path.join(path, 'mpii_human_pose')
    logging.info("Load or Download MPII Human Pose > {}".format(path))

    # annotation
    url = "http://datasets.d2.mpi-inf.mpg.de/andriluka14cvpr/"
    tar_filename = "mpii_human_pose_v1_u12_2.zip"
    extracted_filename = "mpii_human_pose_v1_u12_2"
    if folder_exists(os.path.join(path, extracted_filename)) is False:
        logging.info("[MPII] (annotation) {} is nonexistent in {}".format(
            extracted_filename, path))
        maybe_download_and_extract(tar_filename, path, url, extract=True)
        del_file(os.path.join(path, tar_filename))

    # images
    url = "http://datasets.d2.mpi-inf.mpg.de/andriluka14cvpr/"
    tar_filename = "mpii_human_pose_v1.tar.gz"
    extracted_filename2 = "images"
    if folder_exists(os.path.join(path, extracted_filename2)) is False:
        logging.info("[MPII] (images) {} is nonexistent in {}".format(
            extracted_filename2, path))
        maybe_download_and_extract(tar_filename, path, url, extract=True)
        del_file(os.path.join(path, tar_filename))

    # parse annotation, format see http://human-pose.mpi-inf.mpg.de/#download
    import scipy.io as sio
    logging.info("reading annotations from mat file ...")
    # mat = sio.loadmat(os.path.join(path, extracted_filename, "mpii_human_pose_v1_u12_1.mat"))

    # def fix_wrong_joints(joint):    # https://github.com/mitmul/deeppose/blob/master/datasets/mpii_dataset.py
    #     if '12' in joint and '13' in joint and '2' in joint and '3' in joint:
    #         if ((joint['12'][0] < joint['13'][0]) and
    #                 (joint['3'][0] < joint['2'][0])):
    #             joint['2'], joint['3'] = joint['3'], joint['2']
    #         if ((joint['12'][0] > joint['13'][0]) and
    #                 (joint['3'][0] > joint['2'][0])):
    #             joint['2'], joint['3'] = joint['3'], joint['2']
    #     return joint

    ann_train_list = []
    ann_test_list = []
    img_train_list = []
    img_test_list = []

    def save_joints():
        # joint_data_fn = os.path.join(path, 'data.json')
        # fp = open(joint_data_fn, 'w')
        mat = sio.loadmat(
            os.path.join(path, extracted_filename,
                         "mpii_human_pose_v1_u12_1.mat"))

        for _, (anno, train_flag) in enumerate(  # all images
                zip(mat['RELEASE']['annolist'][0, 0][0],
                    mat['RELEASE']['img_train'][0, 0][0])):

            img_fn = anno['image']['name'][0, 0][0]
            train_flag = int(train_flag)

            # print(i, img_fn, train_flag) # DEBUG print all images

            if train_flag:
                img_train_list.append(img_fn)
                ann_train_list.append([])
            else:
                img_test_list.append(img_fn)
                ann_test_list.append([])

            head_rect = []
            if 'x1' in str(anno['annorect'].dtype):
                head_rect = zip([x1[0, 0] for x1 in anno['annorect']['x1'][0]],
                                [y1[0, 0] for y1 in anno['annorect']['y1'][0]],
                                [x2[0, 0] for x2 in anno['annorect']['x2'][0]],
                                [y2[0, 0] for y2 in anno['annorect']['y2'][0]])
            else:
                head_rect = []  # TODO

            if 'annopoints' in str(anno['annorect'].dtype):
                annopoints = anno['annorect']['annopoints'][0]
                head_x1s = anno['annorect']['x1'][0]
                head_y1s = anno['annorect']['y1'][0]
                head_x2s = anno['annorect']['x2'][0]
                head_y2s = anno['annorect']['y2'][0]

                for annopoint, head_x1, head_y1, head_x2, head_y2 in zip(
                        annopoints, head_x1s, head_y1s, head_x2s, head_y2s):
                    # if annopoint != []:
                    # if len(annopoint) != 0:
                    if annopoint.size:
                        head_rect = [
                            float(head_x1[0, 0]),
                            float(head_y1[0, 0]),
                            float(head_x2[0, 0]),
                            float(head_y2[0, 0])
                        ]

                        # joint coordinates
                        annopoint = annopoint['point'][0, 0]
                        j_id = [str(j_i[0, 0]) for j_i in annopoint['id'][0]]
                        x = [x[0, 0] for x in annopoint['x'][0]]
                        y = [y[0, 0] for y in annopoint['y'][0]]
                        joint_pos = {}
                        for _j_id, (_x, _y) in zip(j_id, zip(x, y)):
                            joint_pos[int(_j_id)] = [float(_x), float(_y)]
                        # joint_pos = fix_wrong_joints(joint_pos)

                        # visibility list
                        if 'is_visible' in str(annopoint.dtype):
                            vis = [
                                v[0] if v.size > 0 else [0]
                                for v in annopoint['is_visible'][0]
                            ]
                            vis = dict([(k, int(v[0])) if len(v) > 0 else v
                                        for k, v in zip(j_id, vis)])
                        else:
                            vis = None

                        # if len(joint_pos) == 16:
                        if (not is_16_pos_only) or (len(joint_pos) == 16):
                            # only use image with 16 key points / or use all
                            data = {
                                'filename': img_fn,
                                'train': train_flag,
                                'head_rect': head_rect,
                                'is_visible': vis,
                                'joint_pos': joint_pos
                            }
                            # print(json.dumps(data), file=fp)  # py3
                            if train_flag:
                                ann_train_list[-1].append(data)
                            else:
                                ann_test_list[-1].append(data)

    # def write_line(datum, fp):
    #     joints = sorted([[int(k), v] for k, v in datum['joint_pos'].items()])
    #     joints = np.array([j for i, j in joints]).flatten()
    #
    #     out = [datum['filename']]
    #     out.extend(joints)
    #     out = [str(o) for o in out]
    #     out = ','.join(out)
    #
    #     print(out, file=fp)

    # def split_train_test():
    #     # fp_test = open('data/mpii/test_joints.csv', 'w')
    #     fp_test = open(os.path.join(path, 'test_joints.csv'), 'w')
    #     # fp_train = open('data/mpii/train_joints.csv', 'w')
    #     fp_train = open(os.path.join(path, 'train_joints.csv'), 'w')
    #     # all_data = open('data/mpii/data.json').readlines()
    #     all_data = open(os.path.join(path, 'data.json')).readlines()
    #     N = len(all_data)
    #     N_test = int(N * 0.1)
    #     N_train = N - N_test
    #
    #     print('N:{}'.format(N))
    #     print('N_train:{}'.format(N_train))
    #     print('N_test:{}'.format(N_test))
    #
    #     np.random.seed(1701)
    #     perm = np.random.permutation(N)
    #     test_indices = perm[:N_test]
    #     train_indices = perm[N_test:]
    #
    #     print('train_indices:{}'.format(len(train_indices)))
    #     print('test_indices:{}'.format(len(test_indices)))
    #
    #     for i in train_indices:
    #         datum = json.loads(all_data[i].strip())
    #         write_line(datum, fp_train)
    #
    #     for i in test_indices:
    #         datum = json.loads(all_data[i].strip())
    #         write_line(datum, fp_test)

    save_joints()
    # split_train_test()  #

    ## read images dir
    logging.info("reading images list ...")
    img_dir = os.path.join(path, extracted_filename2)
    _img_list = load_file_list(path=os.path.join(path, extracted_filename2),
                               regx='\\.jpg',
                               printable=False)
    # ann_list = json.load(open(os.path.join(path, 'data.json')))
    # iterate over copies: deleting from a list while enumerating it skips entries
    for im in list(img_train_list):
        if im not in _img_list:
            print(
                'missing training image {} in {} (remove from img(ann)_train_list)'
                .format(im, img_dir))
            i = img_train_list.index(im)
            del img_train_list[i]
            del ann_train_list[i]
    for im in list(img_test_list):
        if im not in _img_list:
            print(
                'missing testing image {} in {} (remove from img(ann)_test_list)'
                .format(im, img_dir))
            i = img_test_list.index(im)
            del img_test_list[i]
            del ann_test_list[i]

    ## check annotation and images
    n_train_images = len(img_train_list)
    n_test_images = len(img_test_list)
    n_images = n_train_images + n_test_images
    logging.info("n_images: {} n_train_images: {} n_test_images: {}".format(
        n_images, n_train_images, n_test_images))
    n_train_ann = len(ann_train_list)
    n_test_ann = len(ann_test_list)
    n_ann = n_train_ann + n_test_ann
    logging.info("n_ann: {} n_train_ann: {} n_test_ann: {}".format(
        n_ann, n_train_ann, n_test_ann))
    n_train_people = len(sum(ann_train_list, []))
    n_test_people = len(sum(ann_test_list, []))
    n_people = n_train_people + n_test_people
    logging.info("n_people: {} n_train_people: {} n_test_people: {}".format(
        n_people, n_train_people, n_test_people))
    # add path to all image file name
    for i, value in enumerate(img_train_list):
        img_train_list[i] = os.path.join(img_dir, value)
    for i, value in enumerate(img_test_list):
        img_test_list[i] = os.path.join(img_dir, value)
    return img_train_list, ann_train_list, img_test_list, ann_test_list
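
The annotation records appended by save_joints() are plain dicts, so the returned lists can be consumed without any TensorLayer helper. A minimal sketch, assuming this is the same load_mpii_pose_dataset loader shown in full in the next example ('joint_pos' maps a joint id to [x, y], 'head_rect' is [x1, y1, x2, y2], and 'is_visible' may be None with string keys):

# Minimal consumption sketch for the returned lists (assumed layout as above).
img_train_list, ann_train_list, img_test_list, ann_test_list = load_mpii_pose_dataset()
for img_path, people in zip(img_train_list, ann_train_list):
    for person in people:  # zero or more annotated people per image
        x1, y1, x2, y2 = person['head_rect']
        for joint_id, (x, y) in person['joint_pos'].items():
            visible = (person['is_visible'] or {}).get(str(joint_id), 0)
            print(img_path, joint_id, x, y, visible)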
Example #22
0
def load_mpii_pose_dataset(path='data', is_16_pos_only=False):
    """Load MPII Human Pose Dataset.

    Parameters
    -----------
    path : str
        The path that the data is downloaded to.
    is_16_pos_only : boolean
        If True, only return the people that contain all 16 pose keypoints (usually used for single-person pose estimation).

    Returns
    ----------
    img_train_list : list of str
        The image file paths of the training data.
    ann_train_list : list of dict
        The annotations of the training data.
    img_test_list : list of str
        The image file paths of the testing data.
    ann_test_list : list of dict
        The annotations of the testing data.

    Examples
    --------
    >>> import pprint
    >>> import tensorlayer as tl
    >>> img_train_list, ann_train_list, img_test_list, ann_test_list = tl.files.load_mpii_pose_dataset()
    >>> image = tl.vis.read_image(img_train_list[0])
    >>> tl.vis.draw_mpii_pose_to_image(image, ann_train_list[0], 'image.png')
    >>> pprint.pprint(ann_train_list[0])

    References
    -----------
    - `MPII Human Pose Dataset. CVPR 14 <http://human-pose.mpi-inf.mpg.de>`__
    - `MPII Human Pose Models. CVPR 16 <http://pose.mpi-inf.mpg.de>`__
    - `MPII Human Shape, Poselet Conditioned Pictorial Structures and etc <http://pose.mpi-inf.mpg.de/#related>`__
    - `MPII Keypoints and ID <http://human-pose.mpi-inf.mpg.de/#download>`__
    """
    path = os.path.join(path, 'mpii_human_pose')
    logging.info("Load or Download MPII Human Pose > {}".format(path))

    # annotation
    url = "http://datasets.d2.mpi-inf.mpg.de/andriluka14cvpr/"
    tar_filename = "mpii_human_pose_v1_u12_2.zip"
    extracted_filename = "mpii_human_pose_v1_u12_2"
    if folder_exists(os.path.join(path, extracted_filename)) is False:
        logging.info("[MPII] (annotation) {} is nonexistent in {}".format(extracted_filename, path))
        maybe_download_and_extract(tar_filename, path, url, extract=True)
        del_file(os.path.join(path, tar_filename))

    # images
    url = "http://datasets.d2.mpi-inf.mpg.de/andriluka14cvpr/"
    tar_filename = "mpii_human_pose_v1.tar.gz"
    extracted_filename2 = "images"
    if folder_exists(os.path.join(path, extracted_filename2)) is False:
        logging.info("[MPII] (images) {} is nonexistent in {}".format(extracted_filename, path))
        maybe_download_and_extract(tar_filename, path, url, extract=True)
        del_file(os.path.join(path, tar_filename))

    # parse annotation, format see http://human-pose.mpi-inf.mpg.de/#download
    import scipy.io as sio
    logging.info("reading annotations from mat file ...")
    # mat = sio.loadmat(os.path.join(path, extracted_filename, "mpii_human_pose_v1_u12_1.mat"))

    # def fix_wrong_joints(joint):    # https://github.com/mitmul/deeppose/blob/master/datasets/mpii_dataset.py
    #     if '12' in joint and '13' in joint and '2' in joint and '3' in joint:
    #         if ((joint['12'][0] < joint['13'][0]) and
    #                 (joint['3'][0] < joint['2'][0])):
    #             joint['2'], joint['3'] = joint['3'], joint['2']
    #         if ((joint['12'][0] > joint['13'][0]) and
    #                 (joint['3'][0] > joint['2'][0])):
    #             joint['2'], joint['3'] = joint['3'], joint['2']
    #     return joint

    ann_train_list = []
    ann_test_list = []
    img_train_list = []
    img_test_list = []

    def save_joints():
        # joint_data_fn = os.path.join(path, 'data.json')
        # fp = open(joint_data_fn, 'w')
        mat = sio.loadmat(os.path.join(path, extracted_filename, "mpii_human_pose_v1_u12_1.mat"))

        for _, (anno, train_flag) in enumerate(  # all images
                zip(mat['RELEASE']['annolist'][0, 0][0], mat['RELEASE']['img_train'][0, 0][0])):

            img_fn = anno['image']['name'][0, 0][0]
            train_flag = int(train_flag)

            # print(i, img_fn, train_flag) # DEBUG print all images

            if train_flag:
                img_train_list.append(img_fn)
                ann_train_list.append([])
            else:
                img_test_list.append(img_fn)
                ann_test_list.append([])

            head_rect = []
            if 'x1' in str(anno['annorect'].dtype):
                head_rect = zip(
                    [x1[0, 0] for x1 in anno['annorect']['x1'][0]], [y1[0, 0] for y1 in anno['annorect']['y1'][0]],
                    [x2[0, 0] for x2 in anno['annorect']['x2'][0]], [y2[0, 0] for y2 in anno['annorect']['y2'][0]]
                )
            else:
                head_rect = []  # TODO

            if 'annopoints' in str(anno['annorect'].dtype):
                annopoints = anno['annorect']['annopoints'][0]
                head_x1s = anno['annorect']['x1'][0]
                head_y1s = anno['annorect']['y1'][0]
                head_x2s = anno['annorect']['x2'][0]
                head_y2s = anno['annorect']['y2'][0]

                for annopoint, head_x1, head_y1, head_x2, head_y2 in zip(annopoints, head_x1s, head_y1s, head_x2s,
                                                                         head_y2s):
                    # if annopoint != []:
                    # if len(annopoint) != 0:
                    if annopoint.size:
                        head_rect = [
                            float(head_x1[0, 0]),
                            float(head_y1[0, 0]),
                            float(head_x2[0, 0]),
                            float(head_y2[0, 0])
                        ]

                        # joint coordinates
                        annopoint = annopoint['point'][0, 0]
                        j_id = [str(j_i[0, 0]) for j_i in annopoint['id'][0]]
                        x = [x[0, 0] for x in annopoint['x'][0]]
                        y = [y[0, 0] for y in annopoint['y'][0]]
                        joint_pos = {}
                        for _j_id, (_x, _y) in zip(j_id, zip(x, y)):
                            joint_pos[int(_j_id)] = [float(_x), float(_y)]
                        # joint_pos = fix_wrong_joints(joint_pos)

                        # visibility list
                        if 'is_visible' in str(annopoint.dtype):
                            vis = [v[0] if v.size > 0 else [0] for v in annopoint['is_visible'][0]]
                            vis = dict([(k, int(v[0])) if len(v) > 0 else v for k, v in zip(j_id, vis)])
                        else:
                            vis = None

                        # if len(joint_pos) == 16:
                        if (not is_16_pos_only) or (len(joint_pos) == 16):
                            # only use image with 16 key points / or use all
                            data = {
                                'filename': img_fn,
                                'train': train_flag,
                                'head_rect': head_rect,
                                'is_visible': vis,
                                'joint_pos': joint_pos
                            }
                            # print(json.dumps(data), file=fp)  # py3
                            if train_flag:
                                ann_train_list[-1].append(data)
                            else:
                                ann_test_list[-1].append(data)

    # def write_line(datum, fp):
    #     joints = sorted([[int(k), v] for k, v in datum['joint_pos'].items()])
    #     joints = np.array([j for i, j in joints]).flatten()
    #
    #     out = [datum['filename']]
    #     out.extend(joints)
    #     out = [str(o) for o in out]
    #     out = ','.join(out)
    #
    #     print(out, file=fp)

    # def split_train_test():
    #     # fp_test = open('data/mpii/test_joints.csv', 'w')
    #     fp_test = open(os.path.join(path, 'test_joints.csv'), 'w')
    #     # fp_train = open('data/mpii/train_joints.csv', 'w')
    #     fp_train = open(os.path.join(path, 'train_joints.csv'), 'w')
    #     # all_data = open('data/mpii/data.json').readlines()
    #     all_data = open(os.path.join(path, 'data.json')).readlines()
    #     N = len(all_data)
    #     N_test = int(N * 0.1)
    #     N_train = N - N_test
    #
    #     print('N:{}'.format(N))
    #     print('N_train:{}'.format(N_train))
    #     print('N_test:{}'.format(N_test))
    #
    #     np.random.seed(1701)
    #     perm = np.random.permutation(N)
    #     test_indices = perm[:N_test]
    #     train_indices = perm[N_test:]
    #
    #     print('train_indices:{}'.format(len(train_indices)))
    #     print('test_indices:{}'.format(len(test_indices)))
    #
    #     for i in train_indices:
    #         datum = json.loads(all_data[i].strip())
    #         write_line(datum, fp_train)
    #
    #     for i in test_indices:
    #         datum = json.loads(all_data[i].strip())
    #         write_line(datum, fp_test)

    save_joints()
    # split_train_test()  #

    ## read images dir
    logging.info("reading images list ...")
    img_dir = os.path.join(path, extracted_filename2)
    _img_list = load_file_list(path=os.path.join(path, extracted_filename2), regx='\\.jpg', printable=False)
    # ann_list = json.load(open(os.path.join(path, 'data.json')))
    # iterate over copies: deleting from a list while enumerating it skips entries
    for im in list(img_train_list):
        if im not in _img_list:
            print('missing training image {} in {} (remove from img(ann)_train_list)'.format(im, img_dir))
            i = img_train_list.index(im)
            del img_train_list[i]
            del ann_train_list[i]
    for im in list(img_test_list):
        if im not in _img_list:
            print('missing testing image {} in {} (remove from img(ann)_test_list)'.format(im, img_dir))
            i = img_test_list.index(im)
            del img_test_list[i]
            del ann_test_list[i]

    ## check annotation and images
    n_train_images = len(img_train_list)
    n_test_images = len(img_test_list)
    n_images = n_train_images + n_test_images
    logging.info("n_images: {} n_train_images: {} n_test_images: {}".format(n_images, n_train_images, n_test_images))
    n_train_ann = len(ann_train_list)
    n_test_ann = len(ann_test_list)
    n_ann = n_train_ann + n_test_ann
    logging.info("n_ann: {} n_train_ann: {} n_test_ann: {}".format(n_ann, n_train_ann, n_test_ann))
    n_train_people = len(sum(ann_train_list, []))
    n_test_people = len(sum(ann_test_list, []))
    n_people = n_train_people + n_test_people
    logging.info("n_people: {} n_train_people: {} n_test_people: {}".format(n_people, n_train_people, n_test_people))
    # add path to all image file name
    for i, value in enumerate(img_train_list):
        img_train_list[i] = os.path.join(img_dir, value)
    for i, value in enumerate(img_test_list):
        img_test_list[i] = os.path.join(img_dir, value)
    return img_train_list, ann_train_list, img_test_list, ann_test_list
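
The docstring example draws the pose with tl.vis; if you only have matplotlib at hand, a minimal sketch like the following also works (the output file name 'pose0.png' is arbitrary, and the record layout is the one produced by save_joints() above):

# Plain-matplotlib alternative to tl.vis.draw_mpii_pose_to_image: scatter the
# joints of the first annotated person.
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

img_train_list, ann_train_list, _, _ = load_mpii_pose_dataset(is_16_pos_only=True)
idx = next(i for i, people in enumerate(ann_train_list) if people)  # first image with people
person = ann_train_list[idx][0]
xs = [x for x, _ in person['joint_pos'].values()]
ys = [y for _, y in person['joint_pos'].values()]
plt.imshow(mpimg.imread(img_train_list[idx]))
plt.scatter(xs, ys, c='r', s=12)
plt.savefig('pose0.png')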
Example #23
0
File: myfile.py Project: xiangwenliu/DENN
def load_cifar10_dataset(
        shape=(-1, 32, 32, 3), path='./data/cifar10/', plotable=False,
        second=3):

    print("Load or Download cifar10 > {}".format(path))

    #Helper function to unpickle the data
    def unpickle(file):
        fp = open(file, 'rb')
        if sys.version_info.major == 2:
            data = pickle.load(fp)
        elif sys.version_info.major == 3:
            data = pickle.load(fp, encoding='latin-1')
        fp.close()
        return data

    filename = 'cifar-10-python.tar.gz'
    url = 'https://www.cs.toronto.edu/~kriz/'
    # Download and uncompress file
    maybe_download_and_extract(filename, path, url, extract=True)

    #Unpickle file and fill in data
    X_train = None
    y_train = []
    for i in range(1, 6):
        data_dic = unpickle(
            os.path.join(path, 'cifar-10-batches-py/',
                         "data_batch_{}".format(i)))
        if i == 1:
            X_train = data_dic['data']
        else:
            X_train = np.vstack((X_train, data_dic['data']))
        y_train += data_dic['labels']

    test_data_dic = unpickle(
        os.path.join(path, 'cifar-10-batches-py/', "test_batch"))
    X_test = test_data_dic['data']
    y_test = np.array(test_data_dic['labels'])

    if shape == (-1, 3, 32, 32):
        X_test = X_test.reshape(shape)
        X_train = X_train.reshape(shape)
    elif shape == (-1, 32, 32, 3):
        X_test = X_test.reshape(shape, order='F')
        X_train = X_train.reshape(shape, order='F')
        X_test = np.transpose(X_test, (0, 2, 1, 3))
        X_train = np.transpose(X_train, (0, 2, 1, 3))
    else:
        X_test = X_test.reshape(shape)
        X_train = X_train.reshape(shape)

    y_train = np.array(y_train)

    if plotable:
        print('\nCIFAR-10')
        import matplotlib.pyplot as plt
        fig = plt.figure(1)

        print('Shape of a training image: X_train[0]', X_train[0].shape)

        plt.ion()  # interactive mode
        count = 1
        for row in range(10):
            for col in range(10):
                a = fig.add_subplot(10, 10, count)
                if shape == (-1, 3, 32, 32):
                    # plt.imshow(X_train[count-1], interpolation='nearest')
                    plt.imshow(np.transpose(X_train[count - 1], (1, 2, 0)),
                               interpolation='nearest')
                    # plt.imshow(np.transpose(X_train[count-1], (2, 1, 0)), interpolation='nearest')
                elif shape == (-1, 32, 32, 3):
                    plt.imshow(X_train[count - 1], interpolation='nearest')
                    # plt.imshow(np.transpose(X_train[count-1], (1, 0, 2)), interpolation='nearest')
                else:
                    raise Exception(
                        "Do not support the given 'shape' to plot the image examples"
                    )
                plt.gca().xaxis.set_major_locator(
                    plt.NullLocator())  # hide the tick marks
                plt.gca().yaxis.set_major_locator(plt.NullLocator())
                count = count + 1
        plt.draw()  # interactive mode
        plt.pause(second)  # interactive mode

        print("X_train:", X_train.shape)
        print("y_train:", y_train.shape)
        print("X_test:", X_test.shape)
        print("y_test:", y_test.shape)

    X_train = np.asarray(X_train, dtype=np.float32)
    X_test = np.asarray(X_test, dtype=np.float32)
    y_train = np.asarray(y_train, dtype=np.int32)
    y_test = np.asarray(y_test, dtype=np.int32)

    return X_train, y_train, X_test, y_test
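
The reshape with order='F' followed by a (0, 2, 1, 3) transpose is a compact way to turn CIFAR-10's channel-major 3072-byte rows into NHWC images. A self-contained check (fake rows, nothing downloaded) that it matches the more familiar NCHW-then-transpose route:

# Check that the order='F' trick above equals plain NCHW -> NHWC conversion.
import numpy as np

X = np.arange(2 * 3072).reshape(2, 3072)  # two fake CIFAR-10 rows
a = X.reshape(-1, 32, 32, 3, order='F').transpose(0, 2, 1, 3)  # route used above
b = X.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)             # NCHW -> NHWC
assert np.array_equal(a, b)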
Example #24
0
def load_cifar10_dataset(shape=(-1, 32, 32, 3), path='data', plotable=False):
    """Load CIFAR-10 dataset.

    It consists of 60000 32x32 colour images in 10 classes, with
    6000 images per class. There are 50000 training images and 10000 test images.

    The dataset is divided into five training batches and one test batch, each with
    10000 images. The test batch contains exactly 1000 randomly-selected images from
    each class. The training batches contain the remaining images in random order,
    but some training batches may contain more images from one class than another.
    Between them, the training batches contain exactly 5000 images from each class.

    Parameters
    ----------
    shape : tuple
        The shape of the images, e.g. (-1, 3, 32, 32) or (-1, 32, 32, 3).
    path : str
        The path that the data is downloaded to, default is ``data/cifar10/``.
    plotable : boolean
        Whether to plot some image examples, default is ``False``.

    Examples
    --------
    >>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3))

    References
    ----------
    - `CIFAR website <https://www.cs.toronto.edu/~kriz/cifar.html>`__
    - `Data download link <https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz>`__
    - `<https://teratail.com/questions/28932>`__

    """
    path = os.path.join(path, 'cifar10')
    logging.info("Load or Download cifar10 > {}".format(path))

    #Helper function to unpickle the data
    def unpickle(file):
        fp = open(file, 'rb')
        if sys.version_info.major == 2:
            data = pickle.load(fp)
        elif sys.version_info.major == 3:
            data = pickle.load(fp, encoding='latin-1')
        else:
            raise RuntimeError("Sys Version Unsupported")
        fp.close()
        return data

    filename = 'cifar-10-python.tar.gz'
    url = 'https://www.cs.toronto.edu/~kriz/'
    #Download and uncompress file
    maybe_download_and_extract(filename, path, url, extract=True)

    #Unpickle file and fill in data
    X_train = None
    y_train = []
    for i in range(1, 6):
        data_dic = unpickle(os.path.join(path, 'cifar-10-batches-py/', "data_batch_{}".format(i)))
        if i == 1:
            X_train = data_dic['data']
        else:
            X_train = np.vstack((X_train, data_dic['data']))
        y_train += data_dic['labels']

    test_data_dic = unpickle(os.path.join(path, 'cifar-10-batches-py/', "test_batch"))
    X_test = test_data_dic['data']
    y_test = np.array(test_data_dic['labels'])

    if shape == (-1, 3, 32, 32):
        X_test = X_test.reshape(shape)
        X_train = X_train.reshape(shape)
    elif shape == (-1, 32, 32, 3):
        X_test = X_test.reshape(shape, order='F')
        X_train = X_train.reshape(shape, order='F')
        X_test = np.transpose(X_test, (0, 2, 1, 3))
        X_train = np.transpose(X_train, (0, 2, 1, 3))
    else:
        X_test = X_test.reshape(shape)
        X_train = X_train.reshape(shape)

    y_train = np.array(y_train)

    if plotable:
        logging.info('\nCIFAR-10')
        import matplotlib.pyplot as plt
        fig = plt.figure(1)

        logging.info('Shape of a training image: X_train[0] %s' % X_train[0].shape)

        plt.ion()  # interactive mode
        count = 1
        for _ in range(10):  # each row
            for _ in range(10):  # each column
                _ = fig.add_subplot(10, 10, count)
                if shape == (-1, 3, 32, 32):
                    # plt.imshow(X_train[count-1], interpolation='nearest')
                    plt.imshow(np.transpose(X_train[count - 1], (1, 2, 0)), interpolation='nearest')
                    # plt.imshow(np.transpose(X_train[count-1], (2, 1, 0)), interpolation='nearest')
                elif shape == (-1, 32, 32, 3):
                    plt.imshow(X_train[count - 1], interpolation='nearest')
                    # plt.imshow(np.transpose(X_train[count-1], (1, 0, 2)), interpolation='nearest')
                else:
                    raise Exception("Do not support the given 'shape' to plot the image examples")
                plt.gca().xaxis.set_major_locator(plt.NullLocator())
                plt.gca().yaxis.set_major_locator(plt.NullLocator())
                count = count + 1
        plt.draw()  # interactive mode
        plt.pause(3)  # interactive mode

        logging.info("X_train: %s" % X_train.shape)
        logging.info("y_train: %s" % y_train.shape)
        logging.info("X_test:  %s" % X_test.shape)
        logging.info("y_test:  %s" % y_test.shape)

    X_train = np.asarray(X_train, dtype=np.float32)
    X_test = np.asarray(X_test, dtype=np.float32)
    y_train = np.asarray(y_train, dtype=np.int32)
    y_test = np.asarray(y_test, dtype=np.int32)

    return X_train, y_train, X_test, y_test
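
A quick sanity check of the class balance described in the docstring: np.bincount over the returned labels should yield ten counts of 5000 for training and ten counts of 1000 for testing.

# Sanity-check the documented 5000/1000 images-per-class split.
import numpy as np

X_train, y_train, X_test, y_test = load_cifar10_dataset(shape=(-1, 32, 32, 3))
print(np.bincount(y_train))  # expect [5000 5000 ... 5000]
print(np.bincount(y_test))   # expect [1000 1000 ... 1000]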
Example #25
0
def load_flickr25k_dataset(tag='sky', path="data", n_threads=50, printable=False):
    """Load Flickr25K dataset.

    Returns a list of images with a given tag from the Flickr25K dataset;
    the dataset is downloaded from `the official website <http://press.liacs.nl/mirflickr/mirdownload.html>`__
    the first time you use it.

    Parameters
    ------------
    tag : str or None
        What images to return.
            - If you want images with a certain tag, use a string like 'dog' or 'red', see `Flickr Search <https://www.flickr.com/search/>`__.
            - If you want to get all images, set to ``None``.

    path : str
        The path that the data is downloaded to, default is ``data/flickr25k/``.
    n_threads : int
        The number of threads used to read images.
    printable : boolean
        Whether to print information when reading images, default is ``False``.

    Examples
    -----------
    Get images with tag of sky

    >>> images = tl.files.load_flickr25k_dataset(tag='sky')

    Get all images

    >>> images = tl.files.load_flickr25k_dataset(tag=None, n_threads=100, printable=True)

    """
    path = os.path.join(path, 'flickr25k')

    filename = 'mirflickr25k.zip'
    url = 'http://press.liacs.nl/mirflickr/mirflickr25k/'

    # download dataset
    if folder_exists(os.path.join(path, "mirflickr")) is False:
        logging.info("[*] Flickr25k is nonexistent in {}".format(path))
        maybe_download_and_extract(filename, path, url, extract=True)
        del_file(os.path.join(path, filename))

    # return images by the given tag.
    # 1. image path list
    folder_imgs = os.path.join(path, "mirflickr")
    path_imgs = load_file_list(path=folder_imgs, regx='\\.jpg', printable=False)
    path_imgs.sort(key=natural_keys)

    # 2. tag path list
    folder_tags = os.path.join(path, "mirflickr", "meta", "tags")
    path_tags = load_file_list(path=folder_tags, regx='\\.txt', printable=False)
    path_tags.sort(key=natural_keys)

    # 3. select images
    if tag is None:
        logging.info("[Flickr25k] reading all images")
    else:
        logging.info("[Flickr25k] reading images with tag: {}".format(tag))
    images_list = []
    for idx, _v in enumerate(path_tags):
        tags = read_file(os.path.join(folder_tags, path_tags[idx])).split('\n')
        # logging.info(idx+1, tags)
        if tag is None or tag in tags:
            images_list.append(path_imgs[idx])

    images = visualize.read_images(images_list, folder_imgs, n_threads=n_threads, printable=printable)
    return images
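
Since the valid values for tag= are whatever appears in the dataset's meta/tags files, a small helper can enumerate them. This is a hypothetical utility, not part of the loader; it only assumes the mirflickr/meta/tags layout used above (one tag per line in each .txt file):

# Hypothetical helper: list the distinct tags in a downloaded MIRFLICKR-25K
# copy, i.e. the valid values for the `tag=` parameter above.
import os

def list_flickr25k_tags(path="data"):
    folder_tags = os.path.join(path, "flickr25k", "mirflickr", "meta", "tags")
    all_tags = set()
    for fname in os.listdir(folder_tags):
        if fname.endswith(".txt"):
            with open(os.path.join(folder_tags, fname)) as f:
                all_tags.update(line.strip() for line in f if line.strip())
    return sorted(all_tags)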
Example #26
0
def load_voc_dataset(path='data', dataset='2012', contain_classes_in_person=False):
    """Pascal VOC 2007/2012 Dataset.

    It has 20 object classes:
    aeroplane, bicycle, bird, boat, bottle, bus, car, cat, chair, cow, diningtable, dog, horse, motorbike, person, pottedplant, sheep, sofa, train, tvmonitor
    plus 3 additional classes for person parts: head, hand and foot.

    Parameters
    -----------
    path : str
        The path that the data is downloaded to, default is ``data/VOC``.
    dataset : str
        The VOC dataset version: `2012`, `2007`, `2007test` or `2012test`. We usually train a model on `2007+2012` and test it on `2007test`.
    contain_classes_in_person : boolean
        Whether to include the head, hand and foot annotations, default is False.

    Returns
    ---------
    imgs_file_list : list of str
        Full paths of all images.
    imgs_semseg_file_list : list of str
        Full paths of all maps for semantic segmentation. Note that not all images have this map!
    imgs_insseg_file_list : list of str
        Full paths of all maps for instance segmentation. Note that not all images have this map!
    imgs_ann_file_list : list of str
        Full paths of all annotations for bounding box and object class; every image has this annotation.
    classes : list of str
        Classes in order.
    classes_in_person : list of str
        Classes in person.
    classes_dict : dictionary
        Class label to integer.
    n_objs_list : list of int
        Number of objects in all images in ``imgs_file_list`` in order.
    objs_info_list : list of str
        Darknet format for the annotation of all images in ``imgs_file_list`` in order. ``[class_id x_centre y_centre width height]`` in ratio format.
    objs_info_dicts : dictionary
        The annotation of all images in ``imgs_file_list``, ``{imgs_file_list : dictionary for annotation}``,
        format from `TensorFlow/Models/object-detection <https://github.com/tensorflow/models/blob/master/object_detection/create_pascal_tf_record.py>`__.

    Examples
    ----------
    >>> imgs_file_list, imgs_semseg_file_list, imgs_insseg_file_list, imgs_ann_file_list,
    >>>     classes, classes_in_person, classes_dict,
    >>>     n_objs_list, objs_info_list, objs_info_dicts = tl.files.load_voc_dataset(dataset="2012", contain_classes_in_person=False)
    >>> idx = 26
    >>> print(classes)
    ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
    >>> print(classes_dict)
    {'sheep': 16, 'horse': 12, 'bicycle': 1, 'bottle': 4, 'cow': 9, 'sofa': 17, 'car': 6, 'dog': 11, 'cat': 7, 'person': 14, 'train': 18, 'diningtable': 10, 'aeroplane': 0, 'bus': 5, 'pottedplant': 15, 'tvmonitor': 19, 'chair': 8, 'bird': 2, 'boat': 3, 'motorbike': 13}
    >>> print(imgs_file_list[idx])
    data/VOC/VOC2012/JPEGImages/2007_000423.jpg
    >>> print(n_objs_list[idx])
    2
    >>> print(imgs_ann_file_list[idx])
    data/VOC/VOC2012/Annotations/2007_000423.xml
    >>> print(objs_info_list[idx])
    14 0.173 0.461333333333 0.142 0.496
    14 0.828 0.542666666667 0.188 0.594666666667
    >>> ann = tl.prepro.parse_darknet_ann_str_to_list(objs_info_list[idx])
    >>> print(ann)
    [[14, 0.173, 0.461333333333, 0.142, 0.496], [14, 0.828, 0.542666666667, 0.188, 0.594666666667]]
    >>> c, b = tl.prepro.parse_darknet_ann_list_to_cls_box(ann)
    >>> print(c, b)
    [14, 14] [[0.173, 0.461333333333, 0.142, 0.496], [0.828, 0.542666666667, 0.188, 0.594666666667]]

    References
    -------------
    - `Pascal VOC2012 Website <https://pjreddie.com/projects/pascal-voc-dataset-mirror/>`__.
    - `Pascal VOC2007 Website <https://pjreddie.com/projects/pascal-voc-dataset-mirror/>`__.

    """
    try:
        import lxml.etree as etree
    except ImportError as e:
        print(e)
        raise ImportError("Module lxml not found. Please install lxml via pip or other package managers.")

    path = os.path.join(path, 'VOC')

    def _recursive_parse_xml_to_dict(xml):
        """Recursively parses XML contents to python dict.

        We assume that `object` tags are the only ones that can appear
        multiple times at the same level of a tree.

        Args:
            xml: xml tree obtained by parsing XML file contents using lxml.etree

        Returns:
            Python dictionary holding XML contents.

        """
        if len(xml) == 0:  # leaf node: no children left, return its text
            return {xml.tag: xml.text}
        result = {}
        for child in xml:
            child_result = _recursive_parse_xml_to_dict(child)
            if child.tag != 'object':
                result[child.tag] = child_result[child.tag]
            else:
                if child.tag not in result:
                    result[child.tag] = []
                result[child.tag].append(child_result[child.tag])
        return {xml.tag: result}

    import xml.etree.ElementTree as ET

    if dataset == "2012":
        url = "http://pjreddie.com/media/files/"
        tar_filename = "VOCtrainval_11-May-2012.tar"
        extracted_filename = "VOC2012"  #"VOCdevkit/VOC2012"
        logging.info("    [============= VOC 2012 =============]")
    elif dataset == "2012test":
        extracted_filename = "VOC2012test"  #"VOCdevkit/VOC2012"
        logging.info("    [============= VOC 2012 Test Set =============]")
        logging.info(
            "    \nAuthor: 2012test only has person annotations, so 2007test is highly recommended for testing!\n"
        )
        import time
        time.sleep(3)
        if os.path.isdir(os.path.join(path, extracted_filename)) is False:
            logging.info("For VOC 2012 Test data - online registration required")
            logging.info(
                " Please download VOC2012test.tar from:  \n register: http://host.robots.ox.ac.uk:8080 \n voc2012 : http://host.robots.ox.ac.uk:8080/eval/challenges/voc2012/ \ndownload: http://host.robots.ox.ac.uk:8080/eval/downloads/VOC2012test.tar"
            )
            logging.info(" unzip VOC2012test.tar,rename the folder to VOC2012test and put it into %s" % path)
            exit()
        # # http://host.robots.ox.ac.uk:8080/eval/downloads/VOC2012test.tar
        # url = "http://host.robots.ox.ac.uk:8080/eval/downloads/"
        # tar_filename = "VOC2012test.tar"
    elif dataset == "2007":
        url = "http://pjreddie.com/media/files/"
        tar_filename = "VOCtrainval_06-Nov-2007.tar"
        extracted_filename = "VOC2007"
        logging.info("    [============= VOC 2007 =============]")
    elif dataset == "2007test":
        # http://host.robots.ox.ac.uk/pascal/VOC/voc2007/index.html#testdata
        # http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar
        url = "http://pjreddie.com/media/files/"
        tar_filename = "VOCtest_06-Nov-2007.tar"
        extracted_filename = "VOC2007test"
        logging.info("    [============= VOC 2007 Test Set =============]")
    else:
        raise Exception("Please set the dataset aug to 2012, 2012test or 2007.")

    # download dataset
    if dataset != "2012test":
        from sys import platform as _platform
        if folder_exists(os.path.join(path, extracted_filename)) is False:
            logging.info("[VOC] {} is nonexistent in {}".format(extracted_filename, path))
            maybe_download_and_extract(tar_filename, path, url, extract=True)
            del_file(os.path.join(path, tar_filename))
            if dataset == "2012":
                if _platform == "win32":
                    os.system("move {}\VOCdevkit\VOC2012 {}\VOC2012".format(path, path))
                else:
                    os.system("mv {}/VOCdevkit/VOC2012 {}/VOC2012".format(path, path))
            elif dataset == "2007":
                if _platform == "win32":
                    os.system("move {}\VOCdevkit\VOC2007 {}\VOC2007".format(path, path))
                else:
                    os.system("mv {}/VOCdevkit/VOC2007 {}/VOC2007".format(path, path))
            elif dataset == "2007test":
                if _platform == "win32":
                    os.system("move {}\VOCdevkit\VOC2007 {}\VOC2007test".format(path, path))
                else:
                    os.system("mv {}/VOCdevkit/VOC2007 {}/VOC2007test".format(path, path))
            del_folder(os.path.join(path, 'VOCdevkit'))
    # object classes(labels)  NOTE: YOU CAN CUSTOMIZE THIS LIST
    classes = [
        "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog",
        "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"
    ]
    if contain_classes_in_person:
        classes_in_person = ["head", "hand", "foot"]
    else:
        classes_in_person = []

    classes += classes_in_person  # use extra 3 classes for person

    classes_dict = utils.list_string_to_dict(classes)
    logging.info("[VOC] object classes {}".format(classes_dict))

    # 1. image path list
    # folder_imgs = path+"/"+extracted_filename+"/JPEGImages/"
    folder_imgs = os.path.join(path, extracted_filename, "JPEGImages")
    imgs_file_list = load_file_list(path=folder_imgs, regx='\\.jpg', printable=False)
    logging.info("[VOC] {} images found".format(len(imgs_file_list)))

    imgs_file_list.sort(
        key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
    )  # 2007_000027.jpg --> 2007000027

    imgs_file_list = [os.path.join(folder_imgs, s) for s in imgs_file_list]
    # logging.info('IM',imgs_file_list[0::3333], imgs_file_list[-1])
    if dataset != "2012test":
        ##======== 2. semantic segmentation maps path list
        # folder_semseg = path+"/"+extracted_filename+"/SegmentationClass/"
        folder_semseg = os.path.join(path, extracted_filename, "SegmentationClass")
        imgs_semseg_file_list = load_file_list(path=folder_semseg, regx='\\.png', printable=False)
        logging.info("[VOC] {} maps for semantic segmentation found".format(len(imgs_semseg_file_list)))
        imgs_semseg_file_list.sort(
            key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
        )  # 2007_000032.png --> 2007000032
        imgs_semseg_file_list = [os.path.join(folder_semseg, s) for s in imgs_semseg_file_list]
        # logging.info('Semantic Seg IM',imgs_semseg_file_list[0::333], imgs_semseg_file_list[-1])
        ##======== 3. instance segmentation maps path list
        # folder_insseg = path+"/"+extracted_filename+"/SegmentationObject/"
        folder_insseg = os.path.join(path, extracted_filename, "SegmentationObject")
        imgs_insseg_file_list = load_file_list(path=folder_insseg, regx='\\.png', printable=False)
        logging.info("[VOC] {} maps for instance segmentation found".format(len(imgs_semseg_file_list)))
        imgs_insseg_file_list.sort(
            key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
        )  # 2007_000032.png --> 2007000032
        imgs_insseg_file_list = [os.path.join(folder_insseg, s) for s in imgs_insseg_file_list]
        # logging.info('Instance Seg IM',imgs_insseg_file_list[0::333], imgs_insseg_file_list[-1])
    else:
        imgs_semseg_file_list = []
        imgs_insseg_file_list = []
    # 4. annotations for bounding box and object class
    # folder_ann = path+"/"+extracted_filename+"/Annotations/"
    folder_ann = os.path.join(path, extracted_filename, "Annotations")
    imgs_ann_file_list = load_file_list(path=folder_ann, regx='\\.xml', printable=False)
    logging.info(
        "[VOC] {} XML annotation files for bounding box and object class found".format(len(imgs_ann_file_list))
    )
    imgs_ann_file_list.sort(
        key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
    )  # 2007_000027.xml --> 2007000027
    imgs_ann_file_list = [os.path.join(folder_ann, s) for s in imgs_ann_file_list]
    # logging.info('ANN',imgs_ann_file_list[0::3333], imgs_ann_file_list[-1])

    if dataset == "2012test":  # remove unused images in JPEG folder
        imgs_file_list_new = []
        for ann in imgs_ann_file_list:
            ann = os.path.split(ann)[-1].split('.')[0]
            for im in imgs_file_list:
                if ann in im:
                    imgs_file_list_new.append(im)
                    break
        imgs_file_list = imgs_file_list_new
        logging.info("[VOC] keep %d images" % len(imgs_file_list_new))

    # parse XML annotations
    def convert(size, box):
        dw = 1. / size[0]
        dh = 1. / size[1]
        x = (box[0] + box[1]) / 2.0
        y = (box[2] + box[3]) / 2.0
        w = box[1] - box[0]
        h = box[3] - box[2]
        x = x * dw
        w = w * dw
        y = y * dh
        h = h * dh
        return x, y, w, h

    def convert_annotation(file_name):
        """Given VOC2012 XML Annotations, returns number of objects and info."""
        in_file = open(file_name)
        out_file = ""
        tree = ET.parse(in_file)
        root = tree.getroot()
        size = root.find('size')
        w = int(size.find('width').text)
        h = int(size.find('height').text)
        n_objs = 0

        for obj in root.iter('object'):
            if dataset != "2012test":
                difficult = obj.find('difficult').text
                cls = obj.find('name').text
                if cls not in classes or int(difficult) == 1:
                    continue
            else:
                cls = obj.find('name').text
                if cls not in classes:
                    continue
            cls_id = classes.index(cls)
            xmlbox = obj.find('bndbox')
            b = (
                float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text),
                float(xmlbox.find('ymax').text)
            )
            bb = convert((w, h), b)

            out_file += str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n'
            n_objs += 1
            if cls in "person":
                for part in obj.iter('part'):
                    cls = part.find('name').text
                    if cls not in classes_in_person:
                        continue
                    cls_id = classes.index(cls)
                    xmlbox = part.find('bndbox')
                    b = (
                        float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
                        float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)
                    )
                    bb = convert((w, h), b)
                    # out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
                    out_file += str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n'
                    n_objs += 1
        in_file.close()
        return n_objs, out_file

    logging.info("[VOC] Parsing xml annotations files")
    n_objs_list = []
    objs_info_list = []  # Darknet Format list of string
    objs_info_dicts = {}
    for idx, ann_file in enumerate(imgs_ann_file_list):
        n_objs, objs_info = convert_annotation(ann_file)
        n_objs_list.append(n_objs)
        objs_info_list.append(objs_info)
        with tf.io.gfile.GFile(ann_file, 'r') as fid:
            xml_str = fid.read()
        xml = etree.fromstring(xml_str)
        data = _recursive_parse_xml_to_dict(xml)['annotation']
        objs_info_dicts.update({imgs_file_list[idx]: data})

    return imgs_file_list, imgs_semseg_file_list, imgs_insseg_file_list, imgs_ann_file_list, classes, classes_in_person, classes_dict, n_objs_list, objs_info_list, objs_info_dicts
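
The convert() helper above maps a pixel box (xmin, xmax, ymin, ymax) to Darknet's [class_id x_centre y_centre width height] ratio format. A standalone numeric check of that arithmetic (the helper name to_darknet is ours, mirroring convert()):

# Standalone check of the Darknet conversion: size = (width, height),
# box = (xmin, xmax, ymin, ymax) in pixels.
def to_darknet(size, box):
    dw, dh = 1.0 / size[0], 1.0 / size[1]
    x = (box[0] + box[1]) / 2.0 * dw  # centre x as a ratio of image width
    y = (box[2] + box[3]) / 2.0 * dh  # centre y as a ratio of image height
    w = (box[1] - box[0]) * dw
    h = (box[3] - box[2]) * dh
    return x, y, w, h

print(to_darknet((500, 400), (100.0, 200.0, 50.0, 150.0)))  # (0.3, 0.25, 0.2, 0.25)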