def test_load_openvino(self):
    local_path = self.create_temp_dir()
    model = InferenceModel(1)
    model_url = data_url + "/analytics-zoo-models/openvino/2018_R5/resnet_v1_50.xml"
    weight_url = data_url + "/analytics-zoo-models/openvino/2018_R5/resnet_v1_50.bin"
    model_path = maybe_download("resnet_v1_50.xml",
                                local_path, model_url)
    weight_path = maybe_download("resnet_v1_50.bin",
                                 local_path, weight_url)
    model.load_openvino(model_path, weight_path)
    input_data = np.random.random([4, 1, 224, 224, 3])
    model.predict(input_data)
Example #2
def read_data_sets(train_dir, data_type="train"):
    """
    Parse or download mnist data if train_dir is empty.

    :param train_dir: The directory storing the mnist data

    :param data_type: Reading training set or testing set. It can be either "train" or "test".

    :return:

    ```
    (ndarray, ndarray) representing (features, labels).
    features is a 4D uint8 numpy array [index, y, x, depth]
    representing each pixel valued from 0 to 255.
    labels is a 1D uint8 numpy array representing the label valued from 0 to 9.
    ```

    """
    TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
    TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
    TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
    TEST_LABELS = 't10k-labels-idx1-ubyte.gz'

    if data_type == "train":
        local_file = base.maybe_download(TRAIN_IMAGES, train_dir,
                                         SOURCE_URL + TRAIN_IMAGES)
        with open(local_file, 'rb') as f:
            train_images = extract_images(f)

        local_file = base.maybe_download(TRAIN_LABELS, train_dir,
                                         SOURCE_URL + TRAIN_LABELS)
        with open(local_file, 'rb') as f:
            train_labels = extract_labels(f)
        return train_images, train_labels

    else:
        local_file = base.maybe_download(TEST_IMAGES, train_dir,
                                         SOURCE_URL + TEST_IMAGES)
        with open(local_file, 'rb') as f:
            test_images = extract_images(f)

        local_file = base.maybe_download(TEST_LABELS, train_dir,
                                         SOURCE_URL + TEST_LABELS)
        with open(local_file, 'rb') as f:
            test_labels = extract_labels(f)
        return test_images, test_labels
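A minimal usage sketch for the reader above; the target directory is a placeholder, and the helper's own module-level dependencies (`base.maybe_download`, `extract_images`, `extract_labels`, `SOURCE_URL`) are assumed to be available:

# Hypothetical download directory; any writable path works.
train_images, train_labels = read_data_sets("/tmp/mnist", data_type="train")
test_images, test_labels = read_data_sets("/tmp/mnist", data_type="test")

# Expected shapes: (60000, 28, 28, 1) uint8 images, (60000,) uint8 labels.
print(train_images.shape, train_images.dtype)
print(test_images.shape, test_labels.shape)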
Example #3
def download_news20(dest_dir):
    file_name = "20news-18828.tar.gz"
    file_abs_path = base.maybe_download(file_name, dest_dir, NEWS20_URL)
    extracted_to = os.path.join(dest_dir, "20news-18828")
    if not os.path.exists(extracted_to):
        print("Extracting %s to %s" % (file_abs_path, extracted_to))
        # Context manager ensures the archive is closed whether or not
        # extraction was needed (the original only closed it after extracting).
        with tarfile.open(file_abs_path, "r:gz") as tar:
            tar.extractall(dest_dir)
    return extracted_to
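A brief, non-authoritative usage sketch: the destination path is a placeholder, and the extracted 20news-18828 directory is assumed to contain one subdirectory per newsgroup with one file per message.

import os

news20_dir = download_news20("/tmp/news20")  # hypothetical destination

# Count messages per newsgroup subdirectory.
for category in sorted(os.listdir(news20_dir)):
    category_dir = os.path.join(news20_dir, category)
    if os.path.isdir(category_dir):
        print(category, len(os.listdir(category_dir)), "messages")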
Example #4
def load_roberta(self):
    os.makedirs(local_path, exist_ok=True)
    model_url = data_url + "/analytics-zoo-data/roberta/roberta.tar"
    model_path = maybe_download("roberta.tar",
                                local_path, model_url)
    tar = tarfile.open(model_path)
    tar.extractall(path=local_path)
    tar.close()
    model_path = os.path.join(local_path, "roberta/model.xml")
    self.est = Estimator.from_openvino(model_path=model_path)
Example #5
def download_glove_w2v(dest_dir):
    import zipfile
    file_name = "glove.6B.zip"
    file_abs_path = base.maybe_download(file_name, dest_dir, GLOVE_URL)
    extracted_to = os.path.join(dest_dir, "glove.6B")
    if not os.path.exists(extracted_to):
        print("Extracting %s to %s" % (file_abs_path, extracted_to))
        # Context manager ensures the zip is closed whether or not
        # extraction was needed (the original only closed it after extracting).
        with zipfile.ZipFile(file_abs_path, 'r') as zip_ref:
            zip_ref.extractall(extracted_to)
    return extracted_to
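A possible follow-up step, sketched under the assumption that the archive holds the standard `glove.6B.<dim>d.txt` files: parse the 100-dimensional embeddings into a word-to-vector dictionary.

import os
import numpy as np

glove_dir = download_glove_w2v("/tmp/glove")  # hypothetical destination

# Each line is "<word> <v1> ... <v100>"; build a word -> vector dict.
embeddings = {}
with open(os.path.join(glove_dir, "glove.6B.100d.txt"), encoding="utf-8") as f:
    for line in f:
        parts = line.rstrip().split(" ")
        embeddings[parts[0]] = np.asarray(parts[1:], dtype=np.float32)

print(len(embeddings), "word vectors loaded")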
Example #6
def download_reuters(dest_dir):
    """Download pre-processed reuters newswire data

    :argument
        dest_dir: destination directory to store the data

    :return
        The absolute path of the stored data
    """
    file_name = 'reuters.pkl'
    file_abs_path = base.maybe_download(
        file_name, dest_dir,
        'https://s3.amazonaws.com/text-datasets/reuters.pkl')
    return file_abs_path
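A hedged usage sketch: the cache directory below is a placeholder, and the pickle is loaded without assuming any particular internal layout.

import pickle

path = download_reuters("/tmp/.zoo/dataset")  # hypothetical cache directory

with open(path, "rb") as f:
    data = pickle.load(f, encoding="latin1")

# Inspect the payload before relying on any particular structure.
print(type(data))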
Example #7
def download_imdb(dest_dir):
    """Download pre-processed IMDB movie review data

    :argument
        dest_dir: destination directory to store the data

    :return
        The absolute path of the stored data
    """
    file_name = "imdb_full.pkl"
    file_abs_path = base.maybe_download(
        file_name, dest_dir,
        'https://s3.amazonaws.com/text-datasets/imdb_full.pkl')
    return file_abs_path
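A sketch of the likely next step, assuming (this is an assumption, not confirmed by the code above) that `imdb_full.pkl` unpickles to a `((x_train, y_train), (x_test, y_test))` tuple:

import pickle

path = download_imdb("/tmp/.zoo/dataset")  # hypothetical cache directory

with open(path, "rb") as f:
    # Assumed layout: ((x_train, y_train), (x_test, y_test)) lists of reviews and labels.
    (x_train, y_train), (x_test, y_test) = pickle.load(f, encoding="latin1")

print(len(x_train), "training reviews,", len(x_test), "test reviews")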
Example #8
def load_resnet(self):
    input_file_path = os.path.join(resource_path, "orca/learn/resnet_input")
    output_file_path = os.path.join(resource_path, "orca/learn/resnet_output")
    self.input = read_file_and_cast(input_file_path)
    self.output = read_file_and_cast(output_file_path)
    self.input = np.array(self.input).reshape([3, 224, 224])
    self.output = np.array(self.output).reshape([4, 1000])[:1]

    os.makedirs(local_path, exist_ok=True)
    model_url = data_url + "/analytics-zoo-data/openvino2020_resnet50.tar"
    model_path = maybe_download("openvino2020_resnet50.tar",
                                local_path, model_url)
    tar = tarfile.open(model_path)
    tar.extractall(path=local_path)
    tar.close()
    model_path = os.path.join(local_path, "openvino2020_resnet50/resnet_v1_50.xml")
    self.est = Estimator.from_openvino(model_path=model_path)
Example #9
def get_word_index(dest_dir='/tmp/.zoo/dataset',
                   filename='reuters_word_index.pkl'):
    """Retrieves the dictionary mapping word indices back to words.

    # Arguments
        dest_dir: where to cache the data (defaults to `/tmp/.zoo/dataset`).
        filename: dataset file name

    # Returns
        The word index dictionary.
    """

    path = base.maybe_download(
        filename, dest_dir,
        'https://s3.amazonaws.com/text-datasets/reuters_word_index.pkl')

    with open(path, 'rb') as f:
        data = cPickle.load(f, encoding='latin1')
    return data
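One common way to use the returned mapping (a sketch, not part of the original code) is to invert it and decode an index-encoded sequence back into words:

word_index = get_word_index("/tmp/.zoo/dataset")  # hypothetical cache directory

# Invert word -> index into index -> word for decoding.
index_to_word = {index: word for word, index in word_index.items()}

encoded = [1, 2, 3, 4, 5]  # an arbitrary example sequence of word indices
print(" ".join(index_to_word.get(i, "?") for i in encoded))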
Example #10
def read_data_sets(data_dir):
    """
    Parse or download the MovieLens 1M data if data_dir is empty.

    :param data_dir: The directory storing the movielens data
    :return: a 2D int numpy array with one rating record per row
             (user index, item index, rating, timestamp)
    """
    WHOLE_DATA = 'ml-1m.zip'
    local_file = base.maybe_download(WHOLE_DATA, data_dir,
                                     SOURCE_URL + WHOLE_DATA)
    extracted_to = os.path.join(data_dir, "ml-1m")
    if not os.path.exists(extracted_to):
        print("Extracting %s to %s" % (local_file, data_dir))
        # Context manager ensures the zip is closed whether or not
        # extraction was needed (the original only closed it after extracting).
        with zipfile.ZipFile(local_file, 'r') as zip_ref:
            zip_ref.extractall(data_dir)
    rating_files = os.path.join(extracted_to, "ratings.dat")

    # Each line of ratings.dat is "user::movie::rating::timestamp".
    with open(rating_files, "r") as f:
        rating_list = [line.strip().split("::") for line in f]
    movielens_data = np.array(rating_list).astype(int)
    return movielens_data
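A small usage sketch; the data directory is a placeholder, and the column order follows the standard `ratings.dat` layout (user id, movie id, rating, timestamp):

movielens_data = read_data_sets("/tmp/movielens")  # hypothetical data directory

user_ids = movielens_data[:, 0]   # column 0: user id
movie_ids = movielens_data[:, 1]  # column 1: movie id
print("records:", movielens_data.shape[0],
      "max user id:", user_ids.max(), "max movie id:", movie_ids.max())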
Example #11
def download_data(dest_dir):
    TINYSHAKESPEARE_URL = 'https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt'  # noqa
    file_name = "input.txt"
    file_abs_path = base.maybe_download(file_name, dest_dir, TINYSHAKESPEARE_URL)
    return file_abs_path
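As a hedged follow-up sketch, the downloaded text is typically turned into a character-level vocabulary for a char-RNN; the destination path below is a placeholder:

path = download_data("/tmp/tinyshakespeare")  # hypothetical destination

with open(path, "r") as f:
    text = f.read()

# Build a character-level vocabulary and an index mapping.
chars = sorted(set(text))
char_to_index = {c: i for i, c in enumerate(chars)}
print(len(text), "characters,", len(chars), "unique")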