Example no. 1
def load_test_data():
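    """
    Load the test-split of the images listed in the AFLW SQLite
    annotation database, skipping the rows already used for training.

    Returns the images and the one-hot encoded class-labels.
    """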
    images = np.zeros(
        shape=[_num_images_test, image_size, image_size, num_channels],
        dtype=float)
    cls = np.zeros(shape=[_num_images_test], dtype=int)

    conn = sqlite3.connect(data_path + 'aflw.sqlite')
    c = conn.cursor()

    result = c.execute(query_string)

    i = 0
    for row in result:
        if i < _num_images_train:
            i += 1

        elif os.path.isfile(data_path + str(row[0])) and (i - _num_images_train
                                                          < _num_images_test):
            msg = "\r- Processing image: {0:>6} / {1}".format(
                i - _num_images_train + 1, _num_images_test)
            sys.stdout.write(msg)
            sys.stdout.flush()

            image, cls[i - _num_images_train] = load_images_and_cls(
                row, file_path_cache_test)
            if image is not None:
                image_h, image_w, dump = image.shape
                images[i - _num_images_train, 0:image_h,
                       0:image_w] = image[0:image_h, 0:image_w]

            i += 1

    print()

    return images, one_hot_encoded(class_numbers=cls, num_classes=num_classes)
def load_test_data():
    """
    Load all the test-data for the CIFAR-10 data-set.
    Returns the images, class-numbers and one-hot encoded class-labels.
    """

    images, cls = _load_data(filename='test_batch')

    return images, cls, one_hot_encoded(class_numbers=cls, num_classes=num_classes)
def load_training_data():
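    """
    Load the training images, down-sample each one and cut it into
    sliding-window patches; every patch inherits the class-number of
    its source file from TRAIN_DICTIONARY.

    Returns the images, class-numbers and one-hot encoded class-labels.
    """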
    # Pre-allocate the arrays for the images and class-numbers for efficiency.
    # images = np.zeros(shape=[_num_images_train, img_width, img_height, num_channels], dtype=float)
    # cls = np.zeros(shape=[_num_images_train], dtype=int)
    images = []
    cls = []
    a = TRAIN_DICTIONARY.keys()
    files = os.listdir(os.path.join(data_path, 'training'))
    for i in range(0, _num_images_train):
        f = files[i]
        if f in a:
            img = downSampling(os.path.join(data_path, 'training', f), 350,
                               230)
            arr = sliding(img)
            # print(len(arr))
            # arr = np.array(img)
            # images[i] = arr
            # cls[i] = TRAIN_DICTIONARY[f]
            for x in arr:
                images.append(x)
                cls.append(TRAIN_DICTIONARY[f])

    images = np.array(images)
    cls = np.array(cls, dtype=int)
    print(cls)
    return images, cls, one_hot_encoded(class_numbers=cls,
                                        num_classes=num_classes)
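The downSampling and sliding helpers called above are not included in the listing. A minimal sketch of what such helpers could look like, assuming downSampling resizes the image on disk and sliding cuts it into fixed-size patches; the patch size and stride below are made-up values for illustration, not the original implementation:

from PIL import Image
import numpy as np


def downSampling(path, width, height):
    # Hypothetical helper: load the image from disk and resize it to width x height.
    return Image.open(path).resize((width, height))


def sliding(img, patch_size=128, stride=64):
    # Hypothetical helper: cut the (already resized) image into overlapping
    # square patches that can each be fed to the network.
    arr = np.array(img, dtype=float)
    patches = []
    for y in range(0, arr.shape[0] - patch_size + 1, stride):
        for x in range(0, arr.shape[1] - patch_size + 1, stride):
            patches.append(arr[y:y + patch_size, x:x + patch_size])
    return patches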
Example no. 4
def load_test_data():
	images = np.zeros(
		shape=[_num_images_test, image_size, image_size, num_channels], 
		dtype=float)
	cls = np.zeros(
		shape=[_num_images_test],
		dtype=int)

	with open(data_path + 'fer2013.csv', 'rt') as csvfile:
		datareader = csv.reader(csvfile, delimiter =',')

		i = 0
		for row in datareader:
			if row[2] == 'PrivateTest':
				msg = "\r- Processing image: {0:>6} / {1}".format(i+1, _num_images_test)
				sys.stdout.write(msg)
				sys.stdout.flush()

				images[i], cls[i] = load_images_and_cls(row, file_path_cache_test)

				i += 1
			if i == _num_images_test:
				break

	print()

	return images, one_hot_encoded(
		class_numbers=cls,
		num_classes=num_classes)
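The load_images_and_cls helper used above is not part of the listing. For the FER2013 CSV layout (emotion label, a space-separated string of 48x48 pixel values, and a usage column), a minimal sketch could look like the following; the function name is taken from the call site above, and the caching argument is simply ignored here:

import numpy as np


def load_images_and_cls(row, cache_path=None):
    # Hypothetical helper: row[0] is the emotion label and row[1] the
    # space-separated string of 48 * 48 = 2304 pixel values.
    cls = int(row[0])
    pixels = np.array(row[1].split(), dtype=float)
    image = pixels.reshape(48, 48, 1)  # FER2013 images are 48x48 grayscale.
    return image, cls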
Example no. 5
def load_training_data():
    """
    Load all the training-data for the CIFAR-10 data-set.
    The data-set is split into 5 data-files which are merged here.
    Returns the images, class-numbers and one-hot encoded class-labels.
    """

    # Pre-allocate the arrays for the images and class-numbers for efficiency.
    images = np.zeros(shape=[_num_images_train, img_size, img_size, num_channels], dtype=float)
    cls = np.zeros(shape=[_num_images_train], dtype=int)

    # Begin-index for the current batch.
    begin = 0

    # For each data-file.
    for i in range(_num_files_train):
        # Load the images and class-numbers from the data-file.
        images_batch, cls_batch = _load_data(filename="data_batch_" + str(i + 1))

        # Number of images in this batch.
        num_images = len(images_batch)

        # End-index for the current batch.
        end = begin + num_images

        # Store the images into the array.
        images[begin:end, :] = images_batch

        # Store the class-numbers into the array.
        cls[begin:end] = cls_batch

        # The begin-index for the next batch is the current end-index.
        begin = end

    return images, cls, one_hot_encoded(class_numbers=cls, num_classes=num_classes)
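The _load_data helper that the CIFAR-10 examples rely on is not reproduced in this listing. A minimal sketch, assuming the standard CIFAR-10 Python pickle format (raw pixel data stored channel-first under the key b'data', labels under b'labels') and an assumed data_path location:

import os
import pickle

import numpy as np

img_size = 32
num_channels = 3
data_path = "data/CIFAR-10/cifar-10-batches-py/"  # assumed location of the extracted batches


def _load_data(filename):
    # Unpickle one CIFAR-10 batch file and return the images and class-numbers.
    with open(os.path.join(data_path, filename), 'rb') as f:
        data = pickle.load(f, encoding='bytes')

    raw = np.array(data[b'data'], dtype=float) / 255.0
    cls = np.array(data[b'labels'])

    # Reshape from [num_images, 3072] to [num_images, 32, 32, 3].
    images = raw.reshape([-1, num_channels, img_size, img_size])
    images = images.transpose([0, 2, 3, 1])

    return images, cls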
Example no. 6
    def __init__(self, data_dir="../../data/MNIST/", log_dir=None):
        """
        Load the MNIST data-set. Automatically downloads the files
        if they do not already exist locally.

        :param data_dir: Base-directory for downloading files.
        """

        # Copy args to self.
        self.data_dir = data_dir

        # Number of images in each sub-set.
        self.num_train = 55000
        self.num_val = 5000
        self.num_test = 10000

        # Download / load the training-set.
        x_train = self._load_images(filename=filename_x_train)
        y_train_cls = self._load_cls(filename=filename_y_train)

        # Split the training-set into train / validation.
        # Pixel-values are converted from ints between 0 and 255
        # to floats between 0.0 and 1.0.
        self.x_train = x_train[0:self.num_train] / 255.0
        self.x_val = x_train[self.num_train:] / 255.0
        self.y_train_cls = y_train_cls[0:self.num_train]
        self.y_val_cls = y_train_cls[self.num_train:]

        # Download / load the test-set.
        self.x_test = self._load_images(filename=filename_x_test) / 255.0
        self.y_test_cls = self._load_cls(filename=filename_y_test)

        # Convert the class-numbers from bytes to ints as that is needed
        # in some places in TensorFlow.
        self.y_train_cls = self.y_train_cls.astype(int)
        self.y_val_cls = self.y_val_cls.astype(int)
        self.y_test_cls = self.y_test_cls.astype(int)

        # Convert the integer class-numbers into one-hot encoded arrays.
        self.y_train = one_hot_encoded(class_numbers=self.y_train_cls,
                                       num_classes=self.num_classes)
        self.y_val = one_hot_encoded(class_numbers=self.y_val_cls,
                                     num_classes=self.num_classes)
        self.y_test = one_hot_encoded(class_numbers=self.y_test_cls,
                                      num_classes=self.num_classes)

        init_logger(log_dir)
Example no. 7
    def __init__(self, data_dir="data/MNIST/"):
        """
        Load the MNIST data-set. Automatically downloads the files
        if they do not already exist locally.

        :param data_dir: Base-directory for downloading files.
        """

        # Copy args to self.
        self.data_dir = data_dir

        # Number of images in each sub-set.
        self.num_train = 55000
        self.num_val = 5000
        self.num_test = 10000

        # Download / load the training-set.
        x_train = self._load_images(filename=filename_x_train)
        y_train_cls = self._load_cls(filename=filename_y_train)

        # Split the training-set into train / validation.
        # Pixel-values are converted from ints between 0 and 255
        # to floats between 0.0 and 1.0.
        self.x_train = x_train[0:self.num_train] / 255.0
        self.x_val = x_train[self.num_train:] / 255.0
        self.y_train_cls = y_train_cls[0:self.num_train]
        self.y_val_cls = y_train_cls[self.num_train:]

        # Download / load the test-set.
        self.x_test = self._load_images(filename=filename_x_test) / 255.0
        self.y_test_cls = self._load_cls(filename=filename_y_test)

        # Convert the class-numbers from bytes to ints as that is needed
        # in some places in TensorFlow.
        self.y_train_cls = self.y_train_cls.astype(int)
        self.y_val_cls = self.y_val_cls.astype(int)
        self.y_test_cls = self.y_test_cls.astype(int)

        # Convert the integer class-numbers into one-hot encoded arrays.
        self.y_train = one_hot_encoded(class_numbers=self.y_train_cls,
                                       num_classes=self.num_classes)
        self.y_val = one_hot_encoded(class_numbers=self.y_val_cls,
                                     num_classes=self.num_classes)
        self.y_test = one_hot_encoded(class_numbers=self.y_test_cls,
                                      num_classes=self.num_classes)
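A short usage sketch for a class with this constructor; the class name MNIST is an assumption for illustration, since the surrounding class definition is not shown in the listing:

import numpy as np

# Assumes the constructor above belongs to a class named MNIST.
data = MNIST(data_dir="data/MNIST/")

print("Training-set size:", data.num_train)
print("Validation-set size:", data.num_val)
print("Test-set size:", data.num_test)

# Pick a random batch of training images and their one-hot labels.
batch_size = 64
idx = np.random.choice(data.num_train, size=batch_size, replace=False)
x_batch = data.x_train[idx]
y_batch = data.y_train[idx]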
Example no. 8
def load_test_data():
    """
        Load all the test-data for the CIFAR-10 data-set.
        
        Returns the images, class-numbers and one-hot encoded class-labels.
        """
    
    images, cls = _load_data(filename="test_batch", train_test = False)
    
    return images, cls, one_hot_encoded(class_numbers=cls, num_classes=num_classes)
Example no. 9
def load_test_data():
    """
    Load all the test-data for the CIFAR-10 data-set.

    Returns the images, class-numbers and one-hot encoded class-labels.
    """

    images, cls = _load_data(filename="test_batch")

    return images, cls, one_hot_encoded(class_numbers=cls, num_classes=num_classes)
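Every example calls a one_hot_encoded helper that is not shown in the listing. A minimal sketch of such a helper:

import numpy as np


def one_hot_encoded(class_numbers, num_classes=None):
    # Convert an array of class-numbers into a [num_images, num_classes]
    # array of one-hot encoded labels (1.0 at the class index, 0.0 elsewhere).
    if num_classes is None:
        num_classes = np.max(class_numbers) + 1
    return np.eye(num_classes, dtype=float)[class_numbers]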
Example no. 10
def load_test_data():
    """
    CIFAR-10 데이터셋에 대한 테스트셋을 불러온다.

    이미지, 클래스 숫자, one-hot 인코딩된 클래스 라벨을 반환한다.
    """

    images, cls = _load_data(filename="test_batch")

    return images, cls, one_hot_encoded(class_numbers=cls, num_classes=num_classes)
    def _load_test_data(self):
        """
        Load all the test-data for the CIFAR-10 data-set.
        Returns the images, class-numbers and one-hot encoded class-labels.
        """

        self.x_test, self.x_test_cls = self._load_data(filename="test_batch")
        # self.x_test_ori = self.x_test.copy()
        self.y_test = one_hot_encoded(class_numbers=self.x_test_cls,
                                      num_classes=self.num_classes)
        self.num_test = len(self.x_test)
Example no. 12
def load_test_data(data_path1, data_path):
    """
    Returns the images, class-numbers and one-hot encoded class-labels.
    """

    images, cls = _load_data(filename="test_batch.txt",
                             data_path1=data_path1,
                             data_path=data_path)  ###.bin

    return images, cls, one_hot_encoded(class_numbers=cls,
                                        num_classes=num_classes)
    def _load_data(self, filename):
        """
        Load a pickled data-file from the data-set
        and return the converted images (see above) and the class-number
        for each image.
        """
        # Load the pickled data-file.
        data = self._unpickle(filename)
        # print(len(data.traindata))
        images = self._convert_images(data.traindata)
        return images, data.classes, one_hot_encoded(
            class_numbers=data.classes, num_classes=self.num_classes)
    def _load_training_data(self):
        """
        Load all the training-data for the CIFAR-10 data-set.
        The data-set is split into 5 data-files which are merged here.
        Returns the images, class-numbers and one-hot encoded class-labels.
        """

        # Pre-allocate the arrays for the images and class-numbers for efficiency.
        self.images_train = np.zeros(shape=[
            self._num_images_train, self.img_size, self.img_size,
            self.num_channels
        ],
                                     dtype=float)
        self.cls_train = np.zeros(shape=[self._num_images_train], dtype=int)

        # Begin-index for the current batch.
        begin = 0

        # For each data-file.
        for i in range(self._num_files_train):
            # Load the images and class-numbers from the data-file.
            images_batch, cls_batch = self._load_data(filename="data_batch_" +
                                                      str(i + 1))

            # Number of images in this batch.
            num_images = len(images_batch)

            # End-index for the current batch.
            end = begin + num_images

            # Store the images into the array.
            self.images_train[begin:end, :] = images_batch

            # Store the class-numbers into the array.
            self.cls_train[begin:end] = cls_batch

            # The begin-index for the next batch is the current end-index.
            begin = end

        self.labels_train = one_hot_encoded(class_numbers=self.cls_train,
                                            num_classes=self.num_classes)

        # Split into validation data
        self.num_val = int(round(self._num_images_train * 0.2))
        self.x_val = self.images_train[-self.num_val:]
        self.y_val_cls = self.cls_train[-self.num_val:]
        self.y_val = self.labels_train[-self.num_val:]

        # The remaining images (everything before the validation split) form
        # the training-set proper.
        self.num_train = self._num_images_train - self.num_val
        self.x_train = self.images_train[:self.num_train]
        self.y_train_cls = self.cls_train[:self.num_train]
        self.y_train = self.labels_train[:self.num_train]
Example no. 15
def load_testing_data():
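    """
    Load the test images listed in TEST_DICTIONARY together with their
    class-numbers.

    Returns the images, class-numbers and one-hot encoded class-labels.
    """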
    images = np.zeros(
        shape=[_num_images_test, img_width, img_height, num_channels],
        dtype=float)
    cls = np.zeros(shape=[_num_images_test], dtype=int)
    a = TEST_DICTIONARY.keys()
    files = os.listdir(os.path.join(data_path, 'testing'))
    for i in range(0, _num_images_test):
        f = files[i]
        if f in a:
            img = Image.open(os.path.join(data_path, 'testing', f))
            arr = np.array(img)
            images[i] = arr
            cls[i] = TEST_DICTIONARY[f]
    return images, cls, one_hot_encoded(class_numbers=cls,
                                        num_classes=num_classes)
Example no. 16
def load_validation_data():
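    """
    Load the validation images listed in VALIDATION_DICTIONARY together
    with their class-numbers.

    Returns the images, class-numbers and one-hot encoded class-labels.
    """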
    images = np.zeros(
        shape=[_num_images_validation, img_width, img_height, num_channels],
        dtype=float)
    cls = np.zeros(shape=[_num_images_validation], dtype=int)
    a = VALIDATION_DICTIONARY.keys()
    files = os.listdir(os.path.join(data_path, 'validation'))
    for i in range(0, _num_images_validation):
        f = files[i]
        if f in a:
            img = Image.open(os.path.join(data_path, 'validation', f))
            arr = np.array(img)
            # Only keep images with the expected width of 460 pixels.
            if len(arr[0]) == 460:
                images[i] = arr
            cls[i] = VALIDATION_DICTIONARY[f]
    return images, cls, one_hot_encoded(class_numbers=cls,
                                        num_classes=num_classes)
Example no. 17
def load_test_data(strNum):
    """
    Load all the test-data for the CIFAR-10 data-set.

    Returns the images, class-numbers and one-hot encoded class-labels.
    """

    filetype = ["_img", "_cls"]
    suffixs = ".pkl"

    images, cls = _load_data(filename="data_batch_" + str(strNum),
                             fileTypes=filetype,
                             suffix=suffixs)

    return images, cls, one_hot_encoded(class_numbers=cls,
                                        num_classes=num_classes)
Example no. 18
def load_training_data():
    """
    Load all the training-data for the CIFAR-10 data-set.
    CIFAR-10 데이터셋에 대한 학습데이터셋을 불러온다.

    5개의 파일 속에 나뉘어진 이 데이터셋은 여기서 병합된다

    이미지, 클래스 숫자, one-hot 인코딩된 클래스 라벨을 반환한다.

    """

    # Pre-allocate the arrays for the images and class-numbers for efficiency.
    images = np.zeros(shape=[_num_images_train, img_size, img_size, num_channels], dtype=float)
    cls = np.zeros(shape=[_num_images_train], dtype=int)

    # Begin-index for the current batch.
    begin = 0

    # For each data-file.
    for i in range(_num_files_train):
        # Load the images and class-numbers from the data-file.
        images_batch, cls_batch = _load_data(filename="data_batch_" + str(i + 1))

        # Number of images in this batch.
        num_images = len(images_batch)

        # End-index for the current batch.
        end = begin + num_images

        # Store the images into the array.
        images[begin:end, :] = images_batch

        # Store the class-numbers into the array.
        cls[begin:end] = cls_batch

        # The begin-index for the next batch is the current end-index.
        begin = end

    return images, cls, one_hot_encoded(class_numbers=cls, num_classes=num_classes)
Example no. 19
def load_training_data():
    """
    Load all the training-data for the CIFAR-10 data-set.

    The data-set is split into 5 data-files which are merged here.

    Returns the images, class-numbers and one-hot encoded class-labels.
    """

    # Pre-allocate the arrays for the images and class-numbers for efficiency.
    images = np.zeros(shape=[_num_images_train, img_size, img_size, num_channels], dtype=float)
    cls = np.zeros(shape=[_num_images_train], dtype=int)

    # Begin-index for the current batch.
    begin = 0

    # For each data-file.
    for i in range(_num_files_train):
        # Load the images and class-numbers from the data-file.
        images_batch, cls_batch = _load_data(filename="data_batch_" + str(i + 1))

        # Number of images in this batch.
        num_images = len(images_batch)

        # End-index for the current batch.
        end = begin + num_images

        # Store the images into the array.
        images[begin:end, :] = images_batch

        # Store the class-numbers into the array.
        cls[begin:end] = cls_batch

        # The begin-index for the next batch is the current end-index.
        begin = end

    return images, cls, one_hot_encoded(class_numbers=cls, num_classes=num_classes)
Example no. 20
def load_data(filename):
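    """
    Load the images and class-numbers from the given data-file and
    return them together with the one-hot encoded class-labels.
    """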
    images, cls = _load_data(filename=filename)

    return images, cls, one_hot_encoded(class_numbers=cls,
                                        num_classes=num_classes)
def load_test_data():
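    """
    Load the test-data and return the images, class-numbers and
    one-hot encoded class-labels.
    """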

    images, cls = _load_data(filename="test_batch")

    return images, cls, one_hot_encoded(class_numbers=cls,
                                        num_classes=num_classes)