def run_tests():
    # Run the project unit tests against each implemented function.
    import problem_unittests as t
    t.test_folder_path(cifar10_dataset_folder_path)
    t.test_normalize(normalize)
    t.test_one_hot_encode(one_hot_encode)
    t.test_nn_image_inputs(neural_net_image_input)
    t.test_nn_label_inputs(neural_net_label_input)
    t.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
    t.test_con_pool(conv2d_maxpool)
    t.test_flatten(flatten)
    t.test_fully_conn(fully_conn)
    t.test_output(output)
    t.test_conv_net(conv_net)
    t.test_train_nn(train_neural_network)
def runCifarTest(self):
    # Prefer the pre-staged Floyd copy of the archive; otherwise use the
    # default archive name in the working directory.
    if isfile(self.floyd_cifar10_location):
        self.tar_gz_path = self.floyd_cifar10_location
    else:
        self.tar_gz_path = 'cifar-10-python.tar.gz'

    # Download the archive only if it is not already present locally.
    if not isfile(self.tar_gz_path):
        with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
            urlretrieve(self.url, self.tar_gz_path, pbar.hook)

    # Extract the dataset only if the target folder does not exist yet;
    # the with-block closes the archive automatically.
    if not isdir(self.cifar10_dataset_folder_path):
        with tarfile.open(self.tar_gz_path) as tar:
            tar.extractall()

    tests.test_folder_path(self.cifar10_dataset_folder_path, self)
if not isfile('cifar-10-python.tar.gz'):
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
        urlretrieve(
            'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
            'cifar-10-python.tar.gz',
            pbar.hook)

if not isdir(cifar10_dataset_folder_path):
    with tarfile.open('cifar-10-python.tar.gz') as tar:
        tar.extractall()

tests.test_folder_path(cifar10_dataset_folder_path)


# ## Explore the Data
# The dataset is broken into batches to prevent your machine from running out of memory. The CIFAR-10 dataset consists of 5 batches, named `data_batch_1`, `data_batch_2`, etc. Each batch contains labels and images belonging to one of the following classes:
# * airplane
# * automobile
# * bird
# * cat
# * deer
# * dog
# * frog
# * horse
# * ship
# * truck
#
# Understanding a dataset is part of making predictions on the data. Play around with the code cell below by changing the `batch_id` and `sample_id`. The `batch_id` is the id for a batch (1-5). The `sample_id` is the id for an image and label pair in the batch.
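# A minimal sketch of such an exploration cell follows. It unpickles one batch directly,
# assuming the standard pickled CIFAR-10 batch layout (a dict with 'data' and 'labels'
# entries) rather than any project helper module; the `batch_id` and `sample_id` values
# below are illustrative, not taken from the project.
import pickle
import numpy as np

batch_id = 1
sample_id = 7

with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
    batch = pickle.load(file, encoding='latin1')

# Each row of 'data' holds a flattened 32x32x3 image: 1024 red, then 1024 green,
# then 1024 blue values. Reshape to (height, width, channel) order.
features = np.array(batch['data']).reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']

print('Samples in batch {}: {}'.format(batch_id, len(features)))
print('Image shape: {}'.format(features[sample_id].shape))
print('Label of sample {}: {}'.format(sample_id, labels[sample_id]))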
        # Tail of the DLProgress.hook callback: remember the last reported block.
        self.last_block = block_num


if not isfile(tar_gz_path):
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
        urlretrieve(
            'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
            tar_gz_path,
            pbar.hook)

if not isdir(cifar10_dataset_folder_path):
    with tarfile.open(tar_gz_path) as tar:
        tar.extractall()

tests.test_folder_path(cifar10_dataset_folder_path)
    # Members of the DownloadProgress progress-bar class used below.
    last_block = 0

    def hook(self, block_num=1, block_size=1, total_size=None):
        # Report download progress: advance the bar by the number of bytes
        # received since the last callback.
        self.total = total_size
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num


# Download the archive only if the local copy does not already exist.
if not isfile(download_folder + 'my_file.tar.gz'):
    with DownloadProgress(unit='B', unit_scale=True, miniters=1, desc='File downloaded') as progress_bar:
        urlretrieve(download_link, download_folder + 'my_file.tar.gz', progress_bar.hook)

tests.test_folder_path(download_folder)  # test whether the file downloaded properly


def main():
    # Compare the published checksum against the checksum of the downloaded file.
    given_checksum_output = urlopen(checksum_file_link).read().decode('UTF-8').split()[0]
    file_checksum_output = os.popen(
        checksum_format + ' ' + download_folder + '/' + 'my_file.tar.gz').read().split()[0]
    if file_checksum_output == given_checksum_output:
        print('Congratulations, the file you have downloaded is identical to the source!')
    else:
        print('Sorry, the file you have downloaded is not identical to the source!')