Example #1
    def make_minibatch(self, samples):
        # Make the next minibatch
        source = [sample[0] for sample in samples]
        target = [sample[1] for sample in samples]

        def transform(x, w=False):
            return np.reshape(x, (-1, self.seqlength, 1) if w else (-1, self.seqlength))

        source = transform(source)
        target = transform(target)
        input1, label1, input2, label2, word1, word2 = [], [], [], [], [], []
        # input1/input2 are the source word's two position indices; label1 is
        # the source word's second index and label2 the target word's first
        for i in range(len(source)):
            for w in range(len(source[i])):
                input1.append(self.word_position[source[i][w]][0])
                input2.append(self.word_position[source[i][w]][1])
                label1.append(self.word_position[source[i][w]][1])
                label2.append(self.word_position[target[i][w]][0])
                word1.append(source[i][w])
                word2.append(target[i][w])
        return \
            cntk.Value.one_hot(batch=transform(input1), num_classes=self.vocab_base), \
            cntk.Value.one_hot(batch=transform(input2), num_classes=self.vocab_base), \
            cntk.Value.one_hot(batch=transform(label1), num_classes=self.vocab_base), \
            cntk.Value.one_hot(batch=transform(label2), num_classes=self.vocab_base), \
            cntk.Value(batch=np.asarray(transform(word1, True), dtype=np.float32)), \
            cntk.Value(batch=np.asarray(transform(word2, True), dtype=np.float32))
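All four returned index streams go through cntk.Value.one_hot, which turns batches of integer indices into sparse one-hot Values. A minimal standalone sketch of that call, with a made-up vocabulary size and index data:

import cntk as C

# Hypothetical vocabulary size and two short index sequences
num_classes = 6
indices = [[0, 3, 5], [2, 4]]

# Each index becomes a sparse one-hot vector of length num_classes
val = C.Value.one_hot(batch=indices, num_classes=num_classes)
print(val.is_sparse)   # one-hot batches are stored in sparse form
print(val.shape)       # (#sequences, max sequence length, num_classes)

# A sparse sequence input can consume the Value directly
x = C.sequence.input_variable(shape=num_classes, is_sparse=True)
print(C.sequence.reduce_sum(x).eval({x: val}))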
Example #2
def test_value_properties():
    ndav = C.NDArrayView((1, 2, 3), np.float32, device=C.cpu())
    val = C.Value(batch=ndav)

    dev = val.device
    assert isinstance(dev, C.DeviceDescriptor)
    assert str(dev) == 'CPU'

    assert not val.is_read_only

    assert not val.is_sparse

    assert val.dtype == np.float32
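For comparison with the dense case tested above, a Value built from one-hot index data reports is_sparse as True. A short sketch in the same style, assuming the default float32 dtype:

import numpy as np
import cntk as C

sparse_val = C.Value.one_hot(batch=[[0, 2], [1]], num_classes=3)
assert sparse_val.is_sparse
assert sparse_val.dtype == np.float32              # default dtype
assert isinstance(sparse_val.device, C.DeviceDescriptor)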
Example #3
def test_sanitize_number_of_parameters():
    i1 = C.input_variable(shape=(1, ), needs_gradient=True)
    i2 = C.input_variable(shape=(1, ), needs_gradient=True)
    res = i1 + i2

    with pytest.raises(ValueError):
        val = C.Value(
            C.NDArrayView.from_dense(np.asarray([[[1]]], dtype=np.float32)))
        res.forward(val)

    with pytest.raises(ValueError):
        res.eval()

    with pytest.raises(ValueError):
        res.forward({i1: [[1]]})
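The test above checks that forward/eval raise when not every input is bound. For contrast, a minimal sketch of the fully bound call that succeeds (the literal values are arbitrary):

import numpy as np
import cntk as C

i1 = C.input_variable(shape=(1,))
i2 = C.input_variable(shape=(1,))
res = i1 + i2

# Binding both inputs avoids the ValueError raised in the test
out = res.eval({i1: np.asarray([[1]], dtype=np.float32),
                i2: np.asarray([[2]], dtype=np.float32)})
print(out)   # one sample in the batch: [[3.]]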
Example #4
    def benchmark(self):
        # Common filename suffix for logs and saved models
        suffix = "cntk_{}_{}by{}_{}".format(self.dataset, self.resize_size[0], self.resize_size[1], self.preprocessing)

        # Construct model, io and metrics
        cntk_input = C.input_variable((3, self.resize_size[0], self.resize_size[1]), np.float32)
        cntk_output = C.input_variable((self.class_num), np.float32)
        self.constructCNN(cntk_input)
        cntk_cost = Softmax(self.cntk_model, cntk_output)
        cntk_error = ClassificationError(self.cntk_model, cntk_output)

        # Prepare training/validation/testing sets
        cntk_train_x = np.ascontiguousarray(np.vstack([np.expand_dims(x, axis=0).transpose([0,3,1,2]).astype('float32')/255 for x in self.x_train]), dtype=np.float32)
        cntk_valid_x = np.ascontiguousarray(np.vstack([np.expand_dims(x, axis=0).transpose([0,3,1,2]).astype('float32')/255 for x in self.x_valid]), dtype=np.float32)
        cntk_test_x = np.ascontiguousarray(np.vstack([np.expand_dims(x, axis=0).transpose([0,3,1,2]).astype('float32')/255 for x in self.testImages]), dtype=np.float32)

        cntk_train_y = C.one_hot(C.input_variable(1), self.class_num, sparse_output=False)(np.expand_dims(np.array(self.y_train, dtype='f'), axis=1))
        cntk_valid_y = C.one_hot(C.input_variable(1), self.class_num, sparse_output=False)(np.expand_dims(np.array(self.y_valid, dtype='f'), axis=1))
        cntk_test_y = C.one_hot(C.input_variable(1), self.class_num, sparse_output=False)(np.expand_dims(np.array(self.testLabels, dtype='f'), axis=1))


        # Trainer and mb source
        cntk_learner = SGD(self.cntk_model.parameters, lr=0.01, momentum=0.9, unit_gain=False, use_mean_gradient=True) # To compare performance with other frameworks
        cntk_trainer = C.Trainer(self.cntk_model, (cntk_cost, cntk_error), cntk_learner)
        cntk_train_src = C.io.MinibatchSourceFromData(dict(x=C.Value(cntk_train_x), y=C.Value(cntk_train_y)), max_samples=len(cntk_train_x))
        cntk_valid_src = C.io.MinibatchSourceFromData(dict(x=C.Value(cntk_valid_x), y=C.Value(cntk_valid_y)), max_samples=len(cntk_valid_x))
        cntk_test_src = C.io.MinibatchSourceFromData(dict(x=C.Value(cntk_test_x), y=C.Value(cntk_test_y)), max_samples=len(cntk_test_x))

        # Mapping for training, validation and testing
        def getMap(src, bs):
            batch = src.next_minibatch(bs)
            return {
                cntk_input: batch[src.streams['x']],
                cntk_output: batch[src.streams['y']]
            }

        # Create log file
        train_batch_count = len(self.x_train) // self.batch_size + 1
        valid_batch_count = len(self.x_valid) // self.batch_size + 1
        test_batch_count = len(self.testImages) // self.batch_size + 1
        filename = "./saved_data/{}/{}/callback_data_{}.h5".format(self.network_type, self.devices[0], suffix)
        f = DLHelper.init_h5py(filename, self.epoch_num, train_batch_count * self.epoch_num)

        # Start training
        try:
            batch_count = 0
            f['.']['time']['train']['start_time'][0] = time.time()

            # Each epoch
            for epoch in range(0, self.epoch_num):
                cntk_train_src.restore_from_checkpoint({'cursor': 0, 'total_num_samples': 0})
                cntk_valid_src.restore_from_checkpoint({'cursor': 0, 'total_num_samples': 0})
                cntk_test_src.restore_from_checkpoint({'cursor': 0, 'total_num_samples': 0})

                # Each batch
                for i in range(train_batch_count):
                    batch_count += 1

                    # Read a mini batch from the training source
                    data = getMap(cntk_train_src, self.batch_size)

                    # Train a batch
                    start = default_timer()
                    cntk_trainer.train_minibatch(data)

                    # Save training loss
                    training_loss = cntk_trainer.previous_minibatch_loss_average

                    # Save the batch time (timed right after train_minibatch returns)
                    train_batch_time = default_timer() - start
                    f['.']['time']['train_batch'][batch_count-1] = train_batch_time

                    # Continue saving training loss
                    eval_error = cntk_trainer.previous_minibatch_evaluation_average
                    f['.']['cost']['train'][batch_count-1] = np.float32(training_loss)
                    f['.']['accuracy']['train'][batch_count-1] = np.float32((1.0 - eval_error) * 100.0)

                    if i % 30 == 0:  # Print every 30 batches
                        print("Epoch: {0}, Minibatch: {1}, Loss: {2:.4f}, Error: {3:.2f}%".format(epoch, i, training_loss, eval_error * 100.0))

                # Save batch marker
                f['.']['time_markers']['minibatch'][epoch] = np.float32(batch_count)

                # Validation
                validation_loss = 0
                validation_error = 0
                for j in range(valid_batch_count):
                    # Read a mini batch from the validation source
                    data = getMap(cntk_valid_src, self.batch_size)

                    # Validate a batch
                    batch_x, batch_y = data[cntk_input].asarray(), data[cntk_output].asarray()
                    validation_loss += cntk_cost(batch_x, batch_y).sum()
                    validation_error += cntk_trainer.test_minibatch(data) * len(batch_x)

                validation_loss /= len(self.x_valid)
                validation_error /= len(self.x_valid)

                # Save validation loss for the whole epoch
                f['.']['cost']['loss'][epoch] = np.float32(validation_loss)
                f['.']['accuracy']['valid'][epoch] = np.float32((1.0 - validation_error) * 100.0)
                print("[Validation]")
                print("Epoch: {0}, Loss: {1:.4f}, Error: {2:.2f}%\n".format(epoch, validation_loss, validation_error * 100.0))

            # Save related params
            f['.']['time']['train']['end_time'][0] = time.time() # Save training time
            f['.']['config'].attrs["total_minibatches"] = batch_count
            f['.']['time_markers'].attrs['minibatches_complete'] = batch_count

            # Testing
            test_error = 0
            for j in range(test_batch_count):
                # Read a mini batch from the test source
                data = getMap(cntk_test_src, self.batch_size)

                # Test a batch
                test_error += cntk_trainer.test_minibatch(data) * data[cntk_input].num_samples

            test_error /= len(self.testImages)

            f['.']['infer_acc']['accuracy'][0] = np.float32((1.0 - test_error) * 100.0)
            print("Accuracy score is %f" % (1.0 - test_error))

            self.cntk_model.save("./saved_models/{}/{}/{}.pth".format(self.network_type, self.devices[0], suffix))

        except KeyboardInterrupt:
            pass
        except Exception:
            raise
        finally:
            print("Close file descriptor")
            f.close()
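The benchmark above wraps NumPy arrays in C.Value and serves them through C.io.MinibatchSourceFromData. A minimal standalone sketch of that pattern, with toy shapes and random data:

import numpy as np
import cntk as C

# Toy data: 10 samples with 4 features and a 3-class one-hot label
features = np.random.rand(10, 4).astype(np.float32)
labels = np.eye(3, dtype=np.float32)[np.random.randint(0, 3, size=10)]

src = C.io.MinibatchSourceFromData(
    dict(x=C.Value(features), y=C.Value(labels)), max_samples=len(features))

x = C.input_variable(4)
y = C.input_variable(3)

batch = src.next_minibatch(4)
feed = {x: batch[src.streams['x']], y: batch[src.streams['y']]}
print(feed[x].num_samples)   # 4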
Example #5
    def next_minibatch(self,
                       mb_size_in_samples,
                       number_of_workers=1,
                       worker_rank=0,
                       device=None):
        ''' Worker loads TIF images and extracts samples from them '''

        if not self.already_loaded_images[worker_rank]:
            # It's time to load all images into memory. This can take time, so
            # we log our progress to stdout
            self.already_loaded_images[worker_rank] = True
            for i, tile_name in enumerate(self.tile_names[worker_rank]):
                try:
                    naip_image, landcover_image = load_image_pair(tile_name)
                    self.naip_images[worker_rank].append(naip_image)
                    self.landcover_images[worker_rank].append(landcover_image)
                    print('Worker {} loaded its {}th image'.format(
                        worker_rank, i))
                except ValueError:
                    print('Failed to load TIF pair: {}'.format(tile_name))
            print('Worker {} completed image loading'.format(worker_rank))

        if self.current_mb_indices[worker_rank] == 0:
            # It's time to advance the image index
            self.current_image_indices[worker_rank] = (
                self.current_image_indices[worker_rank] + 1) % len(
                    self.naip_images[worker_rank])
        idx = self.current_image_indices[worker_rank]

        # Feature data have dimensions:
        #   num_color_channels x block_size x block_size
        # Label data have dimensions: block_size x block_size
        features = np.zeros((mb_size_in_samples, self.num_color_channels,
                             self.block_size, self.block_size),
                            dtype=np.float32)
        labels = np.zeros(
            (mb_size_in_samples, self.block_size, self.block_size),
            dtype=np.float32)

        # Randomly select subsets of the image for training
        w, h = self.naip_images[worker_rank][idx].shape[1:]
        samples_retained = 0
        while samples_retained < mb_size_in_samples:
            i = np.random.randint(0, w - self.block_size)
            j = np.random.randint(0, h - self.block_size)
            bounds = (i, j, self.block_size, self.block_size)
            label_slice = get_cropped_data(
                self.landcover_images[worker_rank][idx], bounds, False)
            if interesting_patch(label_slice):
                features[samples_retained, :, :, :] = get_cropped_data(
                    self.naip_images[worker_rank][idx], bounds, True)
                labels[samples_retained, :, :] = label_slice
                samples_retained += 1

        # Convert the label data to one-hot, then convert arrays to Values
        f_data = cntk.Value(batch=features)
        l_data = cntk.Value(batch=self.oh_tf.eval({self.x: labels}))

        result = {
            self.fsi:
            cntk.io.MinibatchData(f_data, mb_size_in_samples,
                                  mb_size_in_samples, False),
            self.lsi:
            cntk.io.MinibatchData(l_data, mb_size_in_samples,
                                  mb_size_in_samples, False)
        }

        # Minibatch collection complete: update minibatch index so we know
        # how many more minibatches to collect using this TIFF pair
        self.current_mb_indices[worker_rank] = (
            1 +
            self.current_mb_indices[worker_rank]) % self.minibatches_per_image
        return result
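The labels above are converted to one-hot by evaluating a small CNTK graph (self.oh_tf applied to self.x). A hedged sketch of how such a conversion graph can be built with C.one_hot, using a hypothetical 3-class, 2x2 label map:

import numpy as np
import cntk as C

num_classes = 3
block_size = 2

# A batch of one 2x2 patch of integer class labels
labels = np.array([[[0, 2],
                    [1, 1]]], dtype=np.float32)

x = C.input_variable((block_size, block_size))
oh_tf = C.one_hot(x, num_classes)          # appends a class axis of size 3

one_hot_labels = oh_tf.eval({x: labels})
print(one_hot_labels.shape)                # (1, 2, 2, 3) with the default axis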
    def next_minibatch(self, mb_size, patch_freq_per_class):
        features = np.zeros((mb_size, self.num_color_channels, self.block_size,
                             self.block_size),
                            dtype=np.float32)
        landcover = np.zeros((mb_size, self.num_landcover_classes,
                              self.block_size, self.block_size),
                             dtype=np.float32)
        lc_weight_map = np.zeros(
            (mb_size, 1, self.block_size, self.block_size), dtype=np.float32)
        masks = np.zeros(
            (mb_size, self.num_nlcd_classes, self.block_size, self.block_size),
            dtype=np.float32)
        interval_centers = np.zeros(
            (mb_size, self.num_nlcd_classes, self.num_landcover_classes),
            dtype=np.float32)
        interval_radii = np.zeros(
            (mb_size, self.num_nlcd_classes, self.num_landcover_classes),
            dtype=np.float32)

        # Sample patches according to labels
        ins_id = 0
        while ins_id < mb_size:
            self.class_iter_idx = (self.class_iter_idx +
                                   1) % len(patch_freq_per_class)
            patch_freq = patch_freq_per_class[self.class_iter_idx]

            self.freq_control_arr[self.class_iter_idx] += patch_freq
            while self.freq_control_arr[
                    self.class_iter_idx] > 1 and ins_id < mb_size:
                self.freq_control_arr[self.class_iter_idx] -= 1

                if self.num_highres is None:
                    if len(self.patch_list_for_all_classes[
                            self.class_iter_idx]) == 0:
                        continue
                    naip_slice, nlcd_slice, lc_slice, nlcd_class_count = \
                        self.sample_slices_from_list(self.patch_list_for_all_classes[self.class_iter_idx])
                else:
                    CALEB_PROB = 0.1
                    # Sample from the high-res patches with probability
                    # CALEB_PROB; otherwise sample from the super-res patches
                    if np.random.rand() < CALEB_PROB:
                        if len(self.highres_patch_list_for_all_classes[
                                self.class_iter_idx]) == 0:
                            continue
                        naip_slice, nlcd_slice, lc_slice, nlcd_class_count = \
                            self.sample_slices_from_list(self.highres_patch_list_for_all_classes[self.class_iter_idx])
                    else:
                        if len(self.superres_patch_list_for_all_classes[
                                self.class_iter_idx]) == 0:
                            continue
                        naip_slice, nlcd_slice, lc_slice, nlcd_class_count = \
                            self.sample_slices_from_list(self.superres_patch_list_for_all_classes[self.class_iter_idx])

                naip_slice = color_aug(naip_slice)
                self.class_count_arr += nlcd_class_count

                features[ins_id, :, :, :] = naip_slice
                landcover[ins_id, :, :, :] = self.to_one_hot(
                    lc_slice, self.num_landcover_classes)
                lc_weight_map[ins_id, :, :, :] = 1.0
                masks[ins_id, :, :, :] = self.to_one_hot(
                    nlcd_slice, self.num_nlcd_classes)
                interval_centers[ins_id, :, :] = nlcd_dist
                interval_radii[ins_id, :, :] = nlcd_var
                ins_id += 1

                # TODO: Usually about 50% of patches have high-res labels, so the random
                # sampling below would pick a patch with high-res labels roughly half the
                # time. But once the other to-do item in this source is finished, there may
                # be only 1 to 256 patches with high-res labels, and the probability of
                # sampling one of them becomes very small. Change the code here so that a
                # patch with high-res labels is sampled with some fixed probability X%;
                # otherwise the small amount of high-res data will be effectively ignored.

                #naip_slice, nlcd_slice, lc_slice, nlcd_class_count = \
                #        self.sample_slices_from_list(self.patch_list_for_all_classes[self.class_iter_idx])

        self.print_class_count()
        result = {
            self.fsi:
            cntk.io.MinibatchData(cntk.Value(batch=features), mb_size, mb_size,
                                  False),
            self.lsi:
            cntk.io.MinibatchData(cntk.Value(batch=landcover), mb_size,
                                  mb_size, False),
            self.lwi:
            cntk.io.MinibatchData(cntk.Value(batch=lc_weight_map), mb_size,
                                  mb_size, False),
            self.msi:
            cntk.io.MinibatchData(cntk.Value(batch=masks), mb_size, mb_size,
                                  False),
            self.csi:
            cntk.io.MinibatchData(cntk.Value(batch=interval_centers), mb_size,
                                  mb_size, False),
            self.rsi:
            cntk.io.MinibatchData(cntk.Value(batch=interval_radii), mb_size,
                                  mb_size, False)
        }

        return result
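Both next_minibatch variants return the same structure: a dict mapping stream information objects to cntk.io.MinibatchData wrapping a dense cntk.Value. A minimal sketch of that wrapping, with an illustrative stream definition and shapes:

import numpy as np
import cntk as C

mb_size, channels, block_size = 8, 4, 16
features = np.zeros((mb_size, channels, block_size, block_size), dtype=np.float32)

# Stream information as used by user-defined minibatch sources
fsi = C.io.StreamInformation('features', 0, 'dense', np.float32,
                             (channels, block_size, block_size))

minibatch = {
    fsi: C.io.MinibatchData(C.Value(batch=features), mb_size, mb_size, False)
}
print(minibatch[fsi].num_samples)   # 8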