Example #1
    def do_training(self):
        dataloader = DataLoader(self.dataset,
                                batch_size=self.args['batch_size_test'],
                                shuffle=False,
                                num_workers=6,
                                drop_last=True)

        metric_l = []
        count = 0

        bar_form = '{l_bar}{bar} | {n_fmt}/{total_fmt} [{remaining} {postfix}]'

        with tqdm(total=len(dataloader),
                  desc='Epochs {}/{}'.format(1, self.args['num_epochs']),
                  bar_format=bar_form) as pbar:

            for train in dataloader:
                sess_array, sess_mask, sessids, lastv = train
                lastv = lastv.to(self.device)
                sess_array, sess_mask = shrink_wrap(sess_array, sess_mask)
                MB = sess_array.shape[0]
                sess_mask = sess_mask.to(self.device)
                sess_array = sess_array.to(self.device)

                SL = sess_mask.sum(1).long()
                ss = np.vstack([
                    to_categorical(sess_array[ii, SL[ii] - 1], self.args['P'])
                    for ii in range(sess_array.shape[0])
                ]) + np.vstack(
                    [to_categorical(lv, self.args['P']) for lv in lastv])
                self.co_counts += np.matmul(ss.T, ss)

                pbar.update(1)

        self.corr = torch.tensor(cov2corr(self.co_counts))
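Note: every snippet on this page leans on some flavour of to_categorical. A minimal one-hot sketch (written here for illustration only, not the helper these projects actually import) makes the co-occurrence update above easier to read: ss is the sum of two one-hot rows, and ss.T @ ss increments the item co-occurrence counts.

import numpy as np

def to_categorical_sketch(y, num_classes):
    # Return a (len(y), num_classes) array with a 1 in each label's column.
    y = np.asarray(y, dtype=int).ravel()
    one_hot = np.zeros((y.shape[0], num_classes), dtype=np.float32)
    one_hot[np.arange(y.shape[0]), y] = 1.0
    return one_hot

# Sum of two one-hot rows, as in the update above (last observed item + target item).
ss = to_categorical_sketch([2], 5) + to_categorical_sketch([4], 5)
print(ss)                   # [[0. 0. 1. 0. 1.]]
print(np.matmul(ss.T, ss))  # 5x5 co-occurrence increment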
Example #2
def read_img(pat_id):
    """
    Read in the raw images.
    :param pat_id: the id of the patient to read in
    :return: the images and the ground truth
    """
    assert os.path.exists(
        '../input/PnpAda_release_data/test_ct_image_n_labels/image_ct_{}.nii.gz'
        .format(pat_id)), "The specified pat_id does not exist: {}".format(
            pat_id)
    assert os.path.exists(
        '../input/PnpAda_release_data/test_ct_image_n_labels/gth_ct_{}.nii.gz'
        .format(pat_id)), "The specified pat_id does not exist: {}".format(
            pat_id)
    img, _, _ = load_nii(
        '../input/PnpAda_release_data/test_ct_image_n_labels/image_ct_{}.nii.gz'
        .format(pat_id))
    mask, _, _ = load_nii(
        '../input/PnpAda_release_data/test_ct_image_n_labels/gth_ct_{}.nii.gz'.
        format(pat_id))
    mask = np.array(mask, dtype=int)  # np.int was removed in NumPy >= 1.24
    axis = 2
    img = np.moveaxis(img, axis, 0)[:, ::-1, ::-1]
    mask = np.moveaxis(mask, axis, 0)[:, ::-1, ::-1]
    imgs = []
    for i in range(img.shape[0]):
        imgs.append(img[[i - 1, i, (i + 1) % img.shape[0]]])
    masks = to_categorical(mask=mask[:, np.newaxis, ...], num_classes=5)
    return np.array(imgs, dtype=np.float32), masks
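A toy shape check of the neighbour-slice stacking in read_img, using a synthetic volume in place of the NIfTI files (no real data assumed):

import numpy as np

img = np.random.rand(10, 256, 256).astype(np.float32)   # (slices, H, W), as after moveaxis
imgs = [img[[i - 1, i, (i + 1) % img.shape[0]]] for i in range(img.shape[0])]
print(np.array(imgs, dtype=np.float32).shape)  # (10, 3, 256, 256): each slice stacked with its previous and next slice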
Example #3
    def next_sequence_batch(self):
        """
        Get a new batch of event sequences by chopping a long sequence into
        pieces.
        """

        # Load a new sequence if all events of the current sequence have been
        # used; otherwise keep consuming the previously loaded sequence.
        if self.num_events_of_sample <= \
                self.num_events_per_batch * (self.batch_idx + 1):
            self.event_sequence = self.next_sequence()

            # Get class label, which is the same for all events in a sequence.
            self.y_b = np.broadcast_to(
                to_categorical([self.labels[self.dvs_sample_idx]],
                               self.num_classes),
                (self.batch_size, self.num_classes))

            # Generate frames from events.
            self.frames_from_sequence = get_frames_from_sequence(
                self.event_sequence['x'], self.event_sequence['y'],
                self.num_events_per_sample, self.chip_size, self.target_size)

        # From the current event sequence, extract the next bunch of events and
        # stack them as a batch of small sequences.
        self.x_b_xaddr, self.x_b_yaddr, self.x_b_ts = extract_batch(
            self.event_sequence['x'], self.event_sequence['y'],
            self.event_sequence['ts'], self.batch_size, self.batch_idx,
            self.num_events_per_sample, self.chip_size, self.target_size)

        self.batch_idx += 1

        return self.x_b_xaddr, self.x_b_yaddr, self.x_b_ts, self.y_b
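The label broadcast above replicates one sequence's single class label across a whole batch of event slices; np.broadcast_to returns a read-only view, so no copies are made. A NumPy-only illustration:

import numpy as np

num_classes, batch_size, label = 10, 32, 3
one_hot = np.eye(num_classes, dtype=np.float32)[[label]]   # shape (1, num_classes)
y_b = np.broadcast_to(one_hot, (batch_size, num_classes))  # shape (batch_size, num_classes)
print(y_b.shape, int(y_b[0].argmax()))                     # (32, 10) 3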
Example #4
    def __next__(self):
        x_batch = []
        y_batch = []
        z_batch = []

        indices = []
        if self._totalcount >= self._n_samples:
            self._totalcount = 0
            raise StopIteration
        for i in range(self._batch_size):
            indices.append(self._index)
            self._index += 1
            self._totalcount += 1
            self._index = self._index % self._len
            if self._totalcount >= self._n_samples:
                break
        ids_train_batch = self._data.iloc[self._shuffle_indices[indices]]

        for _id in ids_train_batch.values:
            img_path, mask_path, vertex_path = self.get_image_paths(id=_id)

            img, mask, vertex = self.get_images_masks(img_path=img_path,
                                                      mask_path=mask_path,
                                                      vertex_path=vertex_path)
            mask = np.expand_dims(mask, axis=-1)
            assert mask.ndim == 3

            x_batch.append(img)
            y_batch.append(mask)
            z_batch.append(vertex)

        # optional data augmentation
        if self._apply_aug:
            if self._aug2:
                x_batch, y_batch = ImageProcessor.augmentation2(
                    np.array(x_batch), np.array(y_batch))
            else:
                x_batch, y_batch = ImageProcessor.augmentation(
                    np.array(x_batch), np.array(y_batch))
        # scale images to [0, 1]
        x_batch = np.array(x_batch, np.float32) / 255.
        if self._crop_size:
            x_batch = ImageProcessor.crop_volume(x_batch,
                                                 crop_size=self._crop_size //
                                                 2)
            y_batch = ImageProcessor.crop_volume(np.array(y_batch),
                                                 crop_size=self._crop_size //
                                                 2)
        if self._channel == "channel_first":
            x_batch = np.moveaxis(x_batch, -1, 1)
        y_batch = to_categorical(np.array(y_batch),
                                 num_classes=4,
                                 channel=self._channel)
        z_batch = np.array(z_batch, np.float32) / 255.

        return x_batch, y_batch, z_batch
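The channel keyword in the to_categorical call above is not part of Keras' helper; it comes from this project's own utility. A plausible sketch, assuming it one-hot encodes a (N, H, W[, 1]) mask and moves the class axis to position 1 when channel == "channel_first":

import numpy as np

def to_categorical_mask(mask, num_classes, channel="channel_last"):
    # Hypothetical stand-in for the project's channel-aware to_categorical.
    mask = np.asarray(mask, dtype=int)
    if mask.ndim == 4 and mask.shape[-1] == 1:
        mask = np.squeeze(mask, axis=-1)                   # (N, H, W)
    one_hot = np.eye(num_classes, dtype=np.float32)[mask]  # (N, H, W, num_classes)
    if channel == "channel_first":
        one_hot = np.moveaxis(one_hot, -1, 1)              # (N, num_classes, H, W)
    return one_hot

masks = np.random.randint(0, 4, size=(8, 224, 224, 1))
print(to_categorical_mask(masks, 4, "channel_first").shape)  # (8, 4, 224, 224)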
Example #5
    def fit(self, X, y):
        y = to_categorical(y)

        y_pred = np.zeros_like(y)

        for tree in tqdm(self.trees):
            y_and_pred = np.concatenate((y, y_pred), axis=1)
            tree.fit(X, y_and_pred)
            update_y_pred = tree.predict(X)

            y_pred -= np.multiply(self.learning_rate, update_y_pred)
Example #6
def main():
    optimizer = Adam()
    data = datasets.load_digits()
    X = data.data
    y = data.target

    y = to_categorical(y.astype("int"))

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)

    X_train = X_train.reshape((-1, 1, 8, 8))
    X_test = X_test.reshape((-1, 1, 8, 8))

    clf = NeuralNetwork(optimizer=optimizer, loss=CrossEntropy, validation_data=(X_test, y_test))
    clf.add(Conv2D(n_filters=16, filter_shape=(3, 3), stride=1, input_shape=(1, 8, 8), padding='same'))
    clf.add(Activation('relu'))
    clf.add(Dropout(0.25))
    clf.add(BatchNormalization())
    clf.add(Conv2D(n_filters=32, filter_shape=(3, 3), stride=1, padding='same'))
    clf.add(Activation('relu'))
    clf.add(Dropout(0.25))
    clf.add(BatchNormalization())
    clf.add(Flatten())
    clf.add(Dense(256))
    clf.add(Activation('relu'))
    clf.add(Dropout(0.4))
    clf.add(BatchNormalization())
    clf.add(Dense(10))
    clf.add(Activation('softmax'))

    clf.summary(name="ConvNet")

    train_err, val_err = clf.fit(X_train, y_train, n_epochs=50, batch_size=256)

    # Training and validation error plot
    n = len(train_err)
    training, = plt.plot(range(n), train_err, label="Training Error")
    validation, = plt.plot(range(n), val_err, label="Validation Error")
    plt.legend(handles=[training, validation])
    plt.title("Error Plot")
    plt.ylabel('Error')
    plt.xlabel('Iterations')
    plt.show()

    _, accuracy = clf.test_on_batch(X_test, y_test)
    print("Accuracy:", accuracy)

    y_pred = np.argmax(clf.predict(X_test), axis=1)
    X_test = X_test.reshape(-1, 8*8)
    # Reduce dimension to 2D using PCA and plot the results
    Plot().plot_in_2d(X_test, y_pred, title="Convolutional Neural Network", accuracy=accuracy, legend_labels=range(10))
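The round trip used throughout these examples: integer class labels are one-hot encoded for training with categorical cross-entropy, and predictions are decoded back to class indices with argmax (as in the y_pred line above). NumPy-only illustration:

import numpy as np

labels = np.array([0, 3, 9, 3])
one_hot = np.eye(10, dtype=np.float32)[labels]  # (4, 10) one-hot targets
decoded = np.argmax(one_hot, axis=1)            # back to class indices
assert np.array_equal(decoded, labels)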
Example #7
    def fit(self, X, y, epochs=20, batch_size=30):
        def create_dataset(dataset, look_back=1):
            dataX, dataY = [], []
            for i in range(len(dataset) - look_back):
                a = dataset[i:(i + look_back), 0]
                dataX.append(a)
                dataY.append(dataset[i + look_back, 0])
            return np.array(dataX), np.array(dataY)

        X = self.scaler.fit_transform(X)
        X = X.reshape(X.shape[0], 1, X.shape[1])
        y, oh_dict = to_categorical(y)
        self.oh_dict = oh_dict
        self.model.fit(X, y, epochs=epochs, batch_size=batch_size)
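Unlike Keras' helper, the to_categorical used in this fit returns a second value (oh_dict), apparently a mapping from original labels to one-hot columns. A hypothetical sketch of such a variant:

import numpy as np

def to_categorical_with_dict(y):
    # Hypothetical variant: also return the label-to-column mapping.
    classes = sorted(set(y))
    oh_dict = {label: idx for idx, label in enumerate(classes)}
    one_hot = np.zeros((len(y), len(classes)), dtype=np.float32)
    for row, label in enumerate(y):
        one_hot[row, oh_dict[label]] = 1.0
    return one_hot, oh_dict

y_oh, oh_dict = to_categorical_with_dict(["cat", "dog", "cat"])
print(oh_dict)  # {'cat': 0, 'dog': 1}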
Example #8
def main():

    data = datasets.load_digits()
    X = data.data
    y = data.target

    y = to_categorical(y.astype("int"))
    n_hidden = 512

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)
    clf = MultilayerPerceptron(n_hidden)
    clf.fit(X_train, y_train)
    y_pred = np.argmax(clf.predict(X_test), axis=1)
    y_test = np.argmax(y_test, axis=1)

    accuracy = accuracy_score(y_test, y_pred)
    print(f"Accuracy: {accuracy}")
    Plot().plot_in_2d(X_test, y_pred, title="perceptron", accuracy=accuracy, legend_labels=np.unique(y))
Example #9
def main():
    data = datasets.load_digits()
    X = normalize(data.data)
    y = data.target

    # data preprocess: One-hot encoding of nominal y-values
    y = to_categorical(y)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)

    clf = Perceptron(n_iterations=5000, learning_rate=0.001, loss=CrossEntropy, activation_function=Sigmoid)
    clf.fit(X_train, y_train)

    y_pred = np.argmax(clf.predict(X_test), axis=1)
    y_test = np.argmax(y_test, axis=1)

    accuracy = accuracy_score(y_test, y_pred)
    print(f"Accuracy: {accuracy}")

    Plot().plot_in_2d(X_test, y_pred, title="perceptron", accuracy=accuracy, legend_labels=np.unique(y))
Example #10
def get_images_masks(img_paths, mask_paths, crop_size=224, aug=False):
    imgs, masks = [], []
    for img_path, mask_path in zip(img_paths, mask_paths):
        img = cv2.imread(img_path)
        # img = cv2.resize(img, (self._width, self._height), interpolation=cv2.INTER_AREA)

        mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
        mask = np.where(mask == 85, 1, mask)
        mask = np.where(mask == 212, 2, mask)
        mask = np.where(mask == 255, 3, mask)
        # mask = cv2.resize(mask, (self._width, self._height), interpolation=cv2.INTER_AREA)
        imgs.append(img)
        masks.append(mask)
    imgs = np.array(imgs, dtype=np.float32) / 255.
    masks = np.array(masks)
    if crop_size:
        imgs = ImageProcessor.crop_volume(imgs, crop_size=crop_size // 2)
        masks = ImageProcessor.crop_volume(masks, crop_size=crop_size // 2)
    imgs = np.moveaxis(imgs, -1, 1)
    masks = to_categorical(np.array(masks), num_classes=4)

    return imgs, masks
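A tiny check of the grayscale-to-class remapping above followed by one-hot encoding (pixel values 0/85/212/255 become classes 0/1/2/3):

import numpy as np

mask = np.array([[0, 85], [212, 255]])
mask = np.where(mask == 85, 1, mask)
mask = np.where(mask == 212, 2, mask)
mask = np.where(mask == 255, 3, mask)
print(mask)                                     # remapped class indices 0..3
print(np.eye(4, dtype=np.float32)[mask].shape)  # (2, 2, 4): one-hot per pixel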
Example #11
    def fit(self, X, y, epochs=20, batch_size=30):
        X = self.scaler.fit_transform(X)
        X = X.reshape(X.shape[0], 1, X.shape[1])
        y, oh_dict = to_categorical(y)
        self.oh_dict = oh_dict
        self.model.fit(X, y, epochs=epochs, batch_size=batch_size)
Example #12
    def fit(self, X, y):
        y = to_categorical(y)
        super(GradientBoostingClassifier, self).fit(X, y)
Example #13
    def __next__(self):
        images, masks, verts = [], [], []

        indices = []
        if self._totalcount >= self._n_samples:
            self._totalcount = 0
            raise StopIteration
        for i in range(self._batch_size):
            indices.append(self._index)
            self._index += 1
            self._totalcount += 1
            self._index = self._index % self._len
            if self._totalcount >= self._n_samples:
                break
        ids_train_batch = self._data.iloc[self._shuffle_indices[indices]]

        for _id in ids_train_batch.values:
            img_path, mask_path, vertex_path = self.get_image_paths(id=_id)
            img, mask, vertex = self.get_images_masks(img_path=img_path,
                                                      mask_path=mask_path,
                                                      vertex_path=vertex_path)
            if self._match_hist:
                img = match_histograms(img,
                                       self._reference_img,
                                       multichannel=True)

            assert mask.ndim == 3

            images.append(img)
            masks.append(mask)
            verts.append(vertex)
        images = np.array(images)
        if self._aug == 'heavy' or self._aug == 'light':
            img_min = images.min()
            img_max = images.max()
            images = (images - img_min) * 255. / (img_max - img_min)
            images = np.array(images, dtype=np.uint8)
            if self._aug == 'heavy':
                images, masks = augmentation(images, masks)
            else:
                images, masks = light_aug(images, masks, segmap=self._segmap)
            images = img_min + images.astype(
                np.float32) * (img_max - img_min) / 255.
            masks = np.array(masks)
            if self._vert:
                verts = []
                for mask in masks:
                    try:
                        vertex = npy2point_datagenerator(mask)
                        verts.append(vertex)
                    except Exception:
                        print('error when converting mask to pointcloud')
                        exit()

        if self._crop_size:
            images = ImageProcessor.crop_volume(images,
                                                crop_size=self._crop_size // 2)
            masks = ImageProcessor.crop_volume(np.array(masks),
                                               crop_size=self._crop_size // 2)
        if self._channel == "channel_first":
            images = np.moveaxis(images, -1, 1)
        masks = to_categorical(np.array(masks),
                               num_classes=5,
                               channel=self._channel)
        verts = np.array(verts, np.float32) / 255.

        return images, masks, verts
Example #14
    # scale x data into the range (0.0001, 1) with min-max scaling
    x_scaler = MinMaxScaler(feature_range=(0.0001, 1))
    y_scaler = MinMaxScaler(feature_range=(0.0001, 1))

    data_shape = X.shape
    X = x_scaler.fit_transform(X.reshape(
        (data_shape[0], -1))).reshape(data_shape)

    # record input dimensions
    parameters['num_inputs'] = data_shape[1]
    parameters['num_features'] = data_shape[-1]

    # y = y_scaler.fit_transform(y[:, 1:2])
    # y = to_binary(y[:, 1], threshold=0.1); print( 1e2 * len(y[y==1]) / len(y) )
    # convert y data to categorical: bin y[:, 1] into 20 classes, then one-hot encode
    _, y = get_categories(y[:, 1], classes=20)
    y = to_categorical(y)
    parameters['num_classes'] = y.shape[-1]

    # split data for validation and testing
    x_train, x_test, y_train, y_test = train_test_split(
        X,
        y,
        test_size=parameters['test_split'],
        shuffle=parameters['test_shuffle'])

    print(y_test, x_test[:, 0].max())

    # convert data to categorical (see thresholds later)
    print('Convert class vector to binary class matrix '
          '(for use with categorical_crossentropy)')
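get_categories is project-specific and not shown here; a hypothetical stand-in that bins a continuous target into `classes` equal-width bins before one-hot encoding (the real helper may bin differently):

import numpy as np

def get_categories_sketch(values, classes=20):
    # Hypothetical: equal-width binning into `classes` bins.
    edges = np.linspace(values.min(), values.max(), classes + 1)
    bins = np.digitize(values, edges[1:-1])  # indices in 0..classes-1
    return edges, bins

values = np.random.rand(1000)
_, y_binned = get_categories_sketch(values, classes=20)
print(np.eye(20, dtype=np.float32)[y_binned].shape)  # (1000, 20)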