Example #1
def train_one_epoch(sess, n_epoch, saver):
    epoch_loss = utils.AverageMeter()
    np.random.shuffle(trainPerm)
    for i in range(trainPerm.shape[0] // batch_size):
        batchTrain = np.zeros(
            [batch_size, patch_size, patch_size, NUM_BANDS_IN],
            dtype=np.float32)
        batchTarget = np.zeros([batch_size, patch_size, patch_size, 1],
                               dtype=np.float32)
        for j in range(batch_size):
            id_n = trainPerm[i * batch_size + j] // num_patches
            residual = trainPerm[i * batch_size + j] % num_patches
            id_x = patch_stride * (residual % num_patches_x)
            id_y = patch_stride * (residual // num_patches_x)
            image = trainData[id_n][id_x:id_x + patch_size,
                                    id_y:id_y + patch_size, :]
            augmentationType = np.random.randint(0, 6)
            batchTrain[j] = utils.augmentation(image, augmentationType)
            image = trainTarget[id_n][id_x:id_x + patch_size,
                                      id_y:id_y + patch_size, :]
            batchTarget[j] = utils.augmentation(image, augmentationType)
        _, batchLoss, g_step, curr_lr = sess.run(
            [train_op, loss, global_steps, curr_lr_op],
            feed_dict={
                phTrainInput: batchTrain,
                phTrainTarget: batchTarget
            })
        epoch_loss.update(batchLoss)
    total_loss = epoch_loss.avg
    saver.save(sess, str(ckpt_dir / f'{model_name}-{n_epoch}.ckpt'))
    return total_loss, curr_lr
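
Example #1 calls `utils.augmentation(image, augmentationType)` with an integer drawn from `np.random.randint(0, 6)`, but the helper itself is not shown. Below is a minimal sketch of what such an index-based dispatcher commonly looks like (flips and 90-degree rotations with NumPy); the exact transforms in the original `utils` module are an assumption here.

import numpy as np

def augmentation(image, mode):
    # Hypothetical mapping: 0 = identity, 1 = vertical flip, 2 = horizontal flip,
    # 3/4/5 = rotation by 90/180/270 degrees on the spatial axes.
    if mode == 0:
        return image
    elif mode == 1:
        return np.flipud(image)
    elif mode == 2:
        return np.fliplr(image)
    else:
        return np.rot90(image, k=mode - 2)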
Example #2
def val_one_epoch(sess, n_epoch, saver, best_acc):
    epoch_loss = utils.AverageMeter()
    for i in range(valPerm.shape[0] // batch_size):
        batchTrain = np.zeros(
            [batch_size, patch_size, patch_size, NUM_BANDS_IN],
            dtype=np.float32)
        batchTarget = np.zeros([batch_size, patch_size, patch_size, 1],
                               dtype=np.float32)
        for j in range(batch_size):
            id_n = valPerm[i * batch_size + j] // num_patches
            residual = valPerm[i * batch_size + j] % num_patches
            id_x = patch_stride * (residual % num_patches_x)
            id_y = patch_stride * (residual // num_patches_x)
            image = trainData[id_n][id_x:id_x + patch_size,
                                    id_y:id_y + patch_size, :]
            augmentationType = np.random.randint(0, 6)
            batchTrain[j] = utils.augmentation(image, augmentationType)
            image = trainTarget[id_n][id_x:id_x + patch_size,
                                      id_y:id_y + patch_size, :]
            batchTarget[j] = utils.augmentation(image, augmentationType)
        batchLoss = sess.run(loss,
                             feed_dict={
                                 phTrainInput: batchTrain,
                                 phTrainTarget: batchTarget
                             })
        epoch_loss.update(batchLoss)
    total_loss = epoch_loss.avg
    if total_loss <= best_acc:
        best_acc = total_loss
        saver.save(sess, str(best_ckpt_file))
    return total_loss, best_acc
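
Both epoch loops above accumulate the per-batch loss with `utils.AverageMeter`. The class is not part of the listing; a common implementation (assumed here, a running sum/count tracker exposing `.avg`) looks like:

class AverageMeter:
    # Tracks the running average of the values passed to update().
    def __init__(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, value, n=1):
        self.sum += value * n
        self.count += n
        self.avg = self.sum / self.count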
Example #3
def processor(sample):
    data, labels, training = sample
    data = augmentation(process(data))
    labels = torch.LongTensor(labels)
    labels = torch.sparse.torch.eye(num_classes).index_select(dim=0,
                                                              index=labels)
    data = Variable(data).cuda()
    classes = F.softmax(model(data).cuda(), dim=1)
    labels = Variable(labels, requires_grad=False).cuda()
    loss = capsule_loss(classes, labels)
    return loss, classes
Example #4
    def __data_generation(self, index):

        'Generates data containing batch_size samples'  # X : (n_samples, *dims, n_channels)
        # Generate data & store sample
        # Assign probability and parameters

        #import code; code.interact(local=dict(globals(), **locals()))
        rand_p = random.random()

        # X_img
        #X_whole = Image.fromarray(np.load(list(self.total_images_dic.keys())[index])).resize((header.resize, header.resize))
        #X_whole = np.asarray(X_whole)

        #X_whole_mask = Image.fromarray(np.load(list(self.total_images_dic.keys())[index].split('.image.npy')[0] + '.mask.npy')).resize((
        #                                                                                                                               header.resize, header.resize))
        #X_whole_mask = np.round(np.asarray(X_whole_mask))

        X_masked = np.load(list(self.total_images_dic.keys())[index])['image']
        h_whole = X_masked.shape[0]  # original height
        w_whole = X_masked.shape[1]  # original width

        non_zero_list = np.nonzero(X_masked)

        non_zero_row = random.choice(
            non_zero_list[0])  # random non-zero row index
        non_zero_col = random.choice(
            non_zero_list[1])  # random non-zero col index

        X_patch = X_masked[
            int(max(0, non_zero_row - (header.img_size / 2))
                ):int(min(h_whole, non_zero_row + (header.img_size / 2))),
            int(max(0, non_zero_col - (header.img_size / 2))
                ):int(min(w_whole, non_zero_col + (header.img_size / 2)))]

        X_patch_img = self.data_transforms(
            augmentation(Image.fromarray(X_patch),
                         rand_p=rand_p,
                         mode=self.mode))
        X_patch_img_ = np.squeeze(np.asarray(X_patch_img))

        X_patch_1 = np.expand_dims(X_patch_img_, axis=0)
        X_patch_2 = np.expand_dims(X_patch_img_, axis=0)
        X_patch_3 = np.expand_dims(X_patch_img_, axis=0)

        X_ = np.concatenate((X_patch_1, X_patch_2, X_patch_3), axis=0)
        X = torch.from_numpy(X_)

        # Store classes
        y = list(self.total_images_dic.values())[index]

        return X, y
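
The generator above passes each patch through `augmentation(Image.fromarray(X_patch), rand_p=rand_p, mode=self.mode)`, a PIL-level helper that is not shown. A plausible sketch, assuming it only flips during training based on the pre-drawn `rand_p` (the real helper may add further transforms):

from PIL import Image

def augmentation(img, rand_p=0.0, mode='train'):
    # Hypothetical: flip the PIL patch horizontally with probability 0.5
    # (decided by the caller-supplied rand_p); leave val/test patches untouched.
    if mode == 'train' and rand_p > 0.5:
        img = img.transpose(Image.FLIP_LEFT_RIGHT)
    return img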
Example #5
def processor(sample):
    data, labels, training = sample
    data = augmentation(process(data))
    labels = torch.LongTensor(labels)
    labels = torch.sparse.torch.eye(num_classes).index_select(dim=0,
                                                              index=labels)
    data = Variable(data).cuda()
    labels = Variable(labels).cuda()
    if training:
        classes, reconstruction = model(data, labels)
    else:
        classes, reconstruction = model(data)
    loss = capsule_loss(data, labels, classes, reconstruction)
    return loss, classes
Example #6
def community_detection(graph,
                        null_samples,
                        num_samples=20,
                        small_criterion=4):
    augmented_g = augmentation(graph)
    communities = get_partition(augmented_g)
    graph = compute_first_density(graph, communities)
    graph = compute_second_density(graph, communities)
    graph = compute_third_density(graph, communities,
                                  null_samples[:num_samples])
    graph = small_community_feature(graph, communities, small_criterion)
    graph = compute_first_strength(graph, communities)
    graph = compute_second_strength(graph, communities)
    return graph
Example #7
    def __getitem__(self, index):
        #open image and label
        img = Image.open(self.images[index])
        label = Image.open(self.labels[index]).convert("RGB")

        #resize image and label, then crop them
        scale = random.choice(self.scale)
        scale = (int(self.shape[0] * scale), int(self.shape[1] * scale))

        seed = random.random()
        img = transforms.Resize(scale, Image.BILINEAR)(img)
        img = RandomCrop(self.shape, seed, pad_if_needed=True)(img)
        img = np.array(img)

        label = transforms.Resize(scale, Image.NEAREST)(label)
        label = RandomCrop(self.shape, seed, pad_if_needed=True)(label)
        label = np.array(label)

        #translate to CamVid color palette
        label = self.__toCamVid(label)

        #apply augmentation
        img, label = augmentation(img, label)
        if random.randint(0, 1) == 1:
            img = augmentation_pixel(img)

        img = Image.fromarray(img)
        img = self.to_tensor(img).float()

        #encode the label for the selected loss
        if self.loss == 'dice':
            # label -> [num_classes, H, W]
            label = one_hot_it_v11_dice(label,
                                        self.label_info).astype(np.uint8)

            label = np.transpose(label, [2, 0, 1]).astype(np.float32)
            label = torch.from_numpy(label)

            return img, label

        elif self.loss == 'crossentropy':
            label = one_hot_it_v11(label, self.label_info).astype(np.uint8)
            label = torch.from_numpy(label).long()

            return img, label
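
`RandomCrop(self.shape, seed, pad_if_needed=True)` is applied with the same seed to the image and to the label so that both receive an identical crop window. A sketch of such a seeded crop transform built on torchvision's functional API (assumed; the project's own class may differ in details such as padding mode):

import random
import torchvision.transforms.functional as TF

class RandomCrop(object):
    # Hypothetical seeded crop: the same seed yields the same crop window,
    # which keeps image and label aligned.
    def __init__(self, size, seed, pad_if_needed=False):
        self.size = size  # (height, width)
        self.seed = seed
        self.pad_if_needed = pad_if_needed

    def __call__(self, img):
        th, tw = self.size
        if self.pad_if_needed:
            pad_w = max(0, tw - img.size[0])
            pad_h = max(0, th - img.size[1])
            img = TF.pad(img, (pad_w // 2, pad_h // 2,
                               pad_w - pad_w // 2, pad_h - pad_h // 2))
        w, h = img.size
        rng = random.Random(self.seed)
        top = rng.randint(0, h - th)
        left = rng.randint(0, w - tw)
        return TF.crop(img, top, left, th, tw)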
Example #8
def processor(sample):
    data, labels, training = sample

    data = utils.augmentation(data.unsqueeze(1).float() / 255.0)
    labels = torch.eye(config.NUM_CLASSES).index_select(dim=0, index=labels)

    data = Variable(data)
    labels = Variable(labels)
    if torch.cuda.is_available():
        data = data.cuda()
        labels = labels.cuda()

    if training:
        classes, reconstructions = model(data, labels)
    else:
        classes, reconstructions = model(data)

    loss = capsule_loss(data, labels, classes, reconstructions)

    return loss, classes
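
The `(loss, classes)` return signature matches the torchnet `Engine` convention: the engine repeatedly calls the processor on batches from an iterator and steps the optimizer on the returned loss. A hedged usage sketch, assuming `get_iterator`, `optimizer`, and `config.NUM_EPOCHS` are defined elsewhere in the script:

from torchnet.engine import Engine

engine = Engine()

# a common on_sample hook appends the train/eval flag, which becomes the
# `training` element unpacked inside processor(sample)
engine.hooks['on_sample'] = lambda state: state['sample'].append(state['train'])

engine.train(processor, get_iterator(True), maxepoch=config.NUM_EPOCHS,
             optimizer=optimizer)
engine.test(processor, get_iterator(False))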
Example #9
def _train_forward(x):
    if self.args.aug:
        return utils.augmentation(x, self.model, self.args.upscale)
    else:
        return self.model(x)
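
Here `utils.augmentation(x, self.model, self.args.upscale)` is applied at forward time, which suggests test-time self-ensembling rather than data augmentation. A minimal sketch of that idea, assuming a simple horizontal-flip ensemble (the real helper likely averages more geometric transforms and may use `upscale`):

import torch

def augmentation(x, model, upscale):
    # Hypothetical flip self-ensemble: average the prediction on the input
    # with the un-flipped prediction on its horizontal flip.
    # `upscale` is accepted for interface parity but unused in this sketch.
    out = model(x)
    out_flip = model(torch.flip(x, dims=[-1]))
    return (out + torch.flip(out_flip, dims=[-1])) / 2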
Example #10
def main():
    # Initialize the model for this run
    model_ft, input_size = initialize_model(model_name,
                                            num_classes,
                                            feature_extract,
                                            use_pretrained=True)
    model_ft.to(device)

    # Temporary header
    # directory - normal, bacteria, TB, COVID-19, virus
    dir_test = '/home/ubuntu/segmentation/output/COVID-19/'
    label = 3  # class index 3 corresponds to COVID-19

    # Data loader
    test_masked_images = sorted(glob.glob(dir_test + '*.npz'))
    #test_masks = sorted(glob.glob(dir_test + '*.mask.npy'))

    for masked_img in test_masked_images:

        test_masked_img = np.load(masked_img)['image']  # .npz archive stores the masked image under the 'image' key
        #test_mask = np.load(mask)

        test_masked_img = np.asarray(
            Image.fromarray(test_masked_img).resize((1024, 1024)))
        #test_mask = Image.fromarray(test_mask).resize((1024,1024))

        #test_img = np.asarray(test_img)
        #test_mask = np.round(np.asarray(test_mask))

        #test_masked = np.multiply(test_img, test_mask)

        test_normalized = test_masked_img

        h_whole = test_normalized.shape[0]  # original height
        w_whole = test_normalized.shape[1]  # original width

        background = np.zeros((h_whole, w_whole))
        background_indicer = np.zeros((h_whole, w_whole))

        sum_prob_wt = 0.0

        for i in range(header.repeat):

            non_zero_list = np.nonzero(test_normalized)

            random_index = random.randint(0, len(non_zero_list[0]) - 1)

            non_zero_row = non_zero_list[0][
                random_index]  # random non-zero row index
            non_zero_col = non_zero_list[1][
                random_index]  # random non-zero col index

            X_patch = test_normalized[
                int(max(0, non_zero_row - (header.img_size / 2))
                    ):int(min(h_whole, non_zero_row + (header.img_size / 2))),
                int(max(0, non_zero_col - (header.img_size / 2))
                    ):int(min(w_whole, non_zero_col + (header.img_size / 2)))]

            X_patch_img = data_transforms(
                augmentation(Image.fromarray(X_patch), rand_p=0.0,
                             mode='test'))
            X_patch_img_ = np.squeeze(np.asarray(X_patch_img))

            X_patch_1 = np.expand_dims(X_patch_img_, axis=0)
            X_patch_2 = np.expand_dims(X_patch_img_, axis=0)
            X_patch_3 = np.expand_dims(X_patch_img_, axis=0)

            X_ = np.concatenate((X_patch_1, X_patch_2, X_patch_3), axis=0)
            X_ = np.expand_dims(X_, axis=0)

            X = torch.from_numpy(X_)
            X = X.to(device)

            checkpoint = torch.load(
                os.path.join(header.save_dir,
                             str(header.inference_epoch) + '.pth'))
            model_ft.load_state_dict(checkpoint['model_state_dict'])
            model_ft.eval()
            outputs = model_ft(X)
            outputs_prob = F.softmax(outputs, dim=1)

            prob = outputs_prob[0][label]
            prob_wt = prob.detach().cpu().numpy()

            gradcam = GradCAM.from_config(model_type='resnet',
                                          arch=model_ft,
                                          layer_name='layer4')

            mask, logit = gradcam(X, class_idx=label)
            mask_np = np.squeeze(mask.detach().cpu().numpy())
            indicer = np.ones((224, 224))

            mask_np = np.asarray(
                cv2.resize(
                    mask_np,
                    dsize=(
                        int(min(w_whole, non_zero_col +
                                (header.img_size / 2))) -
                        int(max(0, non_zero_col - (header.img_size / 2))),
                        int(min(h_whole, non_zero_row +
                                (header.img_size / 2))) -
                        int(max(0, non_zero_row - (header.img_size / 2))))))

            indicer = np.asarray(
                cv2.resize(
                    indicer,
                    dsize=(
                        int(min(w_whole, non_zero_col +
                                (header.img_size / 2))) -
                        int(max(0, non_zero_col - (header.img_size / 2))),
                        int(min(h_whole, non_zero_row +
                                (header.img_size / 2))) -
                        int(max(0, non_zero_row - (header.img_size / 2))))))

            mask_add = np.zeros((1024, 1024))
            mask_add[
                int(max(0, non_zero_row - (header.img_size / 2))
                    ):int(min(h_whole, non_zero_row + (header.img_size / 2))),
                int(max(0, non_zero_col - (header.img_size / 2))
                    ):int(min(w_whole, non_zero_col +
                              (header.img_size / 2)))] = mask_np
            mask_add = mask_add * prob_wt

            indicer_add = np.zeros((1024, 1024))
            indicer_add[
                int(max(0, non_zero_row - (header.img_size / 2))
                    ):int(min(h_whole, non_zero_row + (header.img_size / 2))),
                int(max(0, non_zero_col - (header.img_size / 2))
                    ):int(min(w_whole, non_zero_col +
                              (header.img_size / 2)))] = indicer
            background = background + mask_add
            background_indicer = background_indicer + indicer_add  # counts how many times each pixel was covered by a patch

            sum_prob_wt = sum_prob_wt + prob_wt

        final_mask = np.divide(background, background_indicer + 1e-7)

        final_mask = np.expand_dims(np.expand_dims(final_mask, axis=0), axis=0)
        torch_final_mask = torch.from_numpy(final_mask)

        test_img = test_masked_img  # masked image already resized to 1024x1024 above
        test_img = (test_img - test_img.min()) / test_img.max()
        test_img = np.expand_dims(test_img, axis=0)
        test_img = np.concatenate((test_img, test_img, test_img), axis=0)
        torch_final_img = torch.from_numpy(np.expand_dims(test_img, axis=0))

        final_cam, cam_result = visualize_cam(torch_final_mask,
                                              torch_final_img)

        final_cam = (final_cam - final_cam.min()) / final_cam.max()
        final_cam_np = np.swapaxes(np.swapaxes(np.asarray(final_cam), 0, 2), 0,
                                   1)
        test_img_np = np.swapaxes(np.swapaxes(test_img, 0, 2), 0, 1)

        final_combined = test_img_np + final_cam_np
        final_combined = (final_combined -
                          final_combined.min()) / final_combined.max()

        plt.imshow(final_combined)
        plt.savefig(
            masked_img.split('.npz')[0] + '.patch.heatmap_' + '.png')
Example #11
    def update_core(self):
        cuda.Device(self.device).use()
        xp = self.model.xp
        if not self.source_only:
            # autoencoder training
            loss_rec_data = 0
            n_batch = 0
            total_batches = len(self.t_iter.dataset) / self.t_iter.batch_size
            for t_batch in self.t_iter:
                t_batch_augmented = [augmentation(data) for data in t_batch]
                t_imgs_copy, _ = self.converter(t_batch, self.device)
                t_imgs, _ = self.converter(t_batch_augmented, self.device)
                # whether to use denoising autoencoder
                if self.noise == 'impulse':
                    t_imgs = get_impulse_noise(t_imgs, 0.5)
                elif self.noise == 'gaussian':
                    t_imgs = get_gaussian_noise(t_imgs, 0.5)
                elif self.noise == 'no_noise':
                    pass
                else:
                    raise NotImplementedError
                t_encoding = self.model.encode(t_imgs)
                t_decoding = self.model.decode(t_encoding)
                loss_rec = F.mean_squared_error(t_decoding, t_imgs_copy)
                for opt in self.optimizers.values():
                    opt.target.cleargrads()
                loss_rec.backward()
                for opt in self.optimizers.values():
                    opt.update()
                loss_rec_data += loss_rec.data
                n_batch += 1
                if n_batch >= total_batches:
                    break
            loss_rec_data /= n_batch

        # encoder and classifier training
        loss_cla_s_data = 0
        acc_s_data = 0
        n_batch = 0
        total_batches = len(self.s_iter.dataset) / self.s_iter.batch_size
        for s_batch in self.s_iter:
            s_batch_augmented = [augmentation(data) for data in s_batch]
            s_imgs, s_labels = self.converter(s_batch_augmented, self.device)
            s_encoding = self.model.encode(s_imgs)
            s_logits = self.model.classify(s_encoding)
            loss_cla_s = F.softmax_cross_entropy(s_logits, s_labels)
            acc_s = F.accuracy(s_logits, s_labels)
            for opt in self.optimizers.values():
                opt.target.cleargrads()
            loss_cla_s.backward()
            for opt in self.optimizers.values():
                opt.update()
            n_batch += 1
            loss_cla_s_data += loss_cla_s.data
            acc_s_data += acc_s.data
            if n_batch >= total_batches:
                break
        loss_cla_s_data /= n_batch
        acc_s_data /= n_batch

        chainer.reporter.report({'acc_s': acc_s_data})
        chainer.reporter.report({'loss_cla_s': loss_cla_s_data})
        if not self.source_only:
            chainer.reporter.report({'loss_rec': loss_rec_data})
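
The denoising-autoencoder branch calls `get_impulse_noise` and `get_gaussian_noise`, which are not part of the listing. Plausible sketches are given below, assuming inputs scaled to [0, 1] and treating the second argument as the corruption rate / noise scale; they use Chainer's `get_array_module` so the same code handles NumPy and CuPy batches.

from chainer import cuda

def get_impulse_noise(imgs, rate):
    # Hypothetical salt-and-pepper corruption: with probability `rate`
    # a pixel is replaced by 0 or 1.
    xp = cuda.get_array_module(imgs)
    mask = xp.random.rand(*imgs.shape) < rate
    salt = (xp.random.rand(*imgs.shape) < 0.5).astype(imgs.dtype)
    return xp.where(mask, salt, imgs)

def get_gaussian_noise(imgs, sigma):
    # Hypothetical additive Gaussian noise, clipped back to [0, 1].
    xp = cuda.get_array_module(imgs)
    noise = sigma * xp.random.randn(*imgs.shape).astype(imgs.dtype)
    return xp.clip(imgs + noise, 0.0, 1.0)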
Example #12
    def __getitem__(self, index):
        # load image and crop
        seed = random.random()
        img = Image.open(self.image_list[index])
        # random crop image
        # =====================================
        # w,h = img.size
        # th, tw = self.scale
        # i = random.randint(0, h - th)
        # j = random.randint(0, w - tw)
        # img = F.crop(img, i, j, th, tw)
        # =====================================

        scale = random.choice(self.scale)
        scale = (int(self.image_size[0] * scale),
                 int(self.image_size[1] * scale))

        # randomly resize image and random crop
        # =====================================
        if self.mode == 'train':
            img = transforms.Resize(scale, Image.BILINEAR)(img)
            img = RandomCrop(self.image_size, seed, pad_if_needed=True)(img)
        # =====================================

        img = np.array(img)
        # load label
        label = Image.open(self.label_list[index])

        # crop the corresponding label
        # =====================================
        # label = F.crop(label, i, j, th, tw)
        # =====================================

        # randomly resize label and random crop
        # =====================================
        if self.mode == 'train':
            label = transforms.Resize(scale, Image.NEAREST)(label)
            label = RandomCrop(self.image_size, seed,
                               pad_if_needed=True)(label)
        # =====================================

        label = np.array(label)

        # augment image and label
        if self.mode == 'train':
            # set a probability of 0.5
            img, label = augmentation(img, label)

        # augment pixel image
        if self.mode == 'train':
            # set a probability of 0.5
            if random.randint(0, 1) == 1:
                img = augmentation_pixel(img)

        # image -> [C, H, W]
        img = Image.fromarray(img)
        img = self.to_tensor(img).float()

        if self.loss == 'dice':
            # label -> [num_classes, H, W]
            label = one_hot_it_v11_dice(label,
                                        self.label_info).astype(np.uint8)

            label = np.transpose(label, [2, 0, 1]).astype(np.float32)
            # label = label.astype(np.float32)
            label = torch.from_numpy(label)

            return img, label

        elif self.loss == 'crossentropy':
            label = one_hot_it_v11(label, self.label_info).astype(np.uint8)
            # label = label.astype(np.float32)
            label = torch.from_numpy(label).long()

            return img, label
Example #13
np.random.shuffle(pos_train)
np.random.shuffle(neg_train)

if config.data_balance != 0:
    neg_train = neg_train[np.random.choice(
        len(neg_train), int(len(pos_train) * config.data_balance))]

print('\nPos: {} Neg: {}\n'.format(len(pos_train), len(neg_train)))

x_train = np.concatenate((pos_train, neg_train), axis=0)
y_train = np.hstack((np.ones(len(pos_train)), np.zeros(len(neg_train))))

# shuffle and augmentation
x_train, y_train = random_shuffle(x_train, y_train)
x_train = augmentation(x_train)

# Test Data
pos_test = generate_from_file_list([config.pos_test])
neg_test = generate_from_file_list([config.neg_test])

pos_y = np.ones(len(pos_test))
neg_y = np.zeros(len(neg_test))

print('\nReading Data Done.\n')

# Train
# Load Model
print('\nTraining Begins\n')
print('Loading Model...')
pad = config.pad
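
The script above also depends on a `random_shuffle(x_train, y_train)` helper that is not defined in the snippet. A minimal sketch of a paired shuffle (assumed implementation) is:

import numpy as np

def random_shuffle(x, y):
    # Shuffle samples and labels with one shared permutation so pairs stay aligned.
    perm = np.random.permutation(len(x))
    return x[perm], y[perm]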