Code Example #1
    def __init__(self, data_size=128, model_size=128, res='Legacy', sample='Normal', batch_sz=32, objective="malignancy",
                 do_augment=False, augment=None, use_class_weight=False, class_weight='dummy', debug=False,
                 val_factor=1, balanced=False):

        self.objective = objective

        dataset = load_nodule_dataset(size=data_size, res=res, sample=sample, apply_mask_to_patch=debug)

        self.train_set = dataset[2]
        self.valid_set = dataset[1]

        self.batch_sz = batch_sz
        self.data_size  = data_size
        self.model_size = model_size

        self.val_factor = val_factor
        # Hard-coded sample counts for the balanced / full training splits
        if balanced:
            self.trainN = 1332 // self.batch_sz
        else:
            self.trainN = 1689 // self.batch_sz
        self.valN = val_factor * (559 // self.batch_sz)

        self.balanced = balanced

        self.use_class_weight = use_class_weight
        self.class_weight_method = class_weight
        self.do_augment = do_augment
        self.augment = augment
        if do_augment:
            assert augment is not None

        print("Trainings Sets: {}, Validation Sets: {}".format(self.trainN, self.valN))
Code Example #2
    for run, out_size in zip(wRuns, outputs):
        for epoch in wEpchs:
            filename = Embed(run, epoch, post)
            try:
                with open(filename, 'rb') as f:
                    images, pred, meta, labels, masks = pickle.load(f)
            # Cached embedding missing or unreadable: recompute it below
            except (OSError, EOFError):
                from Network.data import load_nodule_dataset, load_nodule_raw_dataset, prepare_data
                from Network.model import miniXception_loader
                from Network.siameseArch import siamArch
                from Network.directArch import directArch

                # prepare test data
                images, labels, masks, meta = \
                    prepare_data(load_nodule_dataset(size=size, res=res, sample=sample)[DataSubSet],
                                 categorize=False,
                                 reshuffle=False,
                                 return_meta=True,
                                 verbose=1)
                images = np.array([
                    crop_center(im, msk, size=in_size)[0]
                    for im, msk in zip(images, masks)
                ])
                print("Image size changed to {}".format(images.shape))
                print('Mask not updated')
                if network == 'dir':
                    model = directArch(miniXception_loader,
                                       input_shape,
                                       objective="malignancy",
                                       output_size=out_size,
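
The crop_center helper used above is not defined in these snippets. Below is a minimal sketch of a mask-centered crop with the same call shape (cropped image first, cropped mask second), assuming 2-D arrays, a non-empty mask, and an image at least size x size; the project's real helper may differ:

    import numpy as np

    def crop_center(image, mask, size):
        # Hypothetical sketch: crop a size x size window centered on the
        # mask's center of mass, clamped so the window stays in bounds.
        ys, xs = np.nonzero(mask)
        cy, cx = int(ys.mean()), int(xs.mean())
        half = size // 2
        y0 = min(max(cy - half, 0), image.shape[0] - size)
        x0 = min(max(cx - half, 0), image.shape[1] - size)
        return (image[y0:y0 + size, x0:x0 + size],
                mask[y0:y0 + size, x0:x0 + size])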
Code Example #3
         'epoch': 0
     }
     generator = DataGeneratorDir(data_size=data_size,
                                  model_size=model_size,
                                  res=res,
                                  sample=sample,
                                  batch_sz=32,
                                  val_factor=1,
                                  balanced=False,
                                  do_augment=False,
                                  augment=data_augment_params,
                                  use_class_weight=True,
                                  class_weight='balanced')
     model.load_generator(generator)
 else:
     dataset = load_nodule_dataset(size=data_size, res=res, sample=sample)
     images_train, labels_train, masks_train = prepare_data_direct(
         dataset[2], classes=2, size=model_size)
     images_valid, labels_valid, masks_valid = prepare_data_direct(
         dataset[1], classes=2, size=model_size)
     images_train = np.array([
         crop_center(im, msk, size=model_size)[0]
         for im, msk in zip(images_train, masks_train)
     ])
     images_valid = np.array([
         crop_center(im, msk, size=model_size)[0]
         for im, msk in zip(images_valid, masks_valid)
     ])
     model.load_data(images_train,
                     labels_train,
                     images_valid,
Code Example #4
    def __init__(self,
                 data_size=128,
                 model_size=128,
                 res='Legacy',
                 sample='Normal',
                 batch_sz=32,
                 objective='malignancy',
                 do_augment=False,
                 augment=None,
                 use_class_weight=False,
                 class_weight='dummy',
                 debug=False,
                 val_factor=1,
                 balanced=False):

        self.objective = objective

        dataset = load_nodule_dataset(size=data_size,
                                      res=res,
                                      sample=sample,
                                      apply_mask_to_patch=debug)
        self.train_set = dataset[2]
        self.valid_set = dataset[1]

        self.batch_sz = batch_sz
        self.data_size = data_size
        self.model_size = model_size

        self.val_factor = val_factor

        if objective == 'malignancy':
            labels = np.array([entry[2] for entry in dataset[2]])
            Nb = np.count_nonzero(1 - labels)  # number of benign samples
            Nm = np.count_nonzero(labels)      # number of malignant samples

            if balanced:
                self.trainN = 2 * np.minimum(Nb, Nm) // self.batch_sz
                #self.trainN = 666 // self.batch_sz
            else:
                self.trainN = (Nb + Nm) // self.batch_sz
                #self.trainN = 1023 // self.batch_sz
            self.valN = val_factor * (339 // self.batch_sz)

            self.balanced = balanced

            self.use_class_weight = use_class_weight
            if use_class_weight:
                self.class_weight = get_class_weight(labels, class_weight)
                print(
                    "Class Weight -> Benign: {:.2f}, Malignant: {:.2f}".format(
                        self.class_weight[0], self.class_weight[1]))
            else:
                self.class_weight = None
        elif objective == 'rating':
            self.trainN = len(self.train_set) // batch_sz
            self.valN = len(self.valid_set) // batch_sz
            if balanced:
                print("WRN: objective rating does not support balanced")
            self.balanced = False
            if use_class_weight:
                print("WRN: objective 'rating' does not support class weights")
            self.use_class_weight = False
            self.class_weight = None
        else:
            raise ValueError("Illegal objective given ({})".format(objective))

        self.do_augment = do_augment
        self.augment = augment
        if do_augment:
            assert augment is not None

        print("Trainings Sets: {}, Validation Sets: {}".format(
            self.trainN, self.valN))
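
The get_class_weight helper called above is not shown. Below is a sketch of a 'balanced' weighting in the sklearn style (n_samples / (n_classes * n_c)), which matches the benign/malignant printout; the project's actual helper may compute this differently:

    import numpy as np

    def get_class_weight(labels, method='balanced'):
        # Hypothetical sketch of the helper referenced above.
        # Assumes binary labels with both classes present.
        labels = np.asarray(labels)
        counts = np.array([np.count_nonzero(labels == c) for c in (0, 1)])
        if method == 'balanced':
            weights = labels.size / (2.0 * counts)  # n_samples / (n_classes * n_c)
        else:  # e.g. 'dummy': uniform weights
            weights = np.ones(2)
        return {0: weights[0], 1: weights[1]}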
Code Example #5
def find_full_entry(query, raw):
    # Return the first entry whose metadata (entry[3]) matches the query
    # on its first four fields, printing its index for reference.
    for idx, entry in enumerate(raw):
        if tuple(query[:4]) == tuple(entry[3][:4]):
            print(idx)
            return entry
    return None


data128 = load_nodule_dataset(128,
                              res='Legacy',
                              sample='Normal',
                              apply_mask_to_patch=True)
data128 = data128[0] + data128[1] + data128[2]

data144 = load_nodule_dataset(144,
                              res=0.7,
                              sample='Normal',
                              apply_mask_to_patch=True)
data144 = data144[0] + data144[1] + data144[2]

im_id = 1105
match = find_full_entry(data128[im_id][3], data144)

plt.figure()
plt.subplot(131)
plt.imshow(data128[im_id][0])
Code Example #6
    return image_r, mask_r


def test_augment(dataset):
    im_array = []
    for entry in dataset:
        image, mask = entry[0], entry[1]
        image, mask = augment(image,
                              mask,
                              min_size=128,
                              max_angle=30,
                              flip_ratio=0.3)
        im_array.append(image)

    values = np.concatenate([np.array(im).flatten() for im in im_array])
    print(values.shape)
    plt.hist(values, 100)
    plt.show()


if __name__ == "__main__":

    try:
        from Network.data import load_nodule_dataset
    except ImportError:
        from data import load_nodule_dataset

    data = load_nodule_dataset()
    test_augment(data[2])
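
Only the tail of the augment function appears at the top of this example. Below is a minimal sketch with the same signature, assuming a random rotation up to max_angle degrees, a horizontal flip with probability flip_ratio, and a center crop to min_size; the project's actual transform chain may differ:

    import numpy as np
    from scipy.ndimage import rotate

    def augment(image, mask, min_size=128, max_angle=30, flip_ratio=0.3):
        # Hypothetical sketch: rotate image and mask by the same random
        # angle (nearest-neighbour for the mask to keep it binary).
        angle = np.random.uniform(-max_angle, max_angle)
        image_r = rotate(image, angle, reshape=False, order=1)
        mask_r = rotate(mask, angle, reshape=False, order=0)
        if np.random.rand() < flip_ratio:
            image_r, mask_r = np.fliplr(image_r), np.fliplr(mask_r)
        # Center-crop both to min_size x min_size
        y0 = (image_r.shape[0] - min_size) // 2
        x0 = (image_r.shape[1] - min_size) // 2
        return (image_r[y0:y0 + min_size, x0:x0 + min_size],
                mask_r[y0:y0 + min_size, x0:x0 + min_size])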
Code Example #7
DataSubSet = 2

if DataSubSet == 0:
    print("Test Set Analysis")
elif DataSubSet == 1:
    print("Validation Set Analysis")
elif DataSubSet == 2:
    print("Training Set Analysis")
else:
    assert False

dataset = load_nodule_raw_dataset()[DataSubSet]
print("Raw Data Loaded: {} entries".format(len(dataset)))

# prepare data for the selected subset (was hard-coded to [0], which breaks
# the length assert below whenever DataSubSet != 0)
images_test, labels_test = prepare_data(load_nodule_dataset()[DataSubSet],
                                        classes=2,
                                        size=size)
print("Data ready: images({}), labels({})".format(images_test.shape,
                                                  labels_test.shape))
print("Range = [{},{}]".format(np.min(images_test), np.max(images_test)))

assert len(dataset) == images_test.shape[0]

#model = miniXception(None, (size, size,1),'avg', weights='w_002_37-0.95-0.82.h5')
#compile(model, learning_rate=0.01)

model = directArch(miniXception_loader, input_shape, 2)
#model.summary()
#model.compile()
model.load_weights('w_007_36-0.91-0.86.h5')
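
A natural continuation (not part of the snippet) would be scoring the loaded weights on the prepared subset. A hedged sketch, assuming the directArch wrapper exposes a Keras-style predict and that prepare_data with classes=2 returns one-hot labels; both are assumptions, not confirmed by the source:

    pred = model.predict(images_test)          # assumed Keras-style API
    pred_cls = np.argmax(pred, axis=1)
    true_cls = np.argmax(labels_test, axis=1)  # assumes one-hot labels
    print("Accuracy: {:.3f}".format(np.mean(pred_cls == true_cls)))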