Example #1
    def setUp(self):

        gvf = 'mni_icbm152_t1_tal_nlin_sym_09c_atlas/atlas_csf.mnc'
        self.fname = os.path.join(DATA_PATH, gvf)
        self.img = minc.Label(self.fname)
        self.tmp = create_tmp_filename(prefix='atlas_csf',
                                       suffix='.mnc',
                                       remove=False)
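create_tmp_filename is a helper supplied by the test suite rather than by the minc module; a minimal sketch of a compatible helper, assuming it only needs to hand back a usable path (optionally leaving the empty file in place), might look like this:

import os
import tempfile

def create_tmp_filename(prefix='', suffix='', remove=True):
    # hypothetical stand-in for the test-suite helper:
    # reserve a temporary file name, optionally deleting the file
    # so that only the generated path is kept
    fd, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
    os.close(fd)
    if remove:
        os.unlink(path)
    return path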
Example #2
    def setUp(self):

        gvf = LABEL_FILE
        self.fname = os.path.join(DATA_PATH, gvf)
        self.img = minc.Label(self.fname)
        self.tmp = create_tmp_filename(prefix='atlas_csf',
                                       suffix='.mnc',
                                       remove=False)
Example #3
            or options.load is not None) and options.image is not None:
        if options.debug: print("Loading images...")
        # convert to float as we go

        #images= [ minc.Image(i).data.astype(np.float32)  for i in options.image ]
        image = minc.Image(options.image).data.astype(np.float32)

        if options.debug: print("Done")

        clf = None

        if options.load is not None:
            #TODO: load classifications
            pass
        else:
            prior = minc.Label(options.prior)

            labels = list(np.unique(prior.data))
            counts = list(np.bincount(np.ravel(prior.data)))

            if 0 in labels:
                if options.debug: print("Label 0 will be discarded...")
                labels.remove(0)
                counts.pop(0)  # assume it's first

            num_classes = len(labels)

            if options.debug:
                print("Available labels:{} counts: {} ".format(
                    repr(labels), repr(counts)))
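The label bookkeeping above relies only on numpy; a standalone sketch of the same idiom on a toy label array (standing in for prior.data) shows how labels and counts line up:

import numpy as np

prior_data = np.array([[0, 0, 1],
                       [1, 2, 2],
                       [2, 2, 0]], dtype=np.int32)

labels = list(np.unique(prior_data))              # sorted unique label values
counts = list(np.bincount(np.ravel(prior_data)))  # counts[v] = number of voxels equal to v

if 0 in labels:
    labels.remove(0)   # background is not a class
    counts.pop(0)      # np.bincount always puts the count for value 0 first

# two usable classes remain: labels 1 and 2, with 2 and 4 voxels respectively
num_classes = len(labels)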
Example #4
                          (images[0].shape[1] / 2.0) * 100)
            images.append((c[2] - images[0].shape[2] / 2.0) /
                          (images[0].shape[2] / 2.0) * 100)

        mask = None

        if options.debug: print("Done")

        man = None
        training_X = None
        Y = None

        if options.debug: print("Creating training dataset for classifier")

        if options.mask is not None:
            mask = minc.Label(options.mask)
            training_X = np.column_stack(
                tuple(np.ravel(j[mask.data > 0]) for j in images))
        else:
            training_X = np.column_stack(tuple(np.ravel(j) for j in images))

        if options.debug:
            print("Fitting {}, dataset size:{} ...".format(
                options.method, training_X.shape))

        if options.method == "LLE":
            man = manifold.LocallyLinearEmbedding(n_components=options.c,
                                                  n_neighbors=options.n,
                                                  eigen_solver='auto',
                                                  method='standard')
            Y = man.fit_transform(training_X)
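The LLE branch uses scikit-learn's manifold module directly; a self-contained sketch of the same call on random data, with made-up values standing in for options.c and options.n, is:

import numpy as np
from sklearn import manifold

training_X = np.random.rand(500, 4)              # 500 voxels, 4 features

man = manifold.LocallyLinearEmbedding(n_components=2,    # stand-in for options.c
                                      n_neighbors=10,    # stand-in for options.n
                                      eigen_solver='auto',
                                      method='standard')
Y = man.fit_transform(training_X)                # embedded coordinates, shape (500, 2)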
Example #5
        if options.coord:
            # add features dependent on coordinates
            c = np.mgrid[0:images[0].shape[0], 0:images[0].shape[1],
                         0:images[0].shape[2]]

            # use with center at 0 and 1.0 at the edge, could have used preprocessing
            images.append(
                (c[0] - images[0].shape[0] / 2.0) / (images[0].shape[0] / 2.0))
            images.append(
                (c[1] - images[0].shape[1] / 2.0) / (images[0].shape[1] / 2.0))
            images.append(
                (c[2] - images[0].shape[2] / 2.0) / (images[0].shape[2] / 2.0))

        mask = None
        if options.mask is not None:
            mask = minc.Label(options.mask)
        if options.debug: print("Done")

        clf = None

        if options.load is not None:
            clf = xgb.Booster(model_file=options.load)
        else:
            prior = minc.Label(options.prior)

            labels = list(np.unique(prior.data))
            counts = list(np.bincount(np.ravel(prior.data)))

            if 0 in labels:
                if options.debug: print("Label 0 will be discarded...")
                labels.remove(0)
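The coordinate features built with np.mgrid can be tried in isolation; a small sketch on a toy volume shows that each channel is centred at 0 and scaled by the half-extent of its own axis:

import numpy as np

shape = (4, 5, 6)                                 # stand-in for images[0].shape
c = np.mgrid[0:shape[0], 0:shape[1], 0:shape[2]]

coord_features = [(c[k] - shape[k] / 2.0) / (shape[k] / 2.0) for k in range(3)]

print(coord_features[0].shape)                    # (4, 5, 6), same grid as the image
print(coord_features[0].min(), coord_features[0].max())
# -1.0 and 0.5 on this tiny axis; the maximum approaches +1.0 as the axis grows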
Example #6
        train = json.load(f)
else:
    with open(options.train_csv, 'r') as f:
        train = list(csv.reader(f))

training_images = []
training_output = []
training_err = []

# go over training samples
clf = None

# scaler = preprocessing.StandardScaler().fit(X)

for (i, inp) in enumerate(train):
    mask   = minc.Label(inp[-3]).data
    ground = minc.Label(inp[-2]).data
    auto   = minc.Label(inp[-1]).data

    # normalize input features to zero mean and unit std
    if options.normalize:
        images = [preprocessing.scale(minc.Image(k).data) for k in inp[0:-3]]
    else:
        images = [minc.Image(k).data for k in inp[0:-3]]

    # store training data
    training_images.append(prepare_features(options, images, None, auto, mask))

    # perform direct learning right now
    training_output.append(ground[mask > 0])
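When options.normalize is set, each input volume is reduced to zero mean and unit standard deviation; a numpy-only sketch of that per-volume step (equivalent in effect to the scikit-learn call applied to a flattened volume) is:

import numpy as np

def zero_mean_unit_std(volume):
    # standard-score normalization of a whole volume
    v = volume.astype(np.float64)
    return (v - v.mean()) / v.std()

toy = np.random.rand(4, 5, 6) * 100.0 + 50.0
scaled = zero_mean_unit_std(toy)
print(scaled.mean(), scaled.std())                # ~0.0 and ~1.0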
     
Example #7
if __name__ == "__main__":
    history = minc.format_history(sys.argv)

    options = parse_options()
    # load prior and input image
    if options.debug: print("Loading images...")
    # convert to float as we go

    ref_image = minc.Image(options.reference).data.astype(np.float32)
    image = minc.Image(options.image).data.astype(np.float32)

    if options.debug: print("Done")

    mm = (image > 0)
    if options.mask is not None:
        mask = minc.Label(options.mask)
        mm = np.logical_and(image > 0, mask.data > 0)

    rmm = (ref_image > 0)
    if options.refmask is not None:
        refmask = minc.Label(options.refmask)
        rmm = np.logical_and(ref_image > 0, refmask.data > 0)

    # print(ref_image[rmm])
    rmin = np.amin(ref_image[rmm])
    rmax = np.amax(ref_image[rmm])
    print("Ref Range {} - {}".format(rmin, rmax))
    imin = np.amin(image[mm])
    imax = np.amax(image[mm])
    print("Image Range {} - {}".format(imin, imax))
    
Example #8
            if n_i == -1:
                n_i = len(i) - 1
            elif n_i != (len(i) - 1):
                raise "Inconsistent number of images:{}".format(repr(i))

        if n_i == -1:
            raise "No input images!"

        if options.debug:
            print("Loading {} images ...".format(n_i * len(train_library)))

        images = [[
            minc.Image(os.path.join(prefix, j[i])).data for i in range(n_i)
        ] for j in train_library]
        segs = [
            minc.Label(os.path.join(prefix, j[n_i])).data
            for j in train_library
        ]

        priors = []
        # TODO: check shape of all images for consistency
        _shape = images[0][0].shape

        if options.coord:
            # add features dependent on coordinates
            c = np.mgrid[0:_shape[0], 0:_shape[1], 0:_shape[2]]
            # use with center at 0 and 1.0 at the edge, could have used preprocessing
            priors.append((c[0] - _shape[0] / 2.0) / (_shape[0] / 2.0))
            priors.append((c[1] - _shape[1] / 2.0) / (_shape[1] / 2.0))
            priors.append((c[2] - _shape[2] / 2.0) / (_shape[2] / 2.0))
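The TODO above asks for a consistency check on the loaded volumes; a small sketch of such a check, reusing the images and segs names from the example, could be:

def check_consistent_shapes(images, segs):
    # raise if any feature volume or segmentation differs from the first volume's shape
    expected = images[0][0].shape
    for sample in images:
        for vol in sample:
            if vol.shape != expected:
                raise ValueError("Inconsistent image shape: {} vs {}".format(vol.shape, expected))
    for seg in segs:
        if seg.shape != expected:
            raise ValueError("Inconsistent label shape: {} vs {}".format(seg.shape, expected))
    return expected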