Example #1
 def dataGenerator(hiddens, separatingNumber, unobservedRate,
                   targetIndeces):
     #print elems
     #print targetelems
     #print len(targetIndeces)
     rs = Toolbox.GenerateRandomSeparation(targetIndeces, hiddens)
     return Toolbox.Take(rs, separatingNumber)
Example #2
def process(set, ent, inputEnt):
    highres = misc.imread(ent, flatten=False).astype(np.float32)
    lowres = misc.imread(inputEnt, flatten=False).astype(np.float32)
    width = highres.shape[1]
    height = highres.shape[0]

    print "processing %s (%dx%d)" % (ent, width, height)

    defFile = "scratch/test_SR_deploy.prototxt"
    pycnn.preprocessFile("deploy.prototmp", defFile, {"WIDTH": width, "HEIGHT": height})

    if "youtube" in set:
        print "using youtube mean"
        mean_bgr = tb.readFloat("/misc/lmbraid17/ilge/caffe/superresolution/datasets/youtube/test/mean3.float3").astype(
            np.float32
        )
    else:
        mean_bgr = tb.readFloat("/home/ilge/data/caffe/superresolution/datasets/coco/mean.float3").astype(np.float32)

    mean_bgr = cv2.resize(mean_bgr, (width, height), interpolation=cv2.INTER_CUBIC)
    mean_bgr_lowres = cv2.resize(mean_bgr, (width / 4, height / 4), interpolation=cv2.INTER_CUBIC)

    highres_nomean_bgr = highres[:, :, (2, 1, 0)] - mean_bgr
    lowres_nomean_bgr = lowres[:, :, (2, 1, 0)] - mean_bgr_lowres

    caffe.set_phase_test()
    caffe.set_mode_gpu()
    caffe.set_logging_disabled()
    net = caffe.Net(defFile, modelFile)

    print "network forward pass"
    blobs = net.forward(
        highres=np.asarray([net.preprocess("highres", highres_nomean_bgr / 255.0)]),
        lowres=np.asarray([net.preprocess("lowres", lowres_nomean_bgr / 255.0)]),
    )

    output_bgr = 255.0 * blobs["output"].transpose(0, 2, 3, 1).squeeze()
    output_bgr += mean_bgr
    output_bgr[output_bgr < 0] = 0
    output_bgr[output_bgr > 255] = 255

    os.system("mkdir -p %s/%s" % (out_dir, set))
    basename = os.path.basename(ent)[:-4].replace("_GT", "")
    misc.imsave("%s/%s/%s-gt.png" % (out_dir, set, basename), highres)
    misc.imsave("%s/%s/%s-recon.png" % (out_dir, set, basename), output_bgr[:, :, (2, 1, 0)])

    # nn, li, cu = tb.computeBasePSNRs(ent, downsampledFilename=inputEnt)
    nn = tb.PSNR()
    li = tb.PSNR()
    cu = tb.PSNR()

    psnr = tb.PSNR()
    psnr.set(blobs["psnr"][0, 0, 0, 0], blobs["psnr_y"][0, 0, 0, 0])

    print "nn=%5s, li=%5s, cu=%5s, net=%5s" % (nn, li, cu, psnr)

    return (nn, li, cu, psnr)
Example #3
    def __findReplaceAction(self, button):

        search = self.__searchWidget.getText()
        replace = self.__replaceWidget.getText()

        searchType = self.__searchReplaceNodePlugsMenu.getItem(
            self.__searchReplaceNodePlugsMenu.getCurrentIndex())
        searchScope = self.__searchReplaceScopeMenu.getItem(
            self.__searchReplaceScopeMenu.getCurrentIndex())

        Toolbox.searchAndReplace(self, search, replace, searchType,
                                 searchScope)
Example #4
 def __init__(self,folder,start_meas=datetime(2000,1,1),end_meas=datetime(2100,1,1)):    
     self.name = folder 
     sequences = glob.glob("%s/*"%self.name) 
     sequences = [s for s in sequences if os.path.isdir(s)]
     sequences.sort()  
     print sequences    
     all_dates = []
     for s in sequences:
         all_dates+=[Toolbox.get_time_seq(s)]
     start,end = searchsorted(all_dates,[start_meas,end_meas])    
     self.sequences = sequences[start:end]
     self.get_time_dir()
     self.start_date = Toolbox.get_time_seq(self.sequences[0]) 
Example #5
    def makeSlice(self, im_path, slice_name):

        # assumed 16-bit input; cv2.IMREAD_UNCHANGED keeps the original depth and int32 leaves room for the HU offset below
        m = np.asarray(cv2.imread(im_path, cv2.IMREAD_UNCHANGED), dtype=np.int32)
        # In extra layer, for easier layering
        if m.shape[1] != 512:
            return False

        m = np.subtract(m, 32768)  # Convert to HU units
        m = np.expand_dims(m, axis=0)
        slice = torch.from_numpy(m)
        slice = slice.type(torch.int16)

        Toolbox.save_tensor(slice, slice_name)
Example #6
 def get_netd(self,filters,spav='all',non_ill=1): 
     self.organized(spav=spav)
     mean_netd = zeros([11])
     mean_ner = zeros([11])
     mean_sigma = zeros([11])
     mean_sitf_temp = zeros([11])
     mean_sitf_rad = zeros([11])
     
     all_mean = self.all_mean
     all_std = self.all_std
     all_tms = self.all_tms
     
     for fil in filters:
         k = filters_positions.index(fil)  
         if spav == 'all' or spav == 'fast':
             correct_pixels = [i for i in range(2) if all_mean[k,0,i]*all_mean[k,1,i]!=0] 
         else:    
             correct_pixels = [i for i in self.illuminated if all_mean[k,0,i]*all_mean[k,1,i]!=0] 
     
         if not array(correct_pixels).size: # anomalies on all pixels, then no calculations
             print "NO GOOD PIXELS"
             mean_netd[k] = 0
             mean_ner[k] = 0
             mean_sigma[k] = 0
             mean_sitf_temp[k] = 0
             mean_sitf_rad[k] = 0
             
         else:                
             i_abb = searchsorted(self.temp_tms,all_tms[k,0])-1
             i_hbb = searchsorted(self.temp_tms,all_tms[k,1])-1
             
             sitf_temp = (all_mean[k,1,:] - all_mean[k,0,:])/(self.temp_hbb[i_hbb] - self.temp_abb[i_abb])  
             
             rad_hbb = Toolbox.radiance(self.temp_hbb[i_hbb]+273.15,fil,emiss_wls,emiss,self.temp_pm[i_hbb])
             rad_abb = Toolbox.radiance(self.temp_abb[i_abb]+273.15,fil,emiss_wls,emiss,self.temp_pm[i_abb])
             sitf_rad = (all_mean[k,1,:] - all_mean[k,0,:])/(rad_hbb - rad_abb) 
                             
             netd = abs(all_std[k,0,:]/sitf_temp)  
             ner = abs(all_std[k,0,:]/sitf_rad)   # noise-equivalent radiance (radiance-based SITF)
             
             mean_netd[k] = mean(netd[correct_pixels])  
             mean_ner[k] = mean(ner[correct_pixels])
             mean_sitf_temp[k] = mean(sitf_temp[correct_pixels])
             mean_sitf_rad[k] = mean(sitf_rad[correct_pixels])
             mean_sigma[k] = mean(all_std[k,0,correct_pixels])
             
     self.sigma = mean_sigma         
     self.netd = mean_netd
     self.ner = mean_ner
     self.sitf_temp = mean_sitf_temp
     self.sitf_rad = mean_sitf_rad
        
Example #7
 def getBridgePage(self):
     # Get the bridge page for applicable radios.
     # Data is a big JSON.
     data = self.getStatusPage('brmacs.cgi?brmacs=y&_=' +\
         Toolbox.timestamp())
     brm = data['brmacs']
     bridges = {}
     # The last element is always null.
     datum = brm.pop()
     while datum:
         try:
             # Attempt to look it up from the existing bridges.
             bridge = bridges[datum['bridge']]
             bridge['interfaces'].add(datum['port'])
             bridge['macs'].add(datum['hwaddr'])
         except KeyError:
             # If the bridge is unknown, initialize it.
             bridge = {}
             # Sets for deduplication.
             bridge['interfaces'] = set([datum['port']])
             bridge['macs'] = set([datum['hwaddr']])
             bridges[datum['bridge']] = bridge
         datum = brm.pop()
     return bridges
Example #8
 def __init__(self, root, canvas):
     self.root = root
     self.canvas = canvas
     self.sevenCards = {}
     self.initClickedCards()
     self.currentHand = []
     self.setHand = []
     self._updateGeo()
     self.shoe = self.newShoe()
     self.cardValues = self.initialCardValue()
     self.cardSuits = self.initialCardSuit()
     self.correctPoint = 0
     self.wrongPoint = 0
     self.toolbox = Toolbox.Toolbox()
     # Deal button
     dealButton = Button(self.root, text='Deal', command=self.displayCards)
     dealButton.pack(side=LEFT)
     # Check Set Hand
     checkButton = Button(self.root, text='Check Set Hand', command=self.checkSet)
     checkButton.pack(side=LEFT)
     # Houseway button
     housewayButton = Button(self.root, text='Houseway', command=self._houseway)
     housewayButton.pack(side=LEFT)
     # Bind the left click to onClickMove
     self.canvas.bind("<Button-1>", self.onClickMove)
     self.welcomeScreen()
Example #9
    def analyze(self,frame_number=1000,spav="None",illuminated=illuminated,non_illuminated=non_illuminated,non_ill=1):
        """Compute average frame and standard deviation
        frame_number: defines how many frames should be used (default is all frames)
        spav: spatial average to be computed
          - "all" to return a single average value for illuminated pixels
          - integer n to return spatial moving average with square (1+n)**2
          - anything else: no spatial average is computed
        """   
        
        f = open(self.name,'rb') # open raw file    
        header = np.fromfile(f, dtype=np.uint32,count=4)
        access_table = np.fromfile(f, dtype=np.uint16,count=self.npixels) # id of the used pixels on the 120 x 160 standard grid of the bolometer     
        tms = []
        data_1D = []  # list to save all frames
        data_1D_non_ill = []
        for k in range(min(frame_number,self.nframes)):
            t = np.fromfile(f, dtype=np.uint32,count=1) # frame timestamp  
            frame = array(np.fromfile(f, dtype=np.uint16,count=self.npixels)).astype(float) # values for all pixels, returned in a 1D array 
            
            if len(frame) == self.npixels: # prevent from keeping erroneous frames
                tms+= [int(t)]      
                if spav == "fast": # like "all" without eliminating bad pixels before averaging
                    frame = mean(frame[illuminated] - non_ill*frame[non_illuminated])
                    data_1D+=[frame*ones(2)]
                                        
                elif isinstance(spav, int): 
                    """spatial mooving average on a spav x spav square"""
                    data_1D+= [Toolbox.get_av(frame,spav)]
                  
                else:
                    data_1D+=[frame] # append new frame to existing ones
                
        if data_1D == []: # mark bad file to avoid reading in a sequence
            self.good = False 
            
        elif shape(data_1D)[0] == 1: # mark bad file to avoid reading in a sequence
            self.good = False                                  
        
        data_1D = array(data_1D)
       
        self.std = std(data_1D,axis=0) 
        self.mean = mean(data_1D,axis=0)         

        # remove bad pixels from analysis 
        if spav == "fast":
            corr = where(self.std<1.)[0]
        
        elif isinstance(spav, int): 
            corr = where(self.std<1.2)[0]  
                        
        else:    
            corr = where(self.std<1.8)[0]
         
        if not corr.size:
            print self.name,"No correct pixels"            

        self.all_tms = array(tms)
        self.tms = mean(tms)    
        self.correct_pixels = corr
Example #10
    def createMask(shape, unobservedRate):
        elems = prod(shape)
        maskedElems = int(elems * unobservedRate)
        mask = []
        mask.extend(repeat(0, maskedElems))
        mask.extend(repeat(1, elems - maskedElems))

        return array(Toolbox.ShuffleArray(mask)).reshape(shape)
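A minimal usage sketch for createMask (hypothetical call; it assumes the numpy names prod, repeat and array are in scope and that Toolbox.ShuffleArray returns a random permutation of its argument): roughly unobservedRate of the entries come back as 0 and the rest as 1.

mask = createMask((4, 5), 0.3)   # 20 elements: 6 zeros ("unobserved"), 14 ones, in random positions
observed = data * mask           # hypothetical use: zero out the unobserved entries of a same-shaped array `data`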
Example #11
def saveImages(img,surf,seg_original,removed_seg,case_str,output_path,removed_output_path):    
    img_mask =tb.get_image_mask(img,surf)
    con_seg = tb.fill_contour(img_mask)

    save_con_seg = con_seg[margin:con_seg.shape[0]-margin,margin:con_seg.shape[1]-margin,margin:con_seg.shape[2]-margin]
    save_con_seg_c = np.where(save_con_seg==255, 2, seg_original)

    nii1 = nib.Nifti1Image(save_con_seg_c,affine=None)
    nib.save(nii1,output_path + case_str + '/prediction_contour.nii.gz')

    removed_seg = removed_seg[margin:con_seg.shape[0]-margin,margin:con_seg.shape[1]-margin,margin:con_seg.shape[2]-margin]
    seg_original = np.where(removed_seg==1,1,seg_original)

    save_con_seg_c = np.where(save_con_seg==255, 2, seg_original)

    nii1 = nib.Nifti1Image(save_con_seg_c,affine=None)
    nib.save(nii1,removed_output_path + case_str + '/prediction_contour.nii.gz')
Example #12
    def separateArrayRandomly(length, blocks):
        seq = []
        index = 0
        for size in getSeparatingArraySize(length, blocks):
            seq.extend([index] * size)
            index += 1

        return Toolbox.ShuffleArray(seq)
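A usage sketch for separateArrayRandomly (hypothetical sizes; it assumes getSeparatingArraySize(10, 3) yields one size per block, e.g. [4, 3, 3]): each entry of the result is the block index assigned to that position after shuffling.

assignment = separateArrayRandomly(10, 3)
# e.g. [2, 0, 1, 0, 2, 1, 0, 0, 1, 2] -- a random permutation of four 0s, three 1s and three 2s
block0 = [i for i, b in enumerate(assignment) if b == 0]   # positions that fell into block 0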
Example #13
 def mac(self):
     macs = self.network.findAdj(self, ntype=Mac)
     try:
         return Toolbox.getUnique(macs)
     except IndexError:
         return None
     except Toolbox.NonUniqueError:
         # If there are multiples, just give the first one.
         return macs[0]
Example #14
    def snmpwalk(self, mib):
        # Walks specified mib
        ips = self.ips
        # Get a list of communities, starting with any that are known to
        # work on this host.
        communities = self.network.communities.copy()
        if self.community:
            # Means we have a functional community string. Use that first.
            communities.append(self.community)
        communities.reverse()

        def scanAllCommunities(ip):
            for community in communities:
                results = scan(ip, community)
                if results:
                    return results
            return False

        def scan(ip, community):
            session = self.snmpInit(ip, community)
            try:
                responses = session.walk(mib)
                self.community = community
                self.setmgmntip(ip, True)
                print('Response on', ip, 'with', community)
                return responses
            except easysnmp.exceptions.EasySNMPNoSuchNameError:
                # Probably means that you're hitting the wrong kind of device.
                self.community = None
                self.setmgmntip(ip, False)
                raise
            except easysnmp.exceptions.EasySNMPTimeoutError:
                # Either the community string is wrong, or the address is dead.
                print('No response on', ip, 'with', community)
                self.community = None
                self.setmgmntip(ip, False)
                pass
            return False

        # First, we try using known-good settings for communicating with this
        # host.
        if self.mgmntip:
            if self.community:
                results = scan(self.mgmntip, self.community)
                if results:
                    return results
            results = scanAllCommunities(self.mgmntip)
            if results:
                return results
        # If we have no known-good settings, we just iterate over everything.
        for ip in ips:
            if not Toolbox.ipInNetworks(ip, self.network.inaccessiblenets):
                results = scanAllCommunities(ip)
                if results:
                    return results
        return False
Example #15
    def test_06_predefined_list(self):
        tool1 = {'file':'tool1.py', 'function':'do_stuff', 'args':['list'], 'return':['int']}
        tool2 = {'file':'tool2.py'}
        
        tb2 = mod_tool.Toolbox(lst=[tool1, tool2])

        tb2.list()
        
        self.assertEqual(tb2.tool_as_string(tool1), 'tool1.py.do_stuff\n')
        self.assertEqual(tb2.tool_as_string(tool2), 'tool2.py\n')
Example #16
    def analyze(self,frame_number=1000,spav="None"):
        """Compute average frame and standard deviation
        frame_number: defines how many frames should be used (default is all frames)
        spav: spatial average to be computed
          - "all" to return a single average value for illuminated pixels
          - integer n to return spatial moving average with square (1+n)**2
          - anything else: no spatial average is computed
        """
        f = open(self.name,'rb') # open raw file    
        header = np.fromfile(f, dtype=np.uint32,count=4)
        access_table = np.fromfile(f, dtype=np.uint16,count=self.npixels) # id of the used pixels on the 120 x 160 standard grid of the bolometer     
        tms = []
        data_1D = []  # list to save all frames

        for k in range(min(frame_number,self.nframes)):
            t = np.fromfile(f, dtype=np.uint32,count=1) # frame timestamp  
            frame = array(np.fromfile(f, dtype=np.uint16,count=self.npixels)).astype(float) # values for all pixels, returned in a 1D array 
            
            if len(frame)==self.npixels: # prevent from keeping erroneous frames
                tms+= [int(t)]                
                if spav=="all": 
                    """average on all illuminated pixels, only one value is saved per frame but an array is created to keep consistency with other options"""
                    frame = mean(frame[illuminated])
                    data_1D+= [frame*ones(2)] 
                    
            elif isinstance(spav, int): 
                """spatial moving average on a spav x spav square"""
                    data_1D+=[Toolbox.get_av(frame,spav)]
                                    
                else: 
                    """no spatial average"""
                    data_1D+=[frame] # append new frame to existing ones
                                 
        self.mean = mean(array(data_1D),axis=0) 
        self.std = std(array(data_1D),axis=0)
                
        if spav == "all":
            corr=where(self.std<5.)[0]  # identify erroneous pixels
           
        elif isinstance(spav, int): 
            corr = where(self.std<1.2)[0]  
            
        else: 
            corr = where(self.std<2.)[0]
            
        if not corr.size:
            print self.name,"No correct pixels",self.std
            plot(array(data_1D))[0]
            show()

        self.all_tms = array(tms)
        self.tms = mean(tms)    
        self.correct_pixels = corr
Example #17
 def getInterfacePage(self):
     # Get the list of network interfaces from the web interface.
     data = self.getStatusPage('iflist.cgi?_=' + str(Toolbox.timestamp()))
     interfaces = {}
     if data:
         for ifdata in data['interfaces']:
             try:
                 # The typing is for consistency with the SNMP data.
                 interfaces[Mac(ifdata['hwaddr'])] = set([Ip(ifdata['ipv4']['addr'])])
             except KeyError:
                 # Some interfaces won't have an address.
                 pass
     return interfaces
Example #18
def makeOrderLUT(folder, section_size, val_ratio, full_lut=False):
    each_side = section_size // 2
    num_sections = 0
    LUT = []
    for sf in Toolbox.get_subs(folder):
        images = Toolbox.get_subs(sf)
        num_ims = len(images)
        if num_ims < section_size:
            continue

        folder_sections = []
        i = each_side
        while i < num_ims - each_side:
            folder_sections.append(images[i - each_side:i + each_side + 1])
            i += 1
        LUT.append(folder_sections)

        if not full_lut:  # For testing only
            num_sections += 1
            if num_sections > 100:
                break

    train, val = splitLUT(LUT, val_ratio)
    return [train, val]
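A small illustration of the sliding window built above (hypothetical folder contents, not part of the example): with section_size = 5, each section is a list of 5 consecutive image paths centred on index i.

images = ['im%03d.png' % i for i in range(7)]   # hypothetical folder with 7 frames
each_side = 5 // 2
sections = [images[i - each_side:i + each_side + 1] for i in range(each_side, 7 - each_side)]
print(sections)   # three 5-frame windows: frames 0-4, 1-5 and 2-6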
Example #19
def process(set,ent,inputEnt):
    highres = misc.imread(ent, flatten=False).astype(np.float32)
    lowres = misc.imread(inputEnt, flatten=False).astype(np.float32)
    width = highres.shape[1]
    height = highres.shape[0]

    print 'processing %s (%dx%d)' % (ent, width, height)

    defFile = 'scratch/test_SR_deploy.prototxt'
    pycnn.preprocessFile('deploy.prototmp', defFile, {'WIDTH': width, 'HEIGHT': height})

    if 'youtube' in set:
        print 'using youtube mean'
        mean_bgr = tb.readFloat("/misc/lmbraid17/ilge/caffe/superresolution/datasets/youtube/test/mean3.float3").astype(np.float32)
    else:
        mean_bgr = tb.readFloat("/home/ilge/data/caffe/superresolution/datasets/coco/mean.float3").astype(np.float32)

    mean_bgr = cv2.resize(mean_bgr, (width, height), interpolation=cv2.INTER_CUBIC)
    mean_bgr_lowres = cv2.resize(mean_bgr, (width/4, height/4), interpolation=cv2.INTER_CUBIC)

    highres_nomean_bgr = highres[:, :, (2, 1, 0)] - mean_bgr
    lowres_nomean_bgr = lowres[:, :, (2, 1, 0)] - mean_bgr_lowres

    caffe.set_phase_test()
    caffe.set_mode_gpu()
    caffe.set_logging_disabled()
    net = caffe.Net(
       defFile,
       modelFile
    )

    print 'network forward pass'
    blobs = net.forward(highres=np.asarray([net.preprocess('highres', highres_nomean_bgr / 255.0)]),lowres=np.asarray([net.preprocess('lowres', lowres_nomean_bgr / 255.0)]))

    output_bgr = 255.0 * blobs['output'].transpose(0, 2, 3, 1).squeeze()
    output_bgr += mean_bgr
    output_bgr[output_bgr < 0] = 0
    output_bgr[output_bgr > 255] = 255

    os.system('mkdir -p %s/%s' % (out_dir, set))
    basename = os.path.basename(ent)[:-4].replace('_GT', '')
    misc.imsave('%s/%s/%s-gt.png' % (out_dir, set, basename), highres)
    misc.imsave('%s/%s/%s-recon.png' % (out_dir, set, basename), output_bgr[:, :, (2, 1, 0)])

    #nn, li, cu = tb.computeBasePSNRs(ent, downsampledFilename=inputEnt)
    nn = tb.PSNR(); li=tb.PSNR(); cu=tb.PSNR()

    psnr = tb.PSNR()
    psnr.set(blobs['psnr'][0, 0, 0, 0],  blobs['psnr_y'][0, 0, 0, 0])

    print 'nn=%5s, li=%5s, cu=%5s, net=%5s' % (nn, li, cu, psnr)

    return (nn, li, cu, psnr)
Example #20
    def arpCrawl(self, timestamp=Toolbox.timestamp()):
        host = self.hosts
        def scan(host):
            # Update the timestamp.
            host.touch()
            # If we can't get a hostname, nothing else is going to work.
            print('Scanning host...', end=' ')
            if host.scanHostname():
                print(host.hostname)
                print('Scanning interfaces...', end=' ')
                host.scanInterfaces()
                host.print()

                print(len(host.addresses), 'interfaces discovered.')
                if not host.vendor == 'ubiquiti':
                    print('Scanning ARP...', end=' ')
                    arps = host.scanArpTable()
                    print(len(arps), 'arp records discovered.')
                #host.print()
                host.scanLocation()
                print('Host located at:', host.location, 'coords:', host.coords)
                print('Host\'s new timestamp:', host.updated)
                print('There are', len(self.nodes()), 'nodes.')
                print('Of which', len([a for a in self.nodes() if type(a) == Host]), 'are hosts.')
                print('Of which', len([a for a in self.nodes() if type(a) == Host\
                    and a.hostname == 'AwbreyM20']), 'are AwbreyM20.')
            else:
                print('Scan failed at', host.ips)
        hosts = self.hosts
        # Sort the list so that the least recently updated is last.
        for host in hosts:
            # Continuously scan the entire network.
            # Take the oldest entry.
            scan(host)
            #nx.draw(self, nx.spring_layout(self), node_size=3, node_color='yellow', font_size=6)
            #plt.tight_layout()
            #plt.savefig('graph.png', format='PNG')

            # Write safely
            nx.write_gml(self, 'network.gml.tmp', stringizer=Toolbox.stringize)
            os.rename('network.gml.tmp', 'network.gml')
            hosts += [h for h in self.hosts if h.updated < timestamp]
Example #21
def main():
    # define projects
    pim_projects = prj.Projects()
    pim_projects.add_project(gmail())
    pim_projects.add_project(outlook())
    pim_projects.add_project(file_metadata())

    print(pim_projects)

    # setup tools needed
    tools = mod_tool.Toolbox()
    tools.add({
        'name': 'email download',
        'file': myfldr + 'download_email.py',
        'interval': 'Daily'
    })
    tools.add({
        'name': 'email process',
        'file': myfldr + 'process_email.py',
        'interval': 'Daily'
    })
Example #22
    def open(self):
        fileName, _ = QFileDialog.getOpenFileName(self, "Open File",
                                                  QDir.currentPath())
        if fileName:
            image = QImage(fileName)
            if image.isNull():
                QMessageBox.information(self, "Image Viewer",
                                        "Cannot load %s." % fileName)
                return

            raster = gdal.Open(fileName)
            data = Toolbox.readGeotiff(raster)

            # self.imageLabel.setPixmap(QPixmap.fromImage(data))
            self.imageLabel.setPixmap(QPixmap.fromImage(image))
            self.scaleFactor = 1.0

            self.fitToWindowAct.setEnabled(True)
            self.updateActions()

            if not self.fitToWindowAct.isChecked():
                self.imageLabel.adjustSize()
Example #23
 def __init__(self, content, title, rule_list=None):
     self.title = title + '\n'
     self.child_list = []
     self.content = content.replace(' ', '')
     self.content = re.sub(r'^\s{1,}$', '', self.content)
     self.rule_list = []
     if rule_list == None:
         self.rule_list = Toolbox.readini("structT.ini")
     else:
         self.rule_list.extend(rule_list)
     self.word_count = len(self.content)
     if article.num == 0:
         self.Attribute = "root"
     elif self.content == self.title:
         self.Attribute = "bottom"
     else:
         self.Attribute = "child"
     article.num += 1
     try:
         self.get_par()
     except:
         pass
Example #24
#!/usr/bin/python

import Toolbox as tb
from OpticalFlow import downsample  # assumes OpticalFlow exposes a downsample() function (a bare module object would not be callable below)
import numpy as np

flow = tb.readFlow('flow.flo')

flow_ds2 = downsample(flow, 2)
flow_ds4 = downsample(flow, 4)

tb.writeFlow('ds2.flo', flow_ds2)
tb.writeFlow('ds4.flo', flow_ds4)
Example #25
import sys
sys.path.append('/Users/shomakitamiya/Documents/python/snake3D/src/Toolbox')
import math

import matplotlib

import geomdl.visualization.VisMPL as VisMPL
import matplotlib.pyplot as plt
import numpy as np
from geomdl import BSpline
from geomdl.fitting import approximate_surface
from geomdl.knotvector import generate
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 unused import

import Toolbox as tb

img = np.random.rand(100, 100, 100, 3)

tb.show_ct_image(img)
Example #26
    
    waitVanish(Pattern("SageTimeslip-1.png").similar(0.96))
    assert exists(Pattern("Expense.png").similar(0.97))
    type(Key.ESC)
    wait(.5)
    onAppear(Pattern("SageTimeslip-1.png").similar(0.96), type("y"))
    assert exists(Pattern("Time.png").similar(0.97))
    wait(.5)"""





Toolbox.payment_entry('payment','check','65432','sikuli test client','9/10/2014','678','Test Descriptions')
"""
Toolbox.payment_entry('payment','check','8079','sikuli','9/11/2014','110','Test description')

Toolbox.credit_entry('credit','sikuli','9/8/2014','1010','test credit description')

Toolbox.writeoff_entry('write','sikuli','9/7/2014','50','write off test description')

Toolbox.refund_entry('refund','sikuli','9/8/2014','25','refund test description')

Toolbox.invoice_entry('invoice','sikuli','9/8/2014','invoice test description','1009','100','25','50','15','5','n')
Example #27
index = []
knotvector_u = []
knotvector_v = []

cs = int(sys.argv[1])

for case in cases:
    case_str = tb.get_full_case_id(case)
    print(case_str)

    nii0 = nib.load(data_path + case_str + '/segmentation.nii.gz')
    seg = nii0.get_data().astype(np.uint8)

    rect_size = np.array([30, 3, 3])
    margin = rect_size[0]
    seg = tb.add_margin(seg, margin)

    seg = np.where(seg == 2, 0, seg).astype(np.uint8)

    # Extract only the right kidney
    removed_seg = tb.remove_left_kidney(seg)
    # contour_pts = tb.get_full_pts(seg)
    # kmeans = KMeans(n_clusters=2,random_state=173).fit(contour_pts)
    # pred = np.array(kmeans.labels_)
    # selected_clu = int(kmeans.cluster_centers_[0,2] < kmeans.cluster_centers_[1,2])
    # not_selected = contour_pts[pred != selected_clu,:]
    # removed_seg = seg.copy()

    # for i in range(not_selected.shape[0]):
    #     removed_seg[not_selected[i,0],not_selected[i,1],not_selected[i,2]] = 0
Example #28
    NFront = n
    return flg


def DrawContour():
    global dst
    dst = np.copy(img)
    for i in range(NFront - 1):
        dst = cv2.rectangle(dst, (int(Front[i, 1]), int(Front[i, 0])),
                            (int(Front[i, 1]) + 1, int(Front[i, 0]) + 1),
                            (100, 100, 100))


# img = cv2.imread('LevelSet/sample.bmp')
# dst = np.copy(img)
gray = tb.make_sphere_voxel(gridsize, gridsize / 2, gridsize / 4).astype(
    np.uint8) * 255
print(gray.dtype)

# cv2.namedWindow('Image', 1)
# cv2.resizeWindow('Image',gridsize, gridsize)

InitializeCircleMap()
print(NCircleMap)

InitializeFrontPosition()
# print(status)
# cv2.imshow('Image', 50 * status)
# cv2.waitKey(0)

reset = 1
Fs = 0
Example #29
removed = True

# cases = [1,18,22,23,29,50,52,59,71,73,75,76,78,80,82,97,98,106,109,114,127,128,135,144,166,167,170,171,173,175,179,185,196,203,209]
cases = [52]
data_path = '/Users/shomakitamiya/Documents/python/snake3D/data/ValidationData/'
output_path = '/Users/shomakitamiya/Documents/python/snake3D/data/RemovedW7DataLong/'

for case in cases:
    case_str = tb.get_full_case_id(case)
    print(case_str)

    nii0 = nib.load(data_path + case_str + '/imaging.nii.gz')
    img = nii0.get_data()

    tb.show_ct_image(img)

    # # Sphere parameters
    # res = 300
    # center = [res/2, res/2 + 20, res/2 + 40]
    # radius_true = 40

    # # Add noise
    # img = tb.make_sphere_voxel_v(res,center,radius_true)

    # tb.show_image_collection(img)

    rect_size = np.array([20, 6, 6])

    center = np.array([27, 305, 253])
    normal = np.array([0, 1, 0])

Example #30
# Demo: extract the contour of a sphere

# Sphere parameters
# res = 100
# center = res/2
# radius_true = 25

# # Add noise
# img = 0.2*tb.make_sphere_voxel(res,center,radius_true)
# img += 0.8*np.random.rand(res,res,res)

# tb.show_image_collection(img)

img = tb.load_test_medical_image()
tb.show_image_collection(img)

filter_size = 11

img = tb.add_margin(img, filter_size)
filtered_img = np.zeros(img.shape)

center = int(filter_size / 2)
radius = 5

sfilter = tb.make_sphere_voxel(filter_size, center, radius)

tb.show_image_collection(sfilter)

w, h, d = sfilter.shape
Example #31
    def kMeansOverlay(self):
        ###########################################################
        # Only use band 1 to read x,y pixel locations
        pixelInfo = pd.read_csv("Clustering/PixelInfo_Band1.csv",
                                sep='\t',
                                encoding='utf-8')
        indicesPd = pd.read_csv(
            'Clustering/Clustering_results/label_7classes.tsv',
            sep='\t',
            encoding='utf-8')
        indices = indicesPd['0'].astype(int)
        indiceCluster = (indices == 7)
        pixelCluster = pixelInfo[indiceCluster]

        #        ''' Tensorflow clustering '''
        #        n_features = 220
        #        n_clusters = 10
        #        n_samples_per_cluster = int(len(pixel_info) / n_clusters)
        #        seed = 700
        #        embiggen_factor = 70
        #
        #
        #        Band_1 = tf.convert_to_tensor(seqArray, np.float32)
        #        initial_centroids = choose_random_centroids(Band_1, n_clusters)
        #        nearest_indices = assign_to_nearest(Band_1, initial_centroids)
        #        updated_centroids = update_centroids(Band_1, nearest_indices, n_clusters)
        #
        #        model = tf.global_variables_initializer()
        #        with tf.Session() as session:
        #            sample_values = session.run(Band_1)
        #            updated_centroid_value = session.run(updated_centroids)
        #            nearest_indices_value = session.run(nearest_indices)

        ############################################################
        fileNameDir = "Data/1999_2017_landsat_time_series/roi/cut/fileNames_220.txt"
        if 'fileNameDir' in locals():
            pass
        else:
            raise AssertionError("FILE_NAME_DIR is not given!")

        with open(fileNameDir, "r") as file:
            imageFileNames = file.readlines()  # imageFiles is a list
            file.close()

        ############# Mask Qimage ###################################
        imageWidth = 3030
        imageHeight = 1474
        bytesPerPixel = 4  # 4 for RGBA
        maskData = np.zeros((imageHeight, imageWidth, 4), dtype=np.uint8)
        # maskData[:, :, 3] = 100

        for idx, row in pixelCluster.iterrows():
            pixel_x = row['x_pixel']
            pixel_y = row['y_pixel']
            maskData[pixel_y:pixel_y + 5, pixel_x:pixel_x + 5, 0] = 0
            maskData[pixel_y:pixel_y + 5, pixel_x:pixel_x + 5, 1] = 0
            maskData[pixel_y:pixel_y + 5, pixel_x:pixel_x + 5, 2] = 255
            maskData[pixel_y:pixel_y + 5, pixel_x:pixel_x + 5, 3] = 70

        mask = QImage(maskData, imageWidth, imageHeight,
                      imageWidth * bytesPerPixel, QImage.Format_ARGB32)

        painter = QPainter()

        for fileName in imageFileNames:
            fileName = "Data/1999_2017_landsat_time_series/roi/cut/" + fileName[:
                                                                                -1] + ".tif"

            raster = gdal.Open(fileName)
            data = Toolbox.readGeotiff(raster)

            # If data is more than one band!
            if len(data.shape) != 3:
                continue
            if data.shape[0] < 3:
                continue
            x_size = data.shape[2]
            y_size = data.shape[1]
            '''
            ###########
            img = QImage(fileName)
            ptr = img.bits()
            ptr.setsize(img.byteCount())

            ## copy the data out as a string
            strData = ptr.asstring()
            ## get a read-only buffer to access the data
            buf = memoryview(ptr)
            ## view the data as a read-only numpy array            
            arr = np.frombuffer(buf, dtype=np.ubyte).reshape(img.height(), img.width(), 4)
            ## view the data as a writable numpy array
            arr = np.asarray(ptr).reshape(img.height(), img.width(), 4)
            ############            
            '''

            # Choose the band:

            ### Bug solved - data type should be declared earlier!
            imageNew = np.zeros((y_size, x_size, 4), dtype=np.uint8)
            imageNew[:, :, 0] = data[2, :, :]
            imageNew[:, :, 1] = data[1, :, :]
            imageNew[:, :, 2] = data[0, :, :]
            #           if data.shape[0] == 6:
            #                imageNew[:, :, 0] = data[2, :, :]
            #                imageNew[:, :, 1] = data[1, :, :]
            #                imageNew[:, :, 2] = data[0, :, :]
            #            else:
            #                imageNew[:, :, 0] = data[3, :, :]
            #                imageNew[:, :, 1] = data[3, :, :]
            #                imageNew[:, :, 2] = data[3, :, :]
            imageNew[:, :, 3] = 255

            qimg = QImage(imageNew, x_size, y_size, QImage.Format_RGB32)

            # put overlay mask
            painter.begin(qimg)
            painter.drawImage(0, 0, mask)
            painter.end()

            self.imageLabel.setPixmap(QPixmap.fromImage(qimg))

            self.fitToWindow()
            self.updateActions()
            app.processEvents()

            # Change the title
            QMainWindow.setWindowTitle(self, fileName)

            time.sleep(0.2)
Example #32
    onAppear(Pattern("SageTimeslip-1.png").similar(0.96), type("n"))
    
    waitVanish(Pattern("SageTimeslip-1.png").similar(0.96))
    assert exists(Pattern("Expense.png").similar(0.97))
    type(Key.ESC)
    wait(.5)
    onAppear(Pattern("SageTimeslip-1.png").similar(0.96), type("y"))
    assert exists(Pattern("Time.png").similar(0.97))
    wait(.5)"""






Toolbox.payment_entry('payment','check','65432','sikuli test client','9/10/2014','678','Test Descriptions')
"""
Toolbox.payment_entry('payment','check','8079','sikuli','9/11/2014','110','Test description')

Toolbox.credit_entry('credit','sikuli','9/8/2014','1010','test credit description')

Toolbox.writeoff_entry('write','sikuli','9/7/2014','50','write off test description')

Toolbox.refund_entry('refund','sikuli','9/8/2014','25','refund test description')

Toolbox.invoice_entry('invoice','sikuli','9/8/2014','invoice test description','1009','100','25','50','15','5','n')

Toolbox.reverse_entry('reverse','sikuli','9/8/2014','reverse test description')

Toolbox.transfer_entry('transfer','sikuli','9/8/2014','15','transfer test description')
Example #33
    def performBias(self):
        extrainfo = dict()
        datadict = dict()
        
        gateDevice = self.__devicecontroller.getDeviceMappedToNode('Vg')
        drainDevice = self.__devicecontroller.getDeviceMappedToNode('Vd')
        sourceDevice = self.__devicecontroller.getDeviceMappedToNode('Vs')

        if gateDevice == None or drainDevice == None or sourceDevice == None:
            self.__logger.log(Logger.ERROR,"Three devices should be connected to the three different nodes before a bias run can be executed.")
            return
        
        drainDevice.set_output_volts(1)
        drainDevice.set_output_on()
        sourceDevice.set_output_volts(0)
        sourceDevice.set_output_on()
        self.__plotcontroller.clearPlot()
        tijd_lijst = Toolbox.makeTime(0, self.totaltime, self.nrDecades)
        self.__logger.log(Logger.INFO,'Stress times: '+' - '.join([str(x) for x in tijd_lijst]))
        self.__logger.log(Logger.INFO,'Starting Bias run with gate bias stress : %g V and drain stress : %d V'%(self.gate_bias,self.drain_bias))
        self.totalpbar.setMinimum(0)
        self.totalpbar.setMaximum(self.totaltime)
        extrainfo['total time'] = str(self.totaltime)+" s"
        extrainfo['number of decades']=str(self.nrDecades)
        extrainfo['stress times'] = ' - '.join([str(x) for x in tijd_lijst])
        extrainfo['gate bias stress'] = str(self.gate_bias)+" V"
        extrainfo['drain bias stress'] = str(self.drain_bias)+" V"
        extrainfo['sweep gate start'] = str(self.start)+" V"
        extrainfo['sweep gate end'] = str(self.stop)+" V"
        extrainfo['sweep drain'] = str(self.tft_drain)+" V"
        direction = "negative"
        if self.__ui.positiveBiasDirection.isChecked() == True:
            direction = "positive"
        base_name = self.construct_filename()
        filewriter = BiasFileWriter(self.__wafercontroller.get_current_wafer_dir()+"/"+base_name+".bias",self.__logger)
        filewriter.writeHeader(extrainfo, direction, self.totaltime)
        self.runActive = True
        self.__ui.actionBiasRun.setEnabled(False)
        self.__ui.actionAbortBiasStress.setEnabled(True)
        self.__ui.tftwidget.setEnabled(False)
        init_time = time.time()
        for t in range(0, len(tijd_lijst)):
            QtGui.QApplication.processEvents()
            self.__ui.currentCycleStatus.setText('Cycle %d: 0 / %d sec' %(t,tijd_lijst[t]))
            start = time.time()
            if t != 0:
                eind = start + tijd_lijst[t] - tijd_lijst[t-1]
                ctime = start - tijd_lijst[t-1]
            elif t==0:
                eind = start + tijd_lijst[t]
                ctime = start
                
            self.crono = Crono()
            self.crono.tick.connect(self.totalpbar.setValue)
            self.crono.status.connect(self.__ui.currentCycleStatus.setText)
            self.crono.start(tijd_lijst, t, eind, ctime)
            if self.runActive == False:
                self.__logger.log(Logger.WARNING,"Bias run aborted on user's request.")
                self.resetBias()
                return
            self.__ui.currentCycleStatus.setText('Cycle %d: %d / %d sec' % (t,tijd_lijst[t], tijd_lijst[t]))
            self.__logger.log(Logger.INFO,'Sweeping...')
            
            gate_smu = gateDevice.getScriptSyntax()
            drain_smu = drainDevice.getScriptSyntax()
            vgs, igs, ids = self.__tftcontroller.performSweep(gateDevice,drainDevice,gate_smu,drain_smu,self.start, self.stop,1,self.step,self.delay,False)
            self.__logger.log(Logger.INFO,'Sweep on timestamp %d sec - done' % (tijd_lijst[t]))
            self.__plotcontroller.plotIV_bias(ids,vgs)
            gateDevice.set_output_on()
            drainDevice.set_output_on()
            
            #Apply Bias
            gateDevice.set_output_volts(self.gate_bias)
            drainDevice.set_output_volts(self.drain_bias)
            t = Thread(target=filewriter.appendSweepData,args=(str(tijd_lijst[t]),Data.BiasPacket(igs, ids, vgs)))
            t.start()
            
        end_time = time.time()  
        gateDevice.set_output_off()
        drainDevice.set_output_off()
        self.__logger.log(Logger.INFO, 'Bias run completed in %d sec' %(end_time - init_time))
        self.__plotcontroller.saveCurrentPlot(self.__wafercontroller.get_current_wafer_dir()+"/"+base_name+".png")
        self.resetBias()
Example #34
emiss = em[:,1] 

"""Treat measurements as a """

"""Extract some nadir BB measurements only"""
print "Computing NBB radiances"
dates_select = []
rad_nadir = []   
new_select = []     
count = 0
for k in range(l):  
    count+=1 
    if count%120==0: 
        if selection[k] == 1:
            dates_select+=[dates[k]]            
            rad_nadir+=[Toolbox.radiance(t_nadir[k]+273.15,all_filters[j],emiss_wls,emiss,24+273.15) for j in list_filters]

n_select = len(dates_select)  
rad_nadir=reshape(array(rad_nadir),(n_select,11))

dnum = array([date2num(dates[k]) for k in range(l)])

if meas == "SET1":
    series = FirrSeries("/media/quentin/LACIE SHARE/FIRR_measurements/LR-TECH/SET1")
elif meas == "SET2":    
    series = FirrSeries("/media/quentin/LACIE SHARE/FIRR_measurements/LR-TECH/SET2")
sequences = series.sequences
series.get_temperature()
temp_firr = series.temperature
temp_firr_time = series.temp_time   
Example #35
 def touch(self):
     # Update timestamp on host.
     self.network.node[self]['updated'] = Toolbox.timestamp()
Example #36
    # nn, li, cu = tb.computeBasePSNRs(ent, downsampledFilename=inputEnt)
    nn = tb.PSNR()
    li = tb.PSNR()
    cu = tb.PSNR()

    psnr = tb.PSNR()
    psnr.set(blobs["psnr"][0, 0, 0, 0], blobs["psnr_y"][0, 0, 0, 0])

    print "nn=%5s, li=%5s, cu=%5s, net=%5s" % (nn, li, cu, psnr)

    return (nn, li, cu, psnr)


results = {}
for set in image_sets:
    list = tb.readTupleList("%s/%s-list.txt" % (dataset_path, set))

    nn_list = tb.PSNRList()
    li_list = tb.PSNRList()
    cu_list = tb.PSNRList()
    net_list = tb.PSNRList()
    for ent in list:
        print ""

        filename = ent[0]
        if "comic" in filename or "ppt3" in filename or "zebra" in filename:
            tb.notice("skipping %s" % filename, "del")
            continue

        inputFilename = filename.replace(".ppm", ".caffe.downsampled.ppm")
        # if len(ent)==2:
Example #37
# cases = [1,18,22,23,29,50,52,59,71,73,75,76,78,80,82,97,98,106,109,114,127,128,135,144,166,167,170,171,173,175,179,185,196,203,209]
case = 170
case_str = tb.get_full_case_id(case)
# data_path = '/Users/shomakitamiya/Documents/python/snake3D/data/TestData/'
data_path = '/Users/shomakitamiya/Documents/python/snake3D/data/ValidationData/'

nii0 = nib.load(data_path + case_str + '/imaging.nii.gz')
img = nii0.get_data()

nii0 = nib.load(data_path + case_str + '/segmentation.nii.gz')
seg = nii0.get_data()

img_tmp = np.where(1 <= seg, img, 0)
print(np.mean(img_tmp))
print(np.max(img_tmp))
print(np.min(img_tmp))

center = 0
width = 300
wmax = center + width
wmin = center - width

img = np.where(wmax < img, wmax, img)
img = np.where(img < wmin, wmin, img)

img = (img - wmin) / (wmax - wmin) * 255
img = img.astype(np.uint8)

tb.show_image_collection(img)
Example #38
from pylab import *
import os
import glob
from scipy.integrate import trapz, simps
import Toolbox

h = 6.62e-34
c = 3e8
kb = 1.38e-23

epsilon_wls = array([1, 50])
epsilon = array([1, 1])
Tpm = 300

filtres_ref = ["open", "F0007", "F0008", "F0009", "F0034", "F0035", "F0036", "F0010", "F0011", "F0014"]
n = len(filtres_ref)

temp = linspace(51, 350, 270)
l = len(temp)
LUT_radiances = zeros([l, n + 1])
LUT_radiances[:, 0] = temp[:]

for k, f in enumerate(filtres_ref):
    for i, t in enumerate(temp):
        print t
        LUT_radiances[i, k + 1] = Toolbox.radiance(t, f, epsilon_wls, epsilon, 300)

savetxt("LUT_radiances_new.txt", LUT_radiances)
Example #39
data_path = '/Users/shomakitamiya/Documents/python/snake3D/data/contours/kidney_EN/'
img_path = '/Users/shomakitamiya/Documents/python/snake3D/data/kits19/data/'

# Apply the separability membrane to multiple initial contours

window_center = 0
window_width = 300

for case in cases:
    case_str = tb.get_full_case_id(case)
    print(case_str)

    # Load the volume data
    nii0 = nib.load(img_path + case_str + '/imaging.nii.gz')
    img = nii0.get_data()
    img = tb.window_function(img, window_center, window_width)

    init_path = data_path + case_str
    rect_size = np.array([30, 6, 6])

    result_surfs = []

    for f in os.listdir(init_path):
        init = import_json(init_path + '/' + f)[0]
        final = tb.separability_membrane(img, init, rect_size)
        result_surfs.append(final)

    save_path = init_path + '/result'
    os.makedirs(save_path, exist_ok=True)

    for i, s in enumerate(result_surfs):
Example #40
import sys
sys.path.append('/home/kitamiya/Documents/python/snake3D/src/Toolbox')
import matplotlib.pyplot as plt
import numpy as np
import cv2
import math
import Toolbox as tb

w, h, d = (20, 20, 20)
rect_size = (8, 2, 2)

image = np.zeros((w, h, d))
image[int(w / 2):, :, :] = 1

center = np.array([int(w / 2) + 2, int(h / 2), int(d / 2)], dtype='float32')
neighborhood = np.array([center, center, center, center])
neighborhood[0] += np.array([0.5, -1, 0])
neighborhood[1] += np.array([0, 0, -1])
neighborhood[2] += np.array([-0.5, 1, 0])
neighborhood[3] += np.array([0, 0, 1])

print(center)
print(tb.update_sample_point(center, neighborhood, image, rect_size))
Example #41
from sklearn.neighbors import BallTree

# cases = range(150,190)
cases = [155]
data_path = '/Users/shomakitamiya/Documents/python/snake3D/data/kits19/data/'
output_path = '/Users/shomakitamiya/Documents/python/snake3D/data/KidneyContour/'

window_center = 0
window_width = 300

for case in cases:
    case_str = tb.get_full_case_id(case)
    print(case_str)

    # Load the data
    nii0 = nib.load(data_path + case_str + '/kidney_contour.nii.gz')
    pre = nii0.get_data()
    pre_con = tb.get_full_pts(pre)

    nii0 = nib.load(data_path + case_str + '/segmentation.nii.gz')
    gt = nii0.get_data()
    gt_con = tb.get_contour_pts(gt)

    tree = BallTree(gt_con)
    _, ind = tree.query(pre_con, k=1)
    ind = np.reshape(ind, (ind.shape[0]))

    dist_sum = 0
    for i in range(pre_con.shape[0]):
        dist_sum += np.linalg.norm(pre_con[i, :] - gt_con[ind[i], :])
Example #42
 def mac(self):
     macs = self.network.findAdj(self, ntype=Mac)
     try:
         return Toolbox.getUnique(macs)
     except IndexError:
         return None
Example #43
os.makedirs(output_path + case_str, exist_ok=True)

# fo = open(output_path+case_str+'/output.txt', 'w')
# sys.stdout = fo

print(case_str)

nii0 = nib.load(data_path + case_str + '/prediction.nii.gz')
seg_original = nii0.get_data().astype(np.uint8)

nii0 = nib.load(data_path + case_str + '/imaging.nii.gz')
img = nii0.get_data()

rect_size = np.array([30, 3, 3])
margin = rect_size[0]
img = tb.add_margin(img, margin)
seg = tb.add_margin(seg_original, margin)

seg = np.where(seg == 2, 1, 0).astype(np.uint8)

contour_pts = tb.get_full_pts(seg)
if np.sum(seg) < 50:
    sys.exit(0)

pred = np.array(DBSCAN(eps=1).fit_predict(contour_pts))
num_clu = np.max(pred) + 1

selected_clu = np.argmax(np.array([np.sum(pred == i) for i in range(num_clu)]))
not_selected = contour_pts[pred != selected_clu, :]
removed_seg = seg.copy()
Example #44
import DescriptionParser as dp
import Toolbox as tb

# grammar example
test1 = dp.parse_grammar("grammar1.txt")
print(test1)

# DFA test example: should accept exactly those words where (number of a's) - (number of b's) is congruent to 3 (mod 4)
test2 = dp.parse_dfa("dfa1.txt")

w1 = "aaa"
w2 = "aaaab"
w3 = "a"
w4 = "b"
w5 = "abababaaa"
w6 = "bbbaaaaaaa"

print("{} is accepted: {} (should be: True)".format(
    w1, tb.word_accepted(test2, w1)))
print("{} is accepted: {} (should be: True)".format(
    w2, tb.word_accepted(test2, w2)))
print("{} is accepted: {} (should be: False)".format(
    w3, tb.word_accepted(test2, w3)))
print("{} is accepted: {} (should be: True)".format(
    w4, tb.word_accepted(test2, w4)))
print("{} is accepted: {} (should be: True)".format(
    w5, tb.word_accepted(test2, w5)))
print("{} is accepted: {} (should be: False)".format(
    w6, tb.word_accepted(test2, w6)))
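The expected answers printed above can be double-checked independently of the parsed DFA: the language is exactly the words where (number of a's) minus (number of b's) is congruent to 3 mod 4. A hypothetical helper, not part of DescriptionParser or Toolbox:

def in_language(word):
    # membership test for the language described above
    return (word.count("a") - word.count("b")) % 4 == 3

for w in (w1, w2, w3, w4, w5, w6):
    print("{}: {}".format(w, in_language(w)))   # True, True, False, True, True, False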
Example #45
# fig = plt.figure()
# ax = Axes3D(fig)
# ax.plot(sp[:,0],sp[:,1],sp[:,2],'o',markersize=2)
# plt.show()

knot = generate(degree, n_cp)

A = np.array([[
    basis_function_one(degree, knot, k, u[i]) *
    basis_function_one(degree, knot, l, v[j]) for k in range(n_cp)
    for l in range(n_cp)
] for i in range(n_sp) for j in range(n_sp)])
print(A.shape)

invATA = np.linalg.inv(np.dot(A.T, A))
cp = np.dot(np.dot(invATA, A.T), sp).tolist()

print(cp)

surf = BSpline.Surface()
surf.degree_u = degree
surf.degree_v = degree

surf.set_ctrlpts(cp, n_cp, n_cp)

surf.knotvector_u = knot
surf.knotvector_v = knot

tb.surf_render(surf)
Example #46
import matplotlib

import geomdl.visualization.VisMPL as VisMPL
import matplotlib.pyplot as plt
import numpy as np
from geomdl import BSpline
from geomdl.fitting import approximate_surface
from geomdl.knotvector import generate
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 unused import
from skimage.segmentation import flood_fill

import Toolbox as tb

rect_size = np.array([20, 6, 6])

# img = tb.load_test_medical_image()
# tb.show_ct_image(img)

margin = rect_size[0]
inside = tb.load_test_medical_image()
# tb.show_ct_image(inside)

img_shape = tuple([s + 2 * margin for s in inside.shape])

img = np.zeros(img_shape)
img += np.min(inside)
img[margin:img_shape[0] - margin, margin:img_shape[1] - margin,
    margin:img_shape[2] - margin] += inside - np.min(inside)

tb.show_ct_image(img)
Example #47
knotvector_v = np.load(
    '/Users/shomakitamiya/Documents/python/snake3D/data/numpy/' + method +
    str(cs) + '_knotvector_v.npy')

print(init.shape)

dice_sum = 0
print(cases.shape)

for i, case in enumerate(cases):
    case_str = tb.get_full_case_id(case)
    print(case_str)

    nii0 = nib.load(data_path + case_str + '/segmentation.nii.gz')
    gt = nii0.get_data().astype(np.uint8)
    gt = tb.add_margin(gt, 30)

    surf = BSpline.Surface()

    surf.degree_u = 3
    surf.degree_v = 3

    ctrpts = reshaping(init[i, :], cs, cs)
    surf.set_ctrlpts(ctrpts, cs, cs)

    surf.knotvector_u = knotvector_u[i, :]
    surf.knotvector_v = knotvector_v[i, :]

    img_mask = tb.get_image_mask(gt, surf)
    prediction = tb.fill_contour(img_mask)
Example #48
Thot = sequence.temp_hbb[i_hbb]                  # exact HBB temperature at measurement
Tpma1 = sequence.temp_pm[i_abb1]                 # pointing mirror temperature at ABB measurement
Tpmh = sequence.temp_pm[i_hbb]                   # pointing mirror temperature at HBB measurement
tamb1 = all_tms[k,0]
thot = all_tms[k,1]-tamb1
tscene = all_tms[k,2:,None]-tamb1
amb1 = all_mean[k,0,:]
hot = all_mean[k,1,:]
scene = all_mean[k,2:,:]
              
Tamb2 = Tamb1
Tpma2 = Tpma1
tamb2 = tamb1 
amb2 = amb1
       
G,B0,rad_scene,bt = Toolbox.get_calib(amb1,hot,scene,amb2,thot,tscene,tamb2,Tamb1,Thot,Tamb2,filters[0],emiss_wls,emiss,Tpma1,Tpmh,Tpma2)               

new_G = zeros([4800])
new_B0 = zeros([4800])

new_G[illuminated] = G[illuminated]
new_B0[illuminated] = B0[illuminated]
new_B0[non_illuminated] = B0[non_illuminated]

#new_G = G
#new_B0 = B0

fig,(ax1,ax2) = subplots(2,1,figsize=(8,10))
fig.subplots_adjust(bottom=0.1,left=0.05,right=0.95,hspace=0.3)

cs1 = ax1.imshow(reshape(new_G,(60,80)),vmin=-34,vmax=-31.5,cmap=cm.gist_rainbow,interpolation='none')
Example #49
    nii0 = nib.load(data_path + case_str + '/prediction.nii.gz')
    seg_original = nii0.get_data().astype(np.uint8)

    nii0 = nib.load(data_path + case_str + '/imaging.nii.gz')
    img = nii0.get_data()

    # img = tb.threshold_image_seg(img,seg)
    # t_min = -80
    # t_max = 200
    # img = tb.threshold_image_minmax(img,t_min,t_max)

    # tb.show_ct_image(img)

    rect_size = np.array([30, 3, 3])
    margin = rect_size[0]
    img = tb.add_margin(img, margin)
    seg = tb.add_margin(seg_original, margin)

    seg = np.where(seg == 2, 1, 0).astype(np.uint8)

    # contour_pts = tb.get_contour_pts(seg)

    # pred = np.array(DBSCAN(eps=5).fit_predict(contour_pts))
    # num_clu = np.max(pred) + 1

    # c_no = np.argmax(np.array([np.sum(pred==i) for i in range(num_clu)]))

    contour_pts = tb.get_full_pts(seg)
    if np.sum(seg) < 50:
        print(case_str + ' is skipped because seg is all zero.')
        nii1 = nib.Nifti1Image(seg_original, affine=None)
Example #50
gt = nii0.get_data().astype(np.uint8)

nii0 = nib.load(data_path + case_str + '/prediction.nii.gz')
seg = nii0.get_data().astype(np.uint8)

nii1 = nib.load(output_path + case_str + '/prediction_contour.nii.gz')
con_seg = nii1.get_data()

nii1 = nib.load(data_path + case_str + '/contour_img.nii.gz')
contour_img = nii1.get_data()
# margin = 30
# contour_img = contour_img[margin:contour_img.shape[0]-margin,margin:contour_img.shape[1]-margin,margin:contour_img.shape[2]-margin]

# con_seg = con_seg[15:con_seg.shape[0]-15,15:con_seg.shape[1]-15,15:con_seg.shape[2]-15]

# nii1 = nib.Nifti1Image(con_seg,affine=None)
# nib.save(nii1,'/Users/shomakitamiya/Documents/python/snake3D/data/Data/' + case + '/contour_seg.nii.gz')

print(con_seg.shape)
print(con_seg.dtype)
print(np.max(con_seg))

# tb.show_ct_image(gt)
# tb.show_ct_image(seg)
# tb.show_ct_image(con_seg)

tb.show_ct_image(tb.draw_segmentation(img, gt, mark_val=2))
tb.show_ct_image(tb.draw_segmentation(img, seg, mark_val=2))
# tb.show_ct_image(tb.draw_segmentation(img,contour_img,mark_val=255))
tb.show_ct_image(tb.draw_segmentation(img, con_seg, mark_val=2))
Example #51
    def get_radiance(self,list_filters,method="next",spav="all"): 
        """Compute calibration for all filters indicated, correcting if necessary for the temperature drift between BB and scene measurements
        method : "next" to use following sequence to interpolate background signal"""
        self.organized(spav=spav)
        nview = self.npos-2   # number of scene measurements in a complete sequence
        
        if spav == "all":
            all_mean = self.all_mean
            l = 2           
        else:
            all_mean = self.all_mean[:,:,[illuminated]] # calculations on illuminated pixels only  
            l = len(illuminated)
            
        all_tms = self.all_tms
        all_radiance = zeros([11,nview,l])  # Radiance (W/m2/sr) for each filter, several BT in a sequence if several nadir or zenith views     
        all_bt = zeros([11,nview,l])      # Brightness temperature for each filter,  
        offset = zeros([11,l])
        gain = zeros([11,l])       
        
        if method == "next":
            next_seq = FirrSequence(self.next,self.filter_pos,"nocare")                     
            dtime = next_seq.get_time_seq()-self.get_time_seq() 
            dt = 1000*dtime.total_seconds() # in ms
                          
            next_seq.organized(spav = spav)
            
            if spav == "all":
                next_all_mean = next_seq.all_mean[:,:,:]
            else:
                next_all_mean = next_seq.all_mean[:,:,[illuminated]]
                
            next_all_tms = next_seq.all_tms                      
                      
        for fil in list_filters:
            k = filters_positions.index(fil)
            i_abb1 = searchsorted(self.temp_tms,all_tms[k,0]) 
            Tamb1 = self.temp_abb[i_abb1]                # exact ABB temperature at measurement
            i_hbb = searchsorted(self.temp_tms,all_tms[k,1]) 
            Thot = self.temp_hbb[i_hbb]                  # exact HBB temperature at measurement
            Tpma1 = self.temp_pm[i_abb1]                 # pointing mirror temperature at ABB measurement
            Tpmh = self.temp_pm[i_hbb]                   # pointing mirror temperature at HBB measurement
            tamb1 = all_tms[k,0]
            thot = all_tms[k,1]-tamb1
            tscene = all_tms[k,2:,None]-tamb1
            amb1 = all_mean[k,0,:]
            hot = all_mean[k,1,:]
            scene = all_mean[k,2:,:]
            
            if method == "next":
                i_abb2 = searchsorted(next_seq.temp_tms,next_all_tms[k,0]) 
                Tamb2 = next_seq.temp_abb[i_abb2]            # exact ABB temperature for next sequence calibration
                Tpma2 = next_seq.temp_pm[i_abb2]             # pointing mirror temperature at next ABB measurement
                tamb2 = dt + next_all_tms[k,0]-tamb1             # time after first ambient measurement
                amb2 = next_all_mean[k,0,:]      
                
            else: # no interpolation in this case                
                Tamb2 = Tamb1
                Tpma2 = Tpma1
                tamb2 = tamb1 
                amb2 = amb1
                  
               
            # Calibration : S = B0 + G * rad_scene   
            G,B0,rad_scene,bt = Toolbox.get_calib(amb1,hot,scene,amb2,thot,tscene,tamb2,Tamb1,Thot,Tamb2,fil,emiss_wls,emiss,Tpma1,Tpmh,Tpma2)               
            all_bt[k,:,:] = bt 

            all_radiance[k,:,:]=rad_scene
            offset[k,:] = B0
            gain[k,:] = G      
                 
        self.all_bt = mean(ma.masked_equal(all_bt,0),axis=2) # contains 0 where not calculated  
        self.all_radiance = mean(ma.masked_equal(all_radiance,0),axis=2)
        self.offset = offset
        self.gain = gain 
Example #52
 def host(self):
     hosts = self.network.findAdj(self, ntype=Host)
     return Toolbox.getUnique(hosts)