Example #1
def guiCall():
    print "gui"
    GUI.main()
    mask= np.genfromtxt(os.path.join(projPath,'mask.txt'),'float')
    print mask
    main_directory= os.path.join(projPath,"input")
##        with open(os.path.join(projPath,'tempGUI.txt'))as datafile:
##            database= datafile.readlines()
    datafile= os.path.join(projPath,'tempGUI.txt')
    with open(datafile,'r') as data :
        filename= data.readline()[:-1]
    print filename
    seg.crop_image(seg_image,mask,datafile,filename,main_directory)   
Example #2
def main(argv):
    '''
    main method
    '''
    start_time = time.time()
    
    # Extract features from the given data.

    p=Preprocess_Extract() 
   
    # This file contains the lists of files for training and testing: 2/3 for training and 1/3 for testing
    # Get list of train files
    f=open('split_files.txt','r')

    # Get files for train data
    files=f.readline()
    files=f.readline()
    files=files.strip("Set([")
    files=files.strip("])\n")
    list_files=files.split(', ')
    train_files=Set(list_files)

    #Get files for test data    
    files=f.readline()
    files=f.readline()
    files=files.strip("Set([")
    files=files.strip("])\n")
    list_files=files.split(', ')
    test_files=Set(list_files)

    # Load Classifier 
    print 'Loading Classifier\n'
    f_read_Classifier= open ('f_classifier','rb')
    rfc = cp.load(f_read_Classifier)

    # load relationship object
    print 'Loading Relational Classifier\n'
    f_read_rel=open('f_rel_classifier','rb')
    rel_rfc= cp.load(f_read_rel)
    
    # Need to specify path where the inkml files are located.
    file_path_till_Traininkml='/home/sbp3624/PatternRecog/TrainINKML_v3/'
    s= Segmentation()
    print "Classification , Segmentation and Parsing for Testing data started \n"
    str_opt="Test"

    # First segmenting the data 
    s.sym_segmentation(rfc,file_path_till_Traininkml,str_opt,rel_rfc)
    
    print 'Done!!!!'
    print "Total Time Taken= %f" %(time.time()-start_time)
Example #3
def rtTopics(dire,direo,topicnum,topicwordsnum,iternum=1000):
    """
    Return topics. dire is the input directory, direo is the output directory,
    topicnum is the number of topics, topicwordsnum is the number of words per
    topic, and iternum is the number of LDA iterations.
    """
    model=lda.LDA(n_topics=topicnum,n_iter=iternum,random_state=1)
    for filename in os.listdir(dire):
        vocab=()
        wordsdic={}
        filenum=0
        filename=filename.decode('gbk').encode('utf-8')
        fdir=dire+filename
        SegResults=seg.rtSegResult(fdir, True, True)
        seg.FileSegResult(direo+'words'+str(filenum+1)+'.txt', SegResults)
        seg.PlotWordCloud(SegResults,  'E:/Other/USTB/nlp/WordCloud/tag_cloud'+str(filenum+1)+'.jpg')
        for s in SegResults:
            sf=s[0].encode('utf-8')
            if wordsdic.has_key(sf):
                tmpdict=wordsdic[sf].copy()
                tmpdict[filenum]=s[1]
                wordsdic[sf]=tmpdict.copy()
            else:
                tmpdict={}
                tmpdict[filenum]=s[1]
                wordsdic[sf]=tmpdict.copy()
        filenum+=1
        vocab=tuple(wordsdic.keys())
        X=zeros((filenum,len(vocab)),dtype=int)
        wordnum=0
        for w in wordsdic.keys():
            tmpdict=wordsdic[w].copy()
            for t in tmpdict.keys():
                X[t][wordnum]=tmpdict[t]
            wordnum+=1
        print X.sum()
        model.fit(X)
        topic_word = model.topic_word_
        n_top_words = topicwordsnum
        fileout=codecs.open(direo+'topics'+str(filenum)+'.txt','w','utf-8')
        for i, topic_dist in enumerate(topic_word):
            topic_words = array(vocab)[argsort(topic_dist)][:-(n_top_words+1):-1]
            fileout.write(('Topic {}: {}'.format(i, '+'.join(topic_words))+'\n').decode('utf-8'))
        fileout.close()
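
For reference, a minimal standalone sketch of the lda API used above (lda.LDA, fit, topic_word_) on a toy document-term count matrix; the vocabulary and counts below are invented for illustration:

import numpy as np
import lda

vocab = ('apple', 'banana', 'network', 'server')
# rows are documents, columns are integer word counts
X = np.array([[4, 3, 0, 0],
              [5, 2, 1, 0],
              [0, 0, 6, 4]], dtype=int)

model = lda.LDA(n_topics=2, n_iter=200, random_state=1)
model.fit(X)
for i, topic_dist in enumerate(model.topic_word_):
    top_words = np.array(vocab)[np.argsort(topic_dist)][:-3:-1]  # two strongest words
    print('Topic {}: {}'.format(i, ' + '.join(top_words)))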
Example #4
    return cluster_result


def WriteBin(cluster_result):
    """
    Write the clustering results to binary files so they can later be used for
    protocol keyword extraction.
    :param cluster_result: clustering results
    """
    for i in range(len(cluster_result)):
        filename = "result" + str(i) + ".bin"
        f = open(filename, "wb")
        pickle.dump(cluster_result[i], f)
        f.close()


if __name__ == '__main__':
    message_list = PCAPReader.ImportMessage()
    # result_split = SplitCluster(message_list,15)
    max_lenth = Segmentation.GetSplitMaxLen(message_list)
    split_list = Segmentation.AllSplitList(message_list, max_lenth)
    cluster_result = MessageCluster(message_list, split_list)
    cluster_result = DeleteRedun(cluster_result)
    WriteBin(cluster_result)
    for i in range(len(cluster_result)):
        print("")
        print("number " + str(i) + " cluster:")
        for j in range(len(cluster_result[i])):
            print("\n")
            for k in range(len(cluster_result[i][j])):
                print(cluster_result[i][j][k])
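
Since WriteBin serialises each cluster with pickle.dump, reading a result file back is the mirror operation; a small sketch using the same file-name pattern:

import pickle

with open("result0.bin", "rb") as f:
    cluster0 = pickle.load(f)   # the first cluster written by WriteBin
print(len(cluster0), "messages in cluster 0")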
Example #5
#         # df.to_csv('SLIC_segments/' + filename.replace(".jpg", "") + '.csv', sep=',', header= False, index=False)
#         seg.plot_segments('SLIC_figures/' + filename.replace("jpg", "png"), image, seg)

# ## Process data using Hough
# for filename in os.listdir('Test_Data2/'):
#     if filename.endswith(".jpg"):
#         print('Processing file: ' + filename)
#         filepath = os.path.join('Test_Data2/', filename)
#         image = io.imread(filepath)
#         max_peak, edges = seg.Hough(image)
#         seg.Hough_plot('Hough_figures/' + filename.replace("jpg", "png"), max_peak, image, edges)

# Process data using FCN
filepath = "Test_Data/test_image15.jpg"
image = io.imread(filepath)
seg.processPrototxt(image, 8)
net = seg.load_FCN(layer=8)
# net = load_FCN(layer=16)
# net = load_FCN(layer=32)
transformer = seg.get_transformer(net)
result8 = seg.process_FCN(filepath, net, transformer)
# result16 = process_FCN(filepath, net16, transformer)
# result32 = process_FCN(filepath, net32, transformer)
# image = transform.resize(image, (500,400))
seg.plot_FCN_water('Combine/' + "water_seg.png",
                   image,
                   result8[0, 57],
                   net,
                   transformer,
                   p=0,
                   normalize=False)
Example #6
        print "assign_color done in ", time2 - time1, " s"
        self.set_canvascolor()
        im = Image.new("RGB", (400, 400), (255, 255, 255))
        for obj in self.layers.segs:
            for subseg in obj.subsegment:
                slayer = Seg_layer(subseg.edge, subseg.pix, self.canvas.canvas,
                                   im)
                slayer.render()
        '''
        for n in range(self.canvas.canvas.shape[0]):
            for p in range(self.canvas.canvas.shape[1]):
                if self.canvas.canvas[n][p][3] < 1.0:
                    print self.canvas.canvas[n][p][3]
        '''
        plt.imshow(self.canvas.canvas)
        plt.show()


if __name__ == '__main__':
    sg = Segmentation()
    sg.imread('ball.jpg')
    sg.set_no(1)

    sg.set_ns(1)
    sg.segment()
    canvas = Canvas()
    canvas.set_canvas(250, 250)
    canvas.set_paper('paper.jpg')
    pt = Painter(sg, canvas)
    pt.paint()
Example #7
# coding=utf-8
'''
Created on Sep. 29, 2015

@author: LuDan
'''
import Segmentation as seg

fdir = 'E:/Other/USTB/nlp/Text1.txt'
frfile = True
ofile = 'text.txt'

SegResults = seg.rtSegResult(fdir, frfile, True)
#get the segmentation results
seg.FileSegResult(ofile, SegResults)
#put the segmentation results to a file
seg.PlotWordCloud(SegResults, 'Ttag_cloud.jpg', 30, 20, 80)
#plot the wordcloud
Example #8
def threadCrown(filepath):
    global io

    rtpSkel = -1
    crownT = OrderedDict()
    imgL = []
    stemCorrection = bool(int(options[8][1]))

    print io.getHomePath()
    oldHome = io.getHomePath()
    os.chdir(io.getHomePath())
    io.setHomePath('./Crown/')
    f = io.scanDir()
    for (counter, i) in enumerate(f):
        io.setFileName(os.path.basename(i))
        io.setidIdx(imgID)

        print 'processing Crown file: ' + i
        xScale = allPara[counter][7]
        yScale = allPara[counter][8]
        analysis = Analysis.Analysis(io, (xScale + yScale) / 2)
        rtp = RootTipPaths.RootTipPaths(io)

        try:
            img = scipy.misc.imread(i, flatten=True)
        except:
            print 'Image not readable'
            img = []

        if len(img) > 0:
            seg = Segmentation.Segmentation(img, io)
            imgL = seg.label()
            print 'compute root profile'
            currT = time.time()
            if ifAnyKeyIsTrue([
                    'AVG_DENSITY', 'WIDTH_MED', 'WIDTH_MAX', 'DIA_STM_SIMPLE',
                    'D10', 'D20', 'D30', 'D40', 'D50', 'D60', 'D70', 'D80',
                    'D90', 'DS10', 'DS20', 'DS30', 'DS40', 'DS50', 'DS60',
                    'DS70', 'DS80', 'DS90', 'AREA', 'ANG_TOP', 'ANG_BTM'
            ]):
                crownT['AVG_DENSITY'], crownT['WIDTH_MED'], crownT[
                    'WIDTH_MAX'], crownT['D10'], crownT['D20'], crownT[
                        'D30'], crownT['D40'], crownT['D50'], crownT[
                            'D60'], crownT['D70'], crownT['D80'], crownT[
                                'D90'], crownT['DS10'], crownT['DS20'], crownT[
                                    'DS30'], crownT['DS40'], crownT[
                                        'DS50'], crownT['DS60'], crownT[
                                            'DS70'], crownT['DS80'], crownT[
                                                'DS90'], crownT['AREA'], crownT[
                                                    'DIA_STM_SIMPLE'], crownT[
                                                        'ANG_TOP'], crownT[
                                                            'ANG_BTM'] = analysis.getWidthOverHeight(
                                                                imgL, xScale,
                                                                yScale)
                print 'Mask traits computed ' + str(time.time() - currT) + 's'

            if ifAnyKeyIsTrue([
                    'DIA_STM', 'TD_MED', 'TD_AVG', 'STA_RANGE', 'STA_DOM_I',
                    'STA_DOM_II', 'STA_25_I', 'STA_25_II', 'STA_50_I',
                    'STA_50_II', 'STA_75_I', 'STA_75_II', 'STA_90_I',
                    'STA_90_II', 'RTA_DOM_I', 'RTA_DOM_II', 'STA_MIN',
                    'STA_MAX', 'STA_MED', 'RTA_RANGE', 'RTA_MIN', 'RTA_MAX',
                    'RTA_MED', 'NR_RTP_SEG_I', 'NR_RTP_SEG_II', 'ADVT_COUNT',
                    'BASAL_COUNT', 'ADVT_ANG', 'BASAL_ANG', 'HYP_DIA',
                    'TAP_DIA', 'MAX_DIA_90', 'DROP_50', 'CP_DIA25', 'CP_DIA50',
                    'CP_DIA75', 'CP_DIA90', 'SKL_DEPTH', 'SKL_WIDTH'
            ]):
                currT = time.time()
                skel = Skeleton.Skeleton(imgL)
                testSkel, testDia = skel.skel(imgL)
                scipy.misc.imsave(
                    os.path.join(io.getHomePath(), 'Skeleton',
                                 io.getFileName() + '_skel.png'), testSkel)
                print 'Medial axis computed ' + str(time.time() - currT) + 's'
                currT = time.time()
                path, skelGraph, crownT[
                    'DIA_STM'], skelSize = seg.findThickestPath(
                        testSkel, testDia, xScale, yScale)
                allPara[counter][10] = skelSize
                print 'Central path computed ' + str(time.time() - currT) + 's'

            if ifAnyKeyIsTrue([
                    'TD_MED', 'TD_AVG', 'STA_RANGE', 'STA_DOM_I', 'STA_DOM_II',
                    'STA_25_I', 'STA_25_II', 'STA_50_I', 'STA_50_II',
                    'STA_75_I', 'STA_75_II', 'STA_90_I', 'STA_90_II',
                    'RTA_DOM_I', 'RTA_DOM_II', 'STA_MIN', 'STA_MAX', 'STA_MED',
                    'RTA_RANGE', 'RTA_MIN', 'RTA_MAX', 'RTA_MED',
                    'NR_RTP_SEG_I', 'NR_RTP_SEG_II', 'ADVT_COUNT',
                    'BASAL_COUNT', 'ADVT_ANG', 'BASAL_ANG', 'HYP_DIA',
                    'TAP_DIA', 'MAX_DIA_90', 'DROP_50', 'CP_DIA25', 'CP_DIA50',
                    'CP_DIA75', 'CP_DIA90', 'SKL_DEPTH', 'SKL_WIDTH',
                    'RTP_COUNT'
            ]):
                print 'Compute RTP skeleton'
                currT = time.time()
                rtpSkel, crownT['RTP_COUNT'], crownT['TD_MED'], crownT[
                    'TD_AVG'], crownT['MAX_DIA_90'], rtps, tips, crownT[
                        'SKL_WIDTH'], crownT['SKL_DEPTH'] = rtp.getRTPSkeleton(
                            path, skelGraph, True)
                seg.setTips(tips)
                print 'RTP Skeleton computed ' + str(time.time() - currT) + 's'

            allPara[len(allPara) - 1][2] = seg.getFail()

            if ifAnyKeyIsTrue(['RDISTR_X', 'RDISTR_Y']):
                print 'Compute spatial root distribution'
                currT = time.time()
                crownT['RDISTR_X'], crownT['RDISTR_Y'] = analysis.getSymmetry(
                    rtps, rtpSkel)
                print 'Symmetry computed ' + str(time.time() - currT) + 's'

            if rtpSkel != -1:
                if ifAnyKeyIsTrue([
                        'NR_RTP_SEG_I', 'NR_RTP_SEG_II', 'ADVT_COUNT',
                        'BASAL_COUNT', 'ADVT_ANG', 'BASAL_ANG', 'HYP_DIA',
                        'TAP_DIA'
                ]):
                    print 'searching for hypocotyl'
                    currT = time.time()
                    branchRad, nrPaths = seg.findHypocotylCluster(
                        path, rtpSkel)
                    print 'hypocotyl computed ' + str(time.time() -
                                                      currT) + 's'
                    print 'starting kmeans'
                    try:
                        currT = time.time()
                        c1x, c1y, c2x, c2y = analysis.plotDiaRadius(
                            nrPaths, branchRad, path, 2)

                        print '2 clusters computed in ' + str(time.time() -
                                                              currT) + 's'

                        currT = time.time()
                        segImg = seg.makeSegmentationPicture(
                            path, rtpSkel, img, xScale, yScale, c1x, c1y, c2x,
                            c2y)
                        scipy.misc.imsave(
                            io.getHomePath() + '/Result/' + io.getFileName() +
                            'Seg2.png', segImg)
                        crownT['ADVT_COUNT'], crownT['BASAL_COUNT'], crownT[
                            'NR_RTP_SEG_I'], crownT['NR_RTP_SEG_II'], crownT[
                                'HYP_DIA'], crownT[
                                    'TAP_DIA'] = analysis.countRootsPerSegment(
                                        c1y, c2y, c1x, c2x)
                    except:
                        c1x = None
                        c1y = None
                        c2x = None
                        c2y = None
                        pass
                    crownT['DROP_50'] = analysis.RTPsOverDepth(path, rtpSkel)
                    print 'count roots per segment'
                    print 'Root classes computed in ' + str(time.time() -
                                                            currT) + 's'

                if ifAnyKeyIsTrue([
                        'ADVT_ANG', 'BASAL_ANG', 'STA_RANGE', 'STA_DOM_I',
                        'STA_DOM_II', 'STA_25_I', 'STA_25_II', 'STA_50_I',
                        'STA_50_II', 'STA_75_I', 'STA_75_II', 'STA_90_I',
                        'STA_90_II', 'RTA_DOM_I', 'RTA_DOM_II', 'STA_MIN',
                        'STA_MAX', 'STA_MED', 'RTA_RANGE', 'RTA_MIN',
                        'RTA_MAX', 'RTA_MED'
                ]):
                    currT = time.time()
                    lat, corrBranchpts = seg.findLaterals(
                        rtps, rtpSkel, (xScale + yScale) / 2, None)
                    print 'seg.findLaterals computed in ' + str(time.time() -
                                                                currT) + 's'
                    print 'Compute angles at 2cm'
                    currT = time.time()
                    if c1x != None and c1y != None and c2x != None and c2y != None:
                        crownT['ADVT_ANG'], crownT[
                            'BASAL_ANG'] = analysis.anglesPerClusterAtDist(
                                c1y,
                                c2y,
                                rtpSkel,
                                path,
                                lat,
                                corrBranchpts, (xScale + yScale) / 2,
                                dist=20)
                    else:
                        crownT['ADVT_ANG'] = 'nan'
                        crownT['BASAL_ANG'] = 'nan'
                    print 'angles at 2cm computed in ' + str(time.time() -
                                                             currT) + 's'

                    if ifAnyKeyIsTrue([
                            'STA_25_I', 'STA_25_II', 'STA_50_I', 'STA_50_II',
                            'STA_75_I', 'STA_75_II', 'STA_90_I', 'STA_90_II'
                    ]):
                        try:
                            print 'compute quantile angles'
                            currT = time.time()
                            a25, a50, a75, a90 = analysis.calculateAngleQuantiles(
                                path, lat, corrBranchpts, rtpSkel)
                            print 'angles computed in ' + str(time.time() -
                                                              currT) + 's'
                        except:
                            a25 = ['nan']
                            a50 = ['nan']
                            a75 = ['nan']
                            a90 = ['nan']
                            print 'ERROR: No quantile angles calculated'

                    if ifAnyKeyIsTrue(
                        ['RTA_RANGE', 'RTA_MIN', 'RTA_MAX', 'RTA_MED']):
                        try:
                            print 'compute angles'
                            currT = time.time()
                            crownT['RTA_MED'], crownT['RTA_MIN'], crownT[
                                'RTA_MAX'], crownT[
                                    'RTA_RANGE'], anglesN = analysis.calculateAngles(
                                        path, lat, corrBranchpts, rtpSkel)
                            print 'RTA angle characteristics computed in ' + str(
                                time.time() - currT) + 's'
                        except:
                            print 'ERROR: No RTA angles calculated'

                    if ifAnyKeyIsTrue(
                        ['STA_RANGE', 'STA_MIN', 'STA_MAX', 'STA_MED']):
                        try:
                            print 'compute STA angles'
                            currT = time.time()
                            crownT['STA_RANGE'], crownT['STA_MED'], crownT[
                                'STA_MIN'], crownT[
                                    'STA_MAX'], angles = analysis.getLateralAngles(
                                        path, lat, corrBranchpts, rtpSkel)
                            print 'STA angles characteristics computed in ' + str(
                                time.time() - currT) + 's'
                        except:
                            print 'ERROR: No STA angles calculated'

                    if ifAnyKeyIsTrue(
                        ['CP_DIA25', 'CP_DIA50', 'CP_DIA75', 'CP_DIA90']):
                        try:
                            print 'compute diameter quantiles'
                            currT = time.time()
                            crownT['CP_DIA25'], crownT['CP_DIA50'], crownT[
                                'CP_DIA75'], crownT[
                                    'CP_DIA90'] = analysis.getDiameterQuantilesAlongSinglePath(
                                        path, rtpSkel)
                            print 'Tap diameters computed in ' + str(
                                time.time() - currT) + 's'
                        except:
                            print 'ERROR: No quantile diameters calculated'

                    if ifAnyKeyIsTrue(['STA_DOM_I', 'STA_DOM_II']):
                        try:
                            print 'compute STA dominant angles'
                            currT = time.time()
                            crownT['STA_DOM_I'], crownT[
                                'STA_DOM_II'] = analysis.findHistoPeaks(angles)
                            print 'STA dominant angles computed in ' + str(
                                time.time() - currT) + 's'
                        except:
                            print 'ERROR: No dominant angles calculated (STA)'

                    if ifAnyKeyIsTrue(['STA_25_I', 'STA_25_II']):
                        try:
                            currT = time.time()
                            crownT['STA_25_I'], crownT[
                                'STA_25_II'] = analysis.findHistoPeaks(a25)
                            print 'STA 25 angles computed in ' + str(
                                time.time() - currT) + 's'
                        except:
                            print 'ERROR: No dominant angles25 calculated'

                    if ifAnyKeyIsTrue(['STA_50_I', 'STA_50_II']):
                        try:
                            currT = time.time()
                            crownT['STA_50_I'], crownT[
                                'STA_50_II'] = analysis.findHistoPeaks(a50)
                            print 'STA 50 angles computed in ' + str(
                                time.time() - currT) + 's'
                        except:
                            print 'ERROR: No dominant angles50 calculated'

                    if ifAnyKeyIsTrue(['STA_75_I', 'STA_75_II']):
                        try:
                            currT = time.time()
                            crownT['STA_75_I'], crownT[
                                'STA_75_II'] = analysis.findHistoPeaks(a75)
                            print 'STA 75 angles computed in ' + str(
                                time.time() - currT) + 's'
                        except:
                            print 'ERROR: No dominant angles75 calculated'

                    if ifAnyKeyIsTrue(['STA_90_I', 'STA_90_II']):
                        try:
                            currT = time.time()
                            crownT['STA_90_I'], crownT[
                                'STA_90_II'] = analysis.findHistoPeaks(a90)
                            print 'STA 90 angles computed in ' + str(
                                time.time() - currT) + 's'
                        except:
                            print 'ERROR: No dominant angles90 calculated'

                    if ifAnyKeyIsTrue(['RTA_DOM_I', 'RTA_DOM_II']):
                        try:
                            currT = time.time()
                            crownT['RTA_DOM_I'], crownT[
                                'RTA_DOM_II'] = analysis.findHistoPeaks(
                                    anglesN)
                            print 'angles computed in ' + str(time.time() -
                                                              currT) + 's'
                        except:
                            print 'ERROR: No dominant RTA angles calculated'
    io.setHomePath(oldHome)
    if maxExRoot >= 1:
        rtpSkel = -1
        os.chdir(io.getHomePath())
        io.setHomePath('./Lateral/')
        f = io.scanDir()
        for (counter, i) in enumerate(f):
            print 'processing lateral file: ' + i

            if maxExRoot > 0:
                xScale = allPara[counter / maxExRoot][7]
                yScale = allPara[counter / maxExRoot][8]
                io.setFileName(os.path.basename(i))
            else:
                xScale = allPara[counter][7]
                yScale = allPara[counter][8]
                io.setFileName(os.path.basename(i))
                io.setidIdx(counter)

            rtp = RootTipPaths.RootTipPaths(io)

            analysis = Analysis.Analysis(io, (xScale + yScale) / 2)

            try:
                img = scipy.misc.imread(i, flatten=True)
            except:
                print 'Image not readable'
                img = []
                pass
            if len(img) > 0:

                seg = Segmentation.Segmentation(img, io=io)
                imgL = seg.label()

                if imgL != None:
                    skel = Skeleton.Skeleton(imgL)
                    testSkel, testDia = skel.skel(imgL)
                    path, skelGraph = seg.findThickestPathLateral(
                        testSkel, testDia, xScale, yScale)
                    if ifAnyKeyIsTrue([
                            'LT_AVG_LEN', 'NODAL_LEN', 'LT_BRA_FRQ',
                            'NODAL_AVG_DIA', 'LT_AVG_ANG', 'LT_ANG_RANGE',
                            'LT_MIN_ANG', 'LT_MAX_ANG', 'LT_DIST_FIRST',
                            'LT_MED_DIA', 'LT_AVG_DIA'
                    ]):
                        rtpSkel, _, crownT['LT_MED_DIA'], crownT[
                            'LT_AVG_DIA'], _, rtps, _, _, _ = rtp.getRTPSkeleton(
                                path, skelGraph, True)

                    if rtpSkel != -1:
                        if ifAnyKeyIsTrue(['LT_BRA_FRQ']):
                            crownT[
                                'LT_BRA_FRQ'] = analysis.getBranchingfrequencyAlongSinglePath(
                                    rtps, path)
                            crownT[
                                'NODAL_AVG_DIA'], _ = analysis.getDiametersAlongSinglePath(
                                    path, rtpSkel, (xScale + yScale) / 2)
                            crownT['NODAL_LEN'] = analysis.getLengthOfPath(
                                path)
                        if ifAnyKeyIsTrue([
                                'LT_DIST_FIRST', 'LT_AVG_LEN', 'LT_BRA_FRQ',
                                'LT_ANG_RANGE', 'LT_AVG_ANG', 'LT_MIN_ANG',
                                'LT_MAX_ANG'
                        ]):
                            lat, corrBranchpts, crownT[
                                'LT_DIST_FIRST'] = seg.findLaterals(
                                    rtps, rtpSkel, (xScale + yScale) / 2, path)
                            if ifAnyKeyIsTrue(['LT_AVG_LEN']):
                                crownT[
                                    'LT_AVG_LEN'] = analysis.getLateralLength(
                                        lat, path, rtpSkel)
                            if ifAnyKeyIsTrue([
                                    'LT_ANG_RANGE', 'LT_AVG_ANG', 'LT_MIN_ANG',
                                    'LT_MAX_ANG'
                            ]):
                                crownT['LT_ANG_RANGE'], crownT[
                                    'LT_AVG_ANG'], crownT['LT_MIN_ANG'], crownT[
                                        'LT_MAX_ANG'], _ = analysis.getLateralAngles(
                                            path, lat, corrBranchpts, rtpSkel)
            allCrown.append(crownT.copy())
    else:
        allCrown.append(crownT.copy())

    io.setHomePath(oldHome)
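
ifAnyKeyIsTrue is defined elsewhere in DIRT; the calls above only rely on it answering whether at least one of the requested trait names is enabled. A purely hypothetical stand-in with that behaviour (the traits_enabled dict is an assumption, not DIRT's real option handling):

traits_enabled = {'AVG_DENSITY': True, 'WIDTH_MED': True, 'RTP_COUNT': False}

def ifAnyKeyIsTrue(keys):
    # True if any requested trait is switched on in the (assumed) options mapping
    return any(traits_enabled.get(k, False) for k in keys)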
Example #9
File: main.py Project: avrajit/DIRT
def threadLateral(filepath):
    tipdiameter = 0.
    os.chdir(io.getHomePath())
    io.setHomePath('./Lateral/')
    f = io.scanDir()
    for (counter, i) in enumerate(f):
        print 'processing lateral file: ' + i
        if maxExRoot > 0:
            xScale = allPara[counter / maxExRoot][7]
            yScale = allPara[counter / maxExRoot][8]
            io.setFileName(os.path.basename(i))
        else:
            xScale = allPara[counter][7]
            yScale = allPara[counter][8]
            io.setFileName(os.path.basename(i))
            io.setidIdx(counter)

        rtp = RootTipPaths.RootTipPaths(io, tipdiameter)
        rtp.setTipDiaFilter(tipdiameter)

        analysis = Analysis.Analysis(io, (xScale + yScale) / 2)

        lateralT = []

        try:
            img = scipy.misc.imread(i, flatten=True)
        except:
            print 'Image not readable'
            img = []
            pass
        if len(img) > 0:

            seg = Segmentation.Segmentation(img, io=io)
            imgL = seg.label()

            if imgL != None:
                skel = Skeleton.Skeleton(imgL)
                testSkel, testDia = skel.skel(imgL)
                path, skelGraph = seg.findThickestPathLateral(
                    testSkel, testDia, xScale, yScale)
                rtpSkel, _, medianD, meanD, _, _, rtps, _, _, _ = rtp.getRTPSkeleton(
                    path, skelGraph, True)

                if rtpSkel != -1:
                    lBranchFreq = analysis.getBranchingfrequencyAlongSinglePath(
                        rtps, path)
                    avgLatDiameter, slope = analysis.getDiametersAlongSinglePath(
                        path, rtpSkel, (xScale + yScale) / 2)
                    lengthNodalRoot = analysis.getLengthOfPath(path)
                    lat, corrBranchpts, distToFirst = seg.findLaterals(
                        rtps, rtpSkel, (xScale + yScale) / 2)
                    avgLLength = analysis.getLateralLength(lat, path, rtpSkel)
                    angRange, avgAngle, minangle, maxAngle, _ = analysis.getLateralAngles(
                        path, lat, corrBranchpts, rtpSkel)
                    lateralT = [
                        avgLLength * ((xScale + yScale) / 2),
                        float(lengthNodalRoot) * ((xScale + yScale) / 2),
                        lBranchFreq, avgLatDiameter, slope, avgAngle, angRange,
                        minangle, maxAngle,
                        float(distToFirst) * ((xScale + yScale) / 2), medianD,
                        meanD
                    ]
                else:
                    lateralT = [
                        'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                        'nan', 'nan', 'nan', 'nan'
                    ]
            else:
                lateralT = [
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan'
                ]
            allLat.append(lateralT)
            if options[5][1] == '0':
                crownT = [
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan'
                ]
                allCrown.append(crownT)

    io.setHomePath(os.getcwd())
Example #10
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM, GRU, SimpleRNN
from gensim.models import Word2Vec

cws_info_file = op.dirname(op.abspath(__file__)) + "/rnnseg/cws.info"
keras_model_file = op.dirname(op.abspath(__file__)) + "/rnnseg/cws_keras_model"
keras_model_weights_file = op.dirname(
    op.abspath(__file__)) + "/rnnseg/keras_model_weights"
cwsInfo = cws.loadCwsInfo(cws_info_file)
segmodel = loadModel(keras_model_file, keras_model_weights_file)

#------------------------copy everything above to the main part--------------------------------

#cls_rnn = torch.load('model.pkl')
#checking params

sample1 = 'Gasket-NC T=25mmPN20 gasket-NC'
sample2 = '1 Gasket 2 GASKET,PN20(Class 150), Flat ring, RF, 1.5 mm(1/16)thick, ASME B16.21 7551FD01 pc 20 32.06 641.2 7551FG01'
sample3 = '1 8402078 Fibre gasket F14, Aramid fiber with nitrile binder, RF, B16,5, NPS3/4,CLASS150, t=0.0625in27*57*1.5875 0.75 片 14 2.83 39.62'

out1 = Segmentation.segment([sample1], "./dic.json", cls_rnn, segmodel,
                            cwsInfo)
print(out1)
out2 = Segmentation.segment([sample2], "./dic.json", cls_rnn, segmodel,
                            cwsInfo)
print(out2)
out3 = Segmentation.segment([sample3], "./dic.json", cls_rnn, segmodel,
                            cwsInfo)
print(out3)
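
loadModel is not shown in this snippet; a common Keras pattern for loading a JSON architecture file plus a separate weights file, offered only as an assumption about what it might do, is:

from keras.models import model_from_json

def loadModel(model_file, weights_file):
    # rebuild the architecture from its JSON description, then attach the weights
    with open(model_file) as f:
        model = model_from_json(f.read())
    model.load_weights(weights_file)
    return model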
Example #11
try:
    np.load = lambda *a,**k: np_load_old1(*a, allow_pickle=True, **k)
except:
    pass


# To train, first uncomment the following line
# Train()

# InputFolder = "PublicTestCases/Input"
# OutputFolder = "PublicTestCases/Output1"
InputFolder = args.inputfolder
OutputFolder = args.outputfolder



for filename in sorted(glob.glob(InputFolder+"/*")):
    try:
        outfilename = OutputFolder+filename.rsplit('.', 1)[0].replace(InputFolder,"")+".txt"

        image = io.imread(filename)

        x = PreProcessings(image)

        SegmentedNotes,NotesPerOctave,locations,Staffs,StaffThickness,StaffHeight = Segmentation(x)

        Classifier(SegmentedNotes,NotesPerOctave,locations,Staffs,StaffThickness,StaffHeight,out = outfilename,k=5)
    except:
        f = open(outfilename, "w")
        f.write("[]")
        f.close()
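
The try block above assumes np_load_old1 (the original np.load) was saved earlier in the file; a self-contained sketch of that monkey-patch pattern, keeping the same temporary name only to match the code above:

import numpy as np

np_load_old1 = np.load                                    # keep the original loader
np.load = lambda *a, **k: np_load_old1(*a, allow_pickle=True, **k)
# ... calls to np.load on pickled .npy/.npz files go here ...
np.load = np_load_old1                                    # restore when done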
Example #12
def get_segment(fname):
    return Segmentation.get_character_nps(Image.open(filename_prefix + fname + ".png").convert('L'), "")[0][0]
Example #13
import numpy as np
import pandas as pd
import math
import sklearn.metrics
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
import csv
import Segmentation
import Sequence_Coupling
# import os

# absolute_path = os.path.normpath(os.path.join(os.path.dirname(__file__), '..\\..\\'))

dataset = Segmentation.Segment_Sequence()

##################
X_train_positive = dataset[dataset['Labels'].isin([1])]
X_train_negative = dataset[dataset['Labels'].isin([0])]
dataset = pd.concat([X_train_positive, X_train_negative], axis=0)
###################

print("Segmentation Done!")
train_index = pd.read_csv('Train_Indices.csv')
test_index = pd.read_csv('Test_Indices.csv')


results = []
i = 1
y_test_fold=[]
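
The snippet stops just before the evaluation loop; a hedged sketch of how index files such as Train_Indices.csv might drive the imported SVC (the column layout and positional indexing here are assumptions, not taken from the project):

fold = 0
train_rows = train_index.iloc[:, fold].dropna().astype(int)
test_rows = test_index.iloc[:, fold].dropna().astype(int)

X_train = dataset.drop(columns=['Labels']).iloc[train_rows]
y_train = dataset['Labels'].iloc[train_rows]
X_test = dataset.drop(columns=['Labels']).iloc[test_rows]
y_test = dataset['Labels'].iloc[test_rows]

clf = SVC(kernel='rbf', gamma='scale')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(accuracy_score(y_test, y_pred), matthews_corrcoef(y_test, y_pred))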
Example #14
def ExtractedWordDeleteRepetition(extracted_word):
    """
    Return all keywords of each cluster after removing duplicates.
    :param extracted_word: keywords inside each cluster, before deduplication
    :return: the extracted keywords after deduplication
    """
    extracted_result = []
    for i in range(len(extracted_word)):
        for j in range(len(extracted_word[i])):
            if extracted_word[i][j][0] not in extracted_result:
                extracted_result.append(extracted_word[i][j][0])
    return extracted_result

if __name__ == '__main__':
    result = Reader.readfile('result38.bin')
    result_split = Segmentation.SplitCluster(result)
    delete_result = DeleteRepetition(result_split)
    candidate = BuildClass(delete_result)
    CalLenScore(candidate,2,15)
    CalSupScore(candidate,result_split)
    CalPosScore(candidate,result_split)
    for i in range(len(candidate)):
        for j in range(len(candidate[i])):
            candidate[i][j].CalScore()
    candidate_list = GenCandidateList(candidate)
    sorted_candidate_list = CandidateListSort(candidate_list)

    for i in range(len(sorted_candidate_list)):
        print("")
        for j in range(len(sorted_candidate_list[i])):
            print(sorted_candidate_list[i][j])
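
A tiny usage example of ExtractedWordDeleteRepetition, using invented keyword/score pairs in the nested-list shape the function expects:

extracted_word = [
    [('GET', 0.9), ('HTTP', 0.7)],      # keywords extracted from cluster 0
    [('HTTP', 0.8), ('Host', 0.5)],     # keywords extracted from cluster 1
]
print(ExtractedWordDeleteRepetition(extracted_word))
# -> ['GET', 'HTTP', 'Host']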
Example #15
# coding=utf-8
'''
Created on Sep. 29, 2015

@author: LuDan
'''
import Segmentation as seg

fdir='E:/Other/USTB/nlp/Text1.txt'
frfile=True
ofile='text.txt'

SegResults=seg.rtSegResult(fdir, frfile, True)
#get the segmentation results
seg.FileSegResult(ofile, SegResults)
#put the segmentation results to a file
seg.PlotWordCloud(SegResults,  'Ttag_cloud.jpg',30,20,80)
#plot the wordcloud
Example #16
#    print("Patient ", i )
#    patient = patientImages[i]
#    rows, cols = patient[0].shape
#    segmentedImages = np.empty((len(patient),rows, cols))
#    for n in range(len(patient)):
#        lungfilterArea, outline, watershedImage, sobelGradient, markerInternal, \
#        markerExternal, markerWatershed = Segmentation.seperate_lungs(patientImages[i][n])
#        segmentedLung = np.where(lungfilterArea == 1, patientImages[i][n], -2000)
#        segmentedImages[n, :, :] = segmentedLung
#    segmentedLungs.append(segmentedImages)


# 2.1.a Method2, Full Preprocessing Tutorial:
for i in range(len(patientImages)):
    print("Patient ", i)
    segmentedImages = Segmentation.segment_lung_mask(patientImages[i])
    segmentedLungs.append(segmentedImages)


# 3. SAVE ALL SEGMENTED LUNGS AS .NPY
print("\n", "SAVE ALL SEGMENTED LUNGS AS .NPY FILE")

for i in range(len(segmentedLungs)):
    Loader.save_stack(segmentedLungs[i], ('Students_seg' + str(i)))


# 2.1.d OPTIONAL: Plot the segmented lung from one patient and the HU values
#Preprocessing.print_pointcloud(segmentedLungs[0], -1500, 700)
#Preprocessing.plot_3d(segmentedLungs[0], -500)

print("Original Pic")
Example #17
File: main.py Project: avrajit/DIRT
def threadCrown(filepath):
    imgL = []
    tipdiameter = float(options[8][1])
    print io.getHomePath()
    os.chdir(io.getHomePath())
    io.setHomePath('./Crown/')
    f = io.scanDir()
    for (counter, i) in enumerate(f):
        io.setFileName(os.path.basename(i))
        io.setidIdx(imgID)

        print 'processing Crown file: ' + i
        xScale = allPara[counter][7]
        yScale = allPara[counter][8]
        analysis = Analysis.Analysis(io, (xScale + yScale) / 2)
        rtp = RootTipPaths.RootTipPaths(io, tp=tipdiameter)
        rtp.setTipDiaFilter(tipdiameter * (xScale + yScale) / 2)
        crownT = []

        try:
            img = scipy.misc.imread(i, flatten=True)
        except:
            print 'Image not readable'
            img = []
        if len(img) > 0:
            seg = Segmentation.Segmentation(img, io)
            imgL = seg.label()
            print 'compute root profile'
            currT = time.time()
            rootDensity, medianWidth, maxWidth, D, DS, _, _, _, _ = analysis.getWidthOverHeight(
                imgL, xScale, yScale)
            print 'Mask traits computed ' + str(time.time() - currT) + 's'
            currT = time.time()
            skel = Skeleton.Skeleton(imgL)
            testSkel, testDia = skel.skel(imgL)
            print 'Medial axis computed ' + str(time.time() - currT) + 's'
            currT = time.time()
            path, skelGraph, stemDia, skelSize = seg.findThickestPath(
                testSkel, testDia, xScale, yScale)
            allPara[counter][10] = skelSize
            print 'Central path computed ' + str(time.time() - currT) + 's'
            print 'compute rtp skeleton'
            currT = time.time()
            rtpSkel, nrOfRTP, medianTipDiameter, meanDiameter, dia90, _, rtps, tips, _, _ = rtp.getRTPSkeleton(
                path, skelGraph, True)
            allPara[len(allPara) - 1][2] = seg.getFail()
            seg.setTips(tips)
            print 'RTP Skeleton computed ' + str(time.time() - currT) + 's'
            print 'compute symmetry'
            currT = time.time()
            vecSym = analysis.getSymmetry(rtps, rtpSkel)
            print 'Symmetry computed ' + str(time.time() - currT) + 's'

            if rtpSkel != -1:

                lat, corrBranchpts, _ = seg.findLaterals(
                    rtps, rtpSkel, (xScale + yScale) / 2)

                try:
                    print 'compute quantile angles'
                    currT = time.time()
                    a25, a50, a75, a90 = analysis.calculateAngleQuantiles(
                        path, lat, corrBranchpts, rtpSkel)
                    print 'angles computed in ' + str(time.time() -
                                                      currT) + 's'
                except:
                    raise
                    a25 = ['nan']
                    a50 = ['nan']
                    a75 = ['nan']
                    a90 = ['nan']

                    print 'ERROR: No quantile angles calculated'

                try:
                    print 'compute angles'
                    currT = time.time()
                    angRangeN, avgAngleN, minangleN, maxAngleN, anglesN = analysis.calculateAngles(
                        path, lat, corrBranchpts, rtpSkel)
                    print 'angles computed in ' + str(time.time() -
                                                      currT) + 's'
                except:
                    avgAngleN = 'nan'
                    minangleN = 'nan'
                    maxAngleN = 'nan'
                    angRangeN = 'nan'
                    anglesN = 'nan'
                    print 'ERROR: No angles calculated'

                try:
                    print 'compute RTA angles'
                    currT = time.time()
                    angRange, avgAngle, minangle, maxAngle, angles = analysis.getLateralAngles(
                        path, lat, corrBranchpts, rtpSkel)
                    print 'angles computed in ' + str(time.time() -
                                                      currT) + 's'
                except:
                    raise
                    avgAngle = 'nan'
                    minangle = 'nan'
                    maxAngle = 'nan'
                    angRange = 'nan'
                    angles = 'nan'
                    print 'ERROR: No RTA angles calculated'
                try:

                    print 'compute diameter quantiles'
                    currT = time.time()
                    d25, d50, d75, d90 = analysis.getDiameterQuantilesAlongSinglePath(
                        path, rtpSkel)
                    print 'diameters computed in ' + str(time.time() -
                                                         currT) + 's'
                except:
                    d25 = 'nan'
                    d50 = 'nan'
                    d75 = 'nan'
                    d90 = 'nan'
                    print 'ERROR: No quantile diameters calculated'
                    raise

                try:
                    print 'compute dominant angles'
                    currT = time.time()
                    ang1, ang2 = analysis.findHistoPeaks(angles)
                    print 'angles computed in ' + str(time.time() -
                                                      currT) + 's'
                except:
                    ang1 = 'nan'
                    ang2 = 'nan'
                    print 'ERROR: No dominant angles calculated'
                try:
                    currT = time.time()
                    ang25_1, ang25_2 = analysis.findHistoPeaks(a25)
                    print 'angles computed in ' + str(time.time() -
                                                      currT) + 's'
                except:
                    ang25_1 = 'nan'
                    ang25_2 = 'nan'
                    print 'ERROR: No dominant angles25 calculated'
                try:
                    currT = time.time()
                    ang50_1, ang50_2 = analysis.findHistoPeaks(a50)
                    print 'angles computed in ' + str(time.time() -
                                                      currT) + 's'
                except:
                    ang50_1 = 'nan'
                    ang50_2 = 'nan'
                    print 'ERROR: No dominant angles50 calculated'
                try:
                    currT = time.time()
                    ang75_1, ang75_2 = analysis.findHistoPeaks(a75)
                    print 'angles computed in ' + str(time.time() -
                                                      currT) + 's'
                except:
                    ang75_1 = 'nan'
                    ang75_2 = 'nan'
                    print 'ERROR: No dominant angles75 calculated'
                try:
                    currT = time.time()
                    ang90_1, ang90_2 = analysis.findHistoPeaks(a90)
                    print 'angles computed in ' + str(time.time() -
                                                      currT) + 's'
                except:
                    ang90_1 = 'nan'
                    ang90_2 = 'nan'
                    print 'ERROR: No dominant angles90 calculated'

                try:
                    currT = time.time()
                    angN_1, angN_2 = analysis.findHistoPeaks(anglesN)
                    print 'angles computed in ' + str(time.time() -
                                                      currT) + 's'
                except:
                    angN_1 = 'nan'
                    angN_2 = 'nan'
                    print 'ERROR: No dominant RTA angles calculated'

                crownT = [
                    stemDia, rootDensity, angRange, ang1, ang2, ang25_1,
                    ang25_2, ang50_1, ang50_2, ang75_1, ang75_2, ang90_1,
                    ang90_2, angN_1, angN_2, minangle, maxAngle, avgAngle,
                    angRangeN, avgAngleN, minangleN, maxAngleN, nrOfRTP,
                    medianTipDiameter, meanDiameter, dia90, medianWidth,
                    maxWidth, D[0], D[1], D[2], D[3], D[4], D[5], D[6], D[7],
                    D[8], DS[0], DS[1], DS[2], DS[3], DS[4], DS[5], DS[6],
                    DS[7], DS[8], vecSym[0], vecSym[1], d25, d50, d75, d90
                ]

            else:
                crownT = [
                    stemDia, rootDensity, 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', medianWidth, maxWidth, D[0], D[1],
                    D[2], D[3], D[4], D[5], D[6], D[7], D[8], DS[0], DS[1],
                    DS[2], DS[3], DS[4], DS[5], DS[6], DS[7], DS[8], vecSym[0],
                    vecSym[1], d25, d50, d75, d90
                ]

            if maxExRoot > 1:
                for i in range(maxExRoot):
                    allCrown.append(crownT)
            else:
                allCrown.append(crownT)
            if options[4][1] == '0':
                lateralT = [
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan'
                ]
                allLat.append(lateralT)
    io.setHomePath(os.getcwd())
Example #18
def Train(NumberOfData):

    ImgCount = 0

    AllLength = 0
    AllCorrect = 0
    count = 0
    WrongImgs = 0

    try:
        shutil.rmtree("train")
    except:
        print("No train folder")

    os.mkdir("train")

    File = open("associtations.txt", "w")
    # exit(0)
    for scanned in os.listdir('dataset/scanned'):

        Path = 'dataset/scanned/' + scanned
        # Path2 = 'dataset/scanned2/'+scanned
        print(Path)
        Img = cv2.imread(Path)
        S = Segmentation(Img)
        try:
            S.Start()
        except:
            print("Error in reading image")
            continue

        FileName = 'dataset/text/' + scanned[:-4] + '.txt'
        print(FileName)

        File = open(FileName, "r")
        Lines = File.readlines()
        RealWords = Lines[0].split(" ")
        Words = S.GetSegmentedWords()

        Length = len(RealWords)
        print("================================")
        print(Length)
        print(len(Words))
        # print(len(Words2))

        if Length != len(Words):
            print("Error in Words")
            print("Number Of True Words: " + str(Length))
            print("Number of Words: " + str(len(Words)))
            WrongImgs += 1
            continue

        File = open("associtations.txt", "a")
        Correct = 0
        for i in range(Length):
            WL = len(Words[i])
            if WordLength(RealWords[i]) == WL:
                Correct += 1
                for j in range(WL):
                    name = str(ImgCount) + ".png"
                    Char = RealWords[i][j]
                    cv2.imwrite("train/" + name, Words[i][j])
                    if j < WL - 1:
                        if (RealWords[i][j] + RealWords[i][j + 1]) == "لا":
                            Char += 'ا'
                    File.write(str(Dict[Char]) + " " + name + "\n")
                    ImgCount += 1

        print(str((Correct / Length) * 100) + "%")

        print("================================")
        AllLength += Length
        AllCorrect += Correct

        count += 1
        if count == NumberOfData:
            break

    File.close()
    AllAccuracy = (AllCorrect / AllLength) * 100
    print("Segmentation Finished")
    print(str(WrongImgs) + " Failed Images")
    print("Testing on " + str(AllLength) + " Words ")
    print(str(AllCorrect) + " Are Correct")
    print("Accuracy : " + str(AllAccuracy) + "%")
Example #19
    # Setting variables to search for path
    path = ['E:\\']
    path1 = []
    npath = " "
    for pathn in path:

        # Using os.walk() to walk through the directories
        for root, dirs, files in os.walk(pathn):
            for names in files:
                if names == stri:
                    npath = os.path.join(root, names)
                    path1.append(npath)
                    break

    # Checking if file has been found
    if len(path1) == 0:
        # If file has not been found, raise exception
        raise FileNotFoundError
    else:
        return path1[0]


# Handling exception raised by inputFind()
try:
    path = inputFind()
    sample = Segmentation.LineSegment(path)
    sample.connectComponents()
    sample.Watershed()
    sample.disp_image()
except FileNotFoundError:
    print("Error! The specified file could not be found")
def process2(rotImg):
    #SrcImage = './data/rotImg.jpg'
    #rotImg = imread(SrcImage, mode='RGB')
    #rotImg = np.uint8(rotImg)

    ## image segmentation:
    panoSegment = Segmentation.gbPanoSegment(rotImg, 0.5, 200, 50)

    #SrcImage = './data/panoSegment.mat'
    #dict = loadmat(SrcImage)
    #panoSegment = dict['panoSegment']

    plt.imshow(panoSegment, cmap='gray')
    plt.title('Segmentation: left and right are connected')
    plt.show()

    ## Get region inside a polygon
    dict = loadmat('./data/points.mat')
    # load room corner
    points = dict['points']

    dict = loadmat('./data/uniformvector_lvl8.mat')
    coor = dict['coor']
    tri = dict['tri']

    vcs = CoordsTransform.uv2coords(CoordsTransform.xyz2uvN(coor, 0), 1024,
                                    512, 0)
    # transfer vectors to image coordinates
    coords = CoordsTransform.uv2coords(CoordsTransform.xyz2uvN(points, 0),
                                       1024, 512, 0)

    [s_xyz, _] = PolygonRegion.sortXYZ(points[0:4, :])
    # define a region with 4 vertices
    [inside, _, _] = PolygonRegion.insideCone(s_xyz[-1::-1, :], coor, 0)
    # test which vectors are in region

    #figure(8);
    plt.imshow(rotImg)
    #hold on
    for i in np.arange(4):
        plt.scatter(coords[i, 0], coords[i, 1], 100, 'r', 's')

    for i in np.where(inside):
        plt.scatter(vcs[i, 0], vcs[i, 1], 1, 'g', 'o')

    [s_xyz, I] = PolygonRegion.sortXYZ(points[4:8, :])
    [inside, _, _] = PolygonRegion.insideCone(s_xyz[-1::-1, :], coor, 0)
    for i in np.arange(4, 8):
        plt.scatter(coords[i, 0], coords[i, 1], 100, 'r', 's')

    for i in np.where(inside):
        plt.scatter(vcs[i, 0], vcs[i, 1], 1, 'b', 'o')

    plt.title('Display of two wall regions')
    plt.show()

    ## Reconstruct a box, assuming perfect upperright cuboid
    D3point = np.zeros([8, 3])
    pointUV = CoordsTransform.xyz2uvN(points, 0).T
    floor = -160

    floorPtID = np.array([2, 3, 6, 7, 2]) - 1
    ceilPtID = np.array([1, 4, 5, 8, 1]) - 1
    for i in np.arange(4):
        D3point[floorPtID[i], :] = LineFaceIntersection.LineFaceIntersection(
            np.array([0, 0, floor]), np.array([0, 0, 1]), np.array([0, 0, 0]),
            points[floorPtID[i], :])
        D3point[ceilPtID[i], 2] = D3point[floorPtID[i], 2] / np.tan(
            pointUV[floorPtID[i], 1]) * np.tan(pointUV[ceilPtID[i], 1])

    ceiling = np.mean(D3point[ceilPtID, 2])
    for i in np.arange(4):
        D3point[ceilPtID[i], :] = LineFaceIntersection.LineFaceIntersection(
            np.array([0, 0, ceiling]), np.array([0, 0, 1]),
            np.array([0, 0, 0]), points[ceilPtID[i], :])

    #figure(9);
    #fig = plt.figure()
    #ax = fig.add_subplot(111, projection='3d')
    #ax.scatter(D3point[floorPtID,0], D3point[floorPtID,1], D3point[floorPtID,2]);
    #hold on
    #ax.scatter(D3point[ceilPtID,0], D3point[ceilPtID,1], D3point[ceilPtID,2]);

    #for i in np.arange(4):
    #ax.scatter(D3point[[floorPtID[i], ceilPtID[i]],0], D3point[[floorPtID[i], ceilPtID[i]],1], D3point[[floorPtID[i], ceilPtID[i]],2]);

    #plt.title('Basic 3D reconstruction');
    #plt.show()
    #figure(10);
    firstID = np.array([1, 4, 5, 8, 2, 3, 6, 7, 1, 4, 5, 8]) - 1
    secndID = np.array([4, 5, 8, 1, 3, 6, 7, 2, 2, 3, 6, 7]) - 1
    lines = LineFaceIntersection.lineFromTwoPoint(points[firstID, :],
                                                  points[secndID, :])

    plt.imshow(Visualization.paintParameterLine(lines, 1024, 512, rotImg))
    #hold on

    for i in np.arange(8):
        plt.scatter(coords[i, 0], coords[i, 1], 100, 'r', 's')

    plt.title('Get lines by two points')
    plt.show()
Example #21
import MorphologicalProcessing as morph
import Segmentation as segment
import Representation as rep
import numpy
from PIL import Image
from matplotlib import pyplot

original = Image.open("image/rectangle.jpg")
original = numpy.array(original)

shifted = Image.open("image/rectangle_shifted.jpg")
shifted = numpy.array(shifted)

rotated = numpy.transpose(original)

# im = pp.rgb_to_gray(im)
ori_seg = segment.segmentation(3, original)
shift_seg = segment.segmentation(3, shifted)
rot_seg = segment.segmentation(3, rotated)

f = pyplot.figure()
f.add_subplot(3, 2, 1)
pyplot.title('original')
pyplot.imshow(original, cmap = pyplot.get_cmap('gray'))

f.add_subplot(3, 2, 2)
pyplot.title('original segmented')
pyplot.imshow(ori_seg, cmap = pyplot.get_cmap('gray'))

f.add_subplot(3, 2, 3)
pyplot.title('shifted')
pyplot.imshow(shifted, cmap = pyplot.get_cmap('gray'))
Example #22
import Segmentation

print(Segmentation.segment(['Gasket-NC T=25mmPN20Gasket-NC'], "./dic.json"))