Example #1
    def get_parameters(self, verbose=True):
        '''Returns parameter sets on the contour of the given cosmology.'''
        if self.chain_name == 'lcdm':
            Parameter_contour = Contour.LCDM_Contour(
                self.chain_name, self.directory, self.contour_level,
                self.tolerance, self.bins_tuple, self.smoothing)
        else:
            Parameter_contour = Contour.Contour(
                self.chain_name, self.directory, self.contour_level,
                self.tolerance, self.bins_tuple, self.smoothing)

        if verbose:
            print("Does", self.chain_name, "contour exist?", end=' ')
        if Parameter_contour.test_contour_exists():
            if verbose:
                print("Yes")
        else:
            if verbose:
                print("No")
            Parameter_contour.pickle_contour()

        if self.chain_name == 'lcdm':
            parameter_sets = Parameter_contour.read_pickled_contour()
        else:
            omega_contour, w0_contour, wa_contour = Parameter_contour.read_pickled_contour()
            parameter_sets = omega_contour, w0_contour, wa_contour

        return parameter_sets
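
The compute-if-missing pattern above can also be exercised directly on a contour object; a minimal sketch, reusing only calls that appear on this page (the directory value is a placeholder):

    contour = Contour.Contour(chain_name='cpl', directory='/path/to/chains/',
                              bins_tuple=(30, 30, 30), tolerance=0.001, smoothing=0.6)
    if not contour.test_contour_exists():
        contour.pickle_contour()  # compute and cache the contour on disk
    omega_contour, w0_contour, wa_contour = contour.read_pickled_contour()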
Example #2
def oplot_3d_contours():
    '''
    This function tests if the contours produced with different binning
    and smoothing settings agree visually. It is messy and could use some
    cleaning up if it is to be used more than once.
    '''
    CPL_Contour = Contour.Contour(chain_name='cpl',
                                  directory='/Users/perandersen/Data/HzSC/',
                                  bins_tuple=(30, 30, 30),
                                  tolerance=0.001,
                                  smoothing=0.6)
    #CPL_Contour_2 = Contour.Contour(chain_name='cpl', directory='/Users/perandersen/Data/HzSC/',bins_tuple=(40,40,40),tolerance = 0.001, smoothing=0.6)
    CPL_Contour_2 = Contour.Contour(chain_name='n3cpl',
                                    directory='/Users/perandersen/Data/HzSC/',
                                    bins_tuple=(60, 60, 60),
                                    tolerance=0.001,
                                    smoothing=0.6)
    CPL_Contour_3 = Contour.Contour(chain_name='n3cpl',
                                    directory='/Users/perandersen/Data/HzSC/',
                                    bins_tuple=(50, 50, 50),
                                    tolerance=0.001,
                                    smoothing=0.6)
    x_contour, y_contour, z_contour = CPL_Contour.read_pickled_contour()
    x_contour_2, y_contour_2, z_contour_2 = CPL_Contour_2.read_pickled_contour()
    x_contour_3, y_contour_3, z_contour_3 = CPL_Contour_3.read_pickled_contour()
    print(len(x_contour))
    print(len(x_contour_2))
    print(len(x_contour_3))
    fig_scatter = plt.figure()
    ax_scatter = fig_scatter.add_subplot(111, projection='3d')
    #ax_scatter.scatter(x_contour, y_contour, z_contour, color='g')
    ax_scatter.scatter(x_contour_2, y_contour_2, z_contour_2)
    ax_scatter.scatter(x_contour_3, y_contour_3, z_contour_3, color='r')
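
The function builds the 3-D scatter figure but never renders it, so a caller presumably follows it with plt.show(); a sketch, assuming matplotlib.pyplot is imported as plt and mpl_toolkits.mplot3d is available for the '3d' projection:

    oplot_3d_contours()
    plt.show()  # render the overlaid contour scatter plots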
Example #3
    def run(self):
        title = 'MBARI LRAUV Survey'
        outFile = (self.args.outDir + '/' + self.args.platformName + '_log_' +
                   self.startDatetimeUTC.strftime('%Y%m%dT%H%M%S') + '_' +
                   self.endDatetimeUTC.strftime('%Y%m%dT%H%M%S') + '.png')
        c = Contour(self.startDatetimeUTC, self.endDatetimeUTC, self.args.database,
                    self.args.platformName, self.args.parms, title, outFile, False)
        c.run()

        cmd = r'scp %s [email protected]:/mbari/LRAUV/stoqs' % (outFile)
        #logger.debug('%s', cmd)
        import pdb; pdb.set_trace()
        os.system(cmd)
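
Note that the pdb.set_trace() call halts execution just before the scp command runs; a non-interactive variant might drop the breakpoint and hand the command to subprocess instead of os.system. A sketch, with a hypothetical destination since the real user/host is obscured in the snippet:

    import subprocess
    subprocess.run(['scp', outFile, 'user@host:/mbari/LRAUV/stoqs'], check=True)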
Example #4
def GetOuterContour(cluster, h):
    xpix, ypix = GeoPosition(cluster, h)
    outcorners = GetOuterCorners(cluster, h)
    corners = {}
    ## go clockwise
    for corner in outcorners:
        dx = corner[0] - xpix
        dy = corner[1] - ypix
        angle = -999
        if (dx != 0):
            absang = abs(math.atan(dy / dx))
            if (dx < 0):
                if (dy >= 0): angle = absang
                else: angle = 2 * pi - absang
            if (dx > 0):
                if (dy >= 0): angle = pi - absang
                else: angle = pi + absang
        else:
            if (dy < 0): angle = 3 * pi / 2
            else: angle = +pi / 2
        corners.update({angle: corner})
    cwcorners = dict(sorted(corners.items()))
    scwcorners = "["
    for txt, corner in cwcorners.items():
        scwcorners += str(corner) + ", "
    scwcorners += "]"
    # print("Sorted corners (for cluster size=",len(cluster),"): ",len(outcorners),scwcorners)
    srtcorners = []
    for angl, corner in cwcorners.items():
        srtcorners.append(corner)
    contour = cnt.Contour(srtcorners, xpixsize, ypixsize, len(cluster), False)
    return srtcorners, contour
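
The quadrant bookkeeping above maps each corner to the angle (pi - atan2(dy, dx)) mod 2*pi, so the ordering step could be written more compactly with math.atan2; a sketch of an equivalent sort over the same inputs (ignoring the degenerate corner-at-centre case):

    import math
    angles = {(math.pi - math.atan2(c[1] - ypix, c[0] - xpix)) % (2 * math.pi): c
              for c in outcorners}
    srtcorners = [corner for _, corner in sorted(angles.items())]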
Example #5
def main():
    print(
        "*************************************Main Starts*******************************"
    )
    # Empty Variables for storing contours
    allContoursWithData = []  # declare empty lists,
    validContoursWithData = []  # we will fill these shortly

    # Loading Training Classifications
    try:
        npaClassifications = np.loadtxt(
            "classifications.txt",
            np.float32)  # read in training classifications

    except:
        print("error, unable to open classifications.txt, exiting program\n")
        os.system("pause")
        return
    # end try

    # Loading Training Images
    try:
        npaFlattenedImages = np.loadtxt("flattened_images.txt",
                                        np.float32)  # read in training images
    except:
        print("error, unable to open flattened_images.txt, exiting program\n")
        os.system("pause")
        return
    # end try

    # converting to numpy array
    npaClassifications = npaClassifications.reshape(
        (npaClassifications.size, 1))
    # print(npaClassifications)

    # Create a dictionary variable from the training data
    dataset = knn.knn_dictionary_converter(npaClassifications,
                                           npaFlattenedImages)
    print(dataset.keys())

    # Reading the testing image
    imgTestingNumbers = cv2.imread("test2.jpg")  # read in testing numbers image
    if imgTestingNumbers is None:  # if image was not read successfully
        print("error: image not read from file \n\n")  # print error message to std out
        os.system("pause")  # pause so user can see error message
        return  # and exit function (which exits program)
    # end if

    imgGray = cv2.cvtColor(imgTestingNumbers,
                           cv2.COLOR_BGR2GRAY)  # get grayscale image
    imgBlurred = cv2.GaussianBlur(imgGray, (5, 5), 0)  # blur

    # filter image from grayscale to black and white
    imgThresh = cv2.adaptiveThreshold(
        imgBlurred,  # input image
        255,  # make pixels that pass the threshold full white
        cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        # use gaussian rather than mean, seems to give better results
        cv2.THRESH_BINARY_INV,
        # invert so foreground will be white, background will be black
        11,  # size of a pixel neighborhood used to calculate threshold value
        2)  # constant subtracted from the mean or weighted mean

    imgThreshCopy = imgThresh.copy()  # make a copy of the thresh image; this is
    # necessary because findContours modifies the image it is given

    imgContours, contours, npaHierarchy = cv2.findContours(
        imgThreshCopy,  # input image; pass a copy, since the function modifies it
        cv2.RETR_EXTERNAL,  # retrieve the outermost contours only
        cv2.CHAIN_APPROX_SIMPLE)  # compress horizontal, vertical, and diagonal
    # segments and leave only their end points
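    # NOTE: the three-value unpacking above matches the OpenCV 3.x signature of
    # cv2.findContours; OpenCV 2.4 and 4.x return only (contours, hierarchy).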

    for contour in contours:  # for each contour
        contourWithData = cont.Contour()  # instantiate a contour-with-data object
        contourWithData.contour = contour  # assign contour to contour with data
        contourWithData.boundingRect = cv2.boundingRect(contourWithData.contour)  # get the bounding rect
        contourWithData.calculateRectTopLeftPointAndWidthAndHeight()  # get bounding rect info
        contourWithData.fltArea = cv2.contourArea(contourWithData.contour)  # calculate the contour area
        allContoursWithData.append(contourWithData)  # add to the list of all contours with data
    # end for

    for contourWithData in allContoursWithData:  # for all contours
        if contourWithData.checkIfContourIsValid():  # check if valid
            validContoursWithData.append(
                contourWithData)  # if so, append to valid contour list
        # end if
    # end for

    validContoursWithData.sort(key=operator.attrgetter(
        "intRectX"))  # sort contours from left to right

    strFinalString = ""  # declare final string, this will have the final number sequence by the end of the program

    for contourWithData in validContoursWithData:  # for each contour
        # draw a green rect around the current char
        cv2.rectangle(
            imgTestingNumbers,  # draw rectangle on original testing image
            (contourWithData.intRectX,
             contourWithData.intRectY),  # upper left corner
            (contourWithData.intRectX + contourWithData.intRectWidth,
             contourWithData.intRectY +
             contourWithData.intRectHeight),  # lower right corner
            (0, 255, 0),  # green
            2)  # thickness

        # crop the character out of the threshold image
        imgROI = imgThresh[contourWithData.intRectY:contourWithData.intRectY + contourWithData.intRectHeight,
                           contourWithData.intRectX:contourWithData.intRectX + contourWithData.intRectWidth]

        imgROIResized = cv2.resize(imgROI, (c.RESIZED_IMAGE_WIDTH, c.RESIZED_IMAGE_HEIGHT))  # resize for more consistent recognition and storage

        npaROIResized = imgROIResized.reshape((1, c.RESIZED_IMAGE_WIDTH * c.RESIZED_IMAGE_HEIGHT))  # flatten image into a 1d numpy array

        npaROIResized = np.float32(npaROIResized)  # convert from a 1d numpy array of ints to a 1d numpy array of floats
        # print(npaROIResized)
        CharResult = knn.k_nearest_neighbors(dataset, npaROIResized,
                                             k=4)  # get character from results
        print("Character is: " + CharResult)
        strFinalString = strFinalString + CharResult  # append current char to full string
    # end for

    print("\n" + strFinalString + "\n")  # show the full string

    cv2.imshow("imgTestingNumbers", imgTestingNumbers
               )  # show input image with green boxes drawn around found digits

    cv2.waitKey(0)  # wait for user key press
    print(
        "*************************************Main Ends*******************************"
    )
Example #6
import Contour
import Process
from Simple import Point
from IO import Reader
from Plot import Core as pl
from Utils import Utils

list_of_points = Point.CreateListOfPoints(Reader.contour_points)
contour = Contour.Contour(Utils.createContour())
init_flow_len = Reader.init_flow_len
x_max = Reader.x_max
y_max = Reader.y_max
alfa = Reader.alfa
circulation = Reader.circulation

process = Process.Process(contour, x_max, y_max, init_flow_len, circulation,
                          alfa)

pl.drawField(Utils.parseField(process.vector_field),
             Utils.parseContour(contour.list_of_vortex), process.pressure_diff,
             process.pressure_max, process.pressure_min)
Example #7
stop_rotating = False
show_tumor = False
show_head = True
turn_right = True

window = pyglet.window.Window(width=1400,
                              height=800,
                              caption="Brain Visualizer")
window.projection = pyglet.window.Projection3D()

possible_inputs = os.listdir("inputs")
possible_inputs.sort(key=lambda x: int(x.replace("input", "")))

random_input = possible_inputs[random.randint(0, len(possible_inputs) - 1)]

input_files, texture_files, mask_output = cT.return_input_file_paths(
    "inputs/{}".format(random_input))

tops, middles, bottoms, mask_tops, mask_middles, mask_bottoms, first_mask_index = cT.pyglet_bulk_return_contour_for_surfaces_with_colors(
    input_files, mask_output)

origin_axis = pyglet.graphics.vertex_list(2, ("v3f", (0, 0, -10, 0, 0, 10)),
                                          ("c3B", (255, 0, 0, 255, 0, 0)))

texture_canvas = pyglet.graphics.vertex_list(
    6, ('v3f', (-1.2, -1.2, 0, 1.2, -1.2, 0, -1.2, 1.2, 0, 1.2, -1.2, 0, 1.2,
                1.2, 0, -1.2, 1.2, 0)),
    ('t2f', (0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1)))

draw_exclusively = -1
degree = 0
Example #8
def plot_3d_contours(chain_name,
                     bins,
                     smoothing,
                     tolerance=0.005,
                     labels=['x', 'y', 'z']):
    redshift_cut = 0.4

    fig_scatter = plt.figure()
    ax_scatter = fig_scatter.add_subplot(111, projection='3d')
    ax_scatter.set_xlabel(labels[0])
    ax_scatter.set_ylabel(labels[1])
    ax_scatter.set_zlabel(labels[2])

    contour = Contour.Contour(chain_name=chain_name,
                              directory='/Users/perandersen/Data/HzSC/',
                              bins_tuple=bins[0],
                              tolerance=tolerance,
                              smoothing=smoothing)
    try:
        x_contour, y_contour, z_contour = contour.read_pickled_contour()
    except:
        contour.pickle_contour()
        x_contour, y_contour, z_contour = contour.read_pickled_contour()

    ax_scatter.scatter(x_contour,
                       y_contour,
                       z_contour,
                       color='b',
                       s=1.,
                       depthshade=True)
    for ii in np.arange(len(bins)):
        deltamus = Deltamu.Deltamu(chain_name,
                                   '',
                                   do_marg=True,
                                   bins_tuple=bins[ii],
                                   smoothing=smoothing,
                                   tolerance=tolerance)
        fname = root_dir + deltamus.get_marg_file_name()
        (unique_par_sets_min, unique_par_sets_max, unique_par_redshifts_min,
         unique_par_redshifts_max) = get_unique_parameter_sets(fname)
        unique_par_sets_min = np.array(unique_par_sets_min)
        unique_par_sets_max = np.array(unique_par_sets_max)

        unique_par_sets_min = unique_par_sets_min[
            unique_par_redshifts_min > redshift_cut]
        unique_par_sets_max = unique_par_sets_max[
            unique_par_redshifts_max > redshift_cut]

        unique_par_redshifts_min = unique_par_redshifts_min[
            unique_par_redshifts_min > redshift_cut]
        unique_par_redshifts_max = unique_par_redshifts_max[
            unique_par_redshifts_max > redshift_cut]

        print(unique_par_sets_min)
        print(unique_par_redshifts_min)

        om_min = unique_par_sets_min[:, 0]
        w0_min = unique_par_sets_min[:, 1]
        wa_min = unique_par_sets_min[:, 2]
        om_max = unique_par_sets_max[:, 0]
        w0_max = unique_par_sets_max[:, 1]
        wa_max = unique_par_sets_max[:, 2]

        color_min = np.zeros((len(unique_par_redshifts_min), 3))
        for jj in np.arange(len(color_min)):
            color_min[jj, 0] = 1.
            color_min[jj, 1] = float(jj) / len(color_min)
            color_min[jj, 2] = float(jj) / len(color_min)

        color_min = color_min[::-1]

        color_max = np.zeros((len(unique_par_redshifts_max), 3))
        for jj in np.arange(len(color_max)):
            color_max[jj, 0] = 1.
            color_max[jj, 1] = float(jj) / len(color_max)
            color_max[jj, 2] = float(jj) / len(color_max)
            #print jj, color_max[jj,0], color_max[jj,1], color_max[jj,2]

        color_max = color_max[::-1]

        ax_scatter.scatter(om_min[:],
                           w0_min[:],
                           wa_min[:],
                           color=color_min,
                           s=100.,
                           depthshade=False,
                           edgecolor='k')
        ax_scatter.scatter(om_max[:],
                           w0_max[:],
                           wa_max[:],
                           color=color_max,
                           s=100.,
                           depthshade=False,
                           marker='^',
                           edgecolor='k')
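
A call sketch, assuming the bins argument is a list of bins_tuple values (the function uses bins[0] for the plotted contour and bins[ii] for each Deltamu run) and that the module-level names it relies on (root_dir, Deltamu, get_unique_parameter_sets) are defined elsewhere in the file:

    plot_3d_contours('cpl', bins=[(30, 30, 30), (40, 40, 40)],
                     smoothing=0.6, tolerance=0.005,
                     labels=['omega_m', 'w0', 'wa'])
    plt.show()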
Example #9
def Analyse(acquire_new, result_path, source_path, obj_mag, material, ftr,
            presentation):
    steps = [
        '', '', '1_Image_Acquisition', 'Global_Histogram', '2_Segmentation',
        '3_ROIs', '4_Local_Histogram'
    ]
    full_path = []
    for step in steps:
        full_path.append(os.path.join(result_path, step))
    #print (full_path)
    if not acquire_new:
        full_path[2] = source_path
    list_of_samples = os.listdir(full_path[2])

    stats_file = open(os.path.join(result_path, 'ListOfFlakes.csv'), "a+")
    stat = "LithoCoord,Contrast,EstLayers,BoundingBoxArea(um^2)\n"
    stats_file.write(stat)
    stats_file.close()

    for sample in list_of_samples:
        #sample_path = os.path.join(full_path[2], str(sample))
        print(sample)
        ContourObj = Contour.Contour(str(sample), full_path[2], result_path,
                                     obj_mag, material, ftr)
        if material == 'Sb':
            ContourObj.edges_gluey(obj_mag=obj_mag,
                                   ftr=0,
                                   glue_remover=True,
                                   presentation=presentation)
        else:
            ContourObj.edges_gluey(obj_mag,
                                   ftr,
                                   glue_remover=False,
                                   presentation=presentation)
        #self.ContourObj.segmentation(ftr=self.filter)
        ROIObj = Rectangle.Rectangle((full_path[4] + '\\Contours'), sample,
                                     full_path[2], result_path, obj_mag,
                                     material, ftr)
        X_Y_FS = ROIObj.markROIs(True, presentation)

        list_of_chunks = os.listdir((full_path[5] + '\\' + sample[:-4]))
        list_of_chunk_numbers = []
        if material == 'C':
            for chunk, ant in zip(list_of_chunks, X_Y_FS):
                stat = str(sample)[:-4] + '_'
                HistogramObj = Histogram.Histogram(
                    str(chunk), (full_path[5] + '\\' + sample[:-4]),
                    (full_path[6]), result_path, obj_mag)
                #self.HistogramObj = Histogram.Histogram(str(chunk), (full_path[5]+'\\'+sample), (full_path[6]+'\\'+sample), self.result_path,obj_mag=self.obj_mag) #deeper directory
                contrast, layers = HistogramObj.saveLocalHistogram(
                    ftr, material, presentation)
                if contrast is not None and len(contrast) > 3:
                    chunk_number = int(chunk[-6:-4])
                    stat += str(chunk_number).zfill(2) + ',' + contrast
                    stat += ',' + layers
                    list_of_chunk_numbers.append(chunk_number)
                if ant[3] in list_of_chunk_numbers:
                    #TB = 'T' if (ant[1]<256) else ('B')
                    #LR = 'L' if (ant[0]<256) else ('R')
                    stat += "," + str(ant[2]) + "\n"
                    stats_file = open(
                        os.path.join(result_path, 'ListOfFlakes.csv'), "a+")
                    stats_file.write(stat)
                    stats_file.close()

        cv2.destroyAllWindows()
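
The per-flake statistics are appended by repeatedly reopening ListOfFlakes.csv with hand-built comma strings; a sketch of the same header write using the csv module (column names taken from the string above):

    import csv
    with open(os.path.join(result_path, 'ListOfFlakes.csv'), 'a+', newline='') as f:
        csv.writer(f).writerow(
            ['LithoCoord', 'Contrast', 'EstLayers', 'BoundingBoxArea(um^2)'])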
Example #10
# #
# text = pytesseract.image_to_string(lettre, lang='fra', config=" --psm 10 --oem 3")
# print(text)

if nomfichier == 'image1.png' or nomfichier == 'image2.png':
    w, imagec, ImageP = projetction.Getimage(nomfichier)
    # Projection
    H = projetction.GetHProjection(imagec)
    # Separate the characters
    Position = projetction.GetHWposition(H, w, imagec, ImageP)
    # letter recognition
    projetction.Reconna1(Position, ImageP)

else:
    # locate the text region
    image, edged = Contour.PreTraite(nomfichier)
    # find the contour
    Contour.findContour(image, edged)
    # rotate the text image (imagecontour.jpg)
    p = rotate.getCorrect()
    # load the image to separate the lines and the characters (imagerotate.jpg)
    w, imagec, ImageP = projetction.Getimage('imagerotate.jpg')
    # Projection
    H = projetction.GetHProjection(imagec)
    # Projection
    W = projetction.GetVProjection(imagec)
    # draw the contour of each character, then run the recognition
    Position = projetction.GetHWposition(H, w, imagec, ImageP)
    # letter recognition
    projetction.Reconna2(Position, ImageP)
Example #11
def demo_loft_cylinder(dstdir):
    Contour.SetContourKernel('Circle')
    try:
        Repository.Delete('path')
        Repository.Delete('ct')
        Repository.Delete('ct2')
    except:
        pass
    #generate path
    p = Path.pyPath()
    p.NewObject('path')
    p.AddPoint([0.,0.,0.])
    p.AddPoint([0.,0.,30.])
    p.CreatePath()
    num = p.GetPathPtsNum()
    
    #create two contours
    c = Contour.pyContour()
    c.NewObject('ct','path',0)
    c.SetCtrlPtsByRadius([0.,0.,0.],2)
    c.Create()
    print ("Contour created: area is: " + str(c.Area()) + "; center is: " +str(c.Center()))
    
    c2 = Contour.pyContour()
    c2.NewObject('ct2','path',num-1)
    c2.SetCtrlPtsByRadius([0.,0.,30.],2)
    c2.Create()
    print ("Contour created: area is: " + str(c2.Area()) + "; center is: " +str(c2.Center()))
    c.GetPolyData('ctp')
    c2.GetPolyData('ct2p')
    
    #processing the contours
    numOutPtsAlongLength = 12
    numPtsInLinearSampleAlongLength = 240
    numLinearPtsAlongLength = 120
    dstName='loft'
    numOutPtsInSegs = 60
    numModes = 20
    useFFT = 0
    useLinearSampleAlongLength = 1

    Geom.SampleLoop('ctp',numOutPtsInSegs,'ctps')
    Geom.SampleLoop('ct2p',numOutPtsInSegs,'ct2ps')
    Geom.AlignProfile('ctps','ct2ps','ct2psa',0)

    srcList = ['ctps','ct2psa']
    Geom.LoftSolid(srcList, dstName, numOutPtsInSegs, numOutPtsAlongLength,
                   numLinearPtsAlongLength, numModes, useFFT,
                   useLinearSampleAlongLength)
    #cap the cylinder
    VMTKUtils.Cap_with_ids(dstName,'cap',0,0)

    solid = Solid.pySolidModel()
    solid.NewObject('cyl')
    solid.SetVtkPolyData('cap')
    solid.GetBoundaryFaces(90)
    print ("Creating model: \nFaceID found: " + str(solid.GetFaceIds()))
    solid.WriteNative(dstdir + "/cylinder.vtp")
    
    Repository.Delete('ctp')
    Repository.Delete('ct2p')
    Repository.Delete('ct2ps')
    Repository.Delete(dstName)
    Repository.Delete('cap')
    Repository.Delete('path')
    
    from shutil import copyfile
    copyfile("cylinder.vtp.facenames2",dstdir + "/cylinder.vtp.facenames")  
    return dstdir+"/cylinder.vtp"