def getCatchment(self, timestep=0):
        """
        Read the HDF5 file for a given time step and extract the catchment.

        Parameters
        ----------
        timestep : int
            Time step to load (default 0).

        """

        df = h5py.File('%s/flow.time%s.hdf5' % (self.folder, timestep), 'r')
        vertices = np.array(df['/coords'])
        lbasin = np.array(df['/basin'])
        lfacc = np.array(df['/discharge'])
        lconnect = np.array(df['/connect'])
        lchi = np.array(df['/chi'])
        df.close()
        con1, con2 = np.hsplit(lconnect, 2)
        conIDs = np.append(con1, con2)
        IDs = np.unique(conIDs - 1)  # unique 0-based vertex IDs (currently unused)

        Xl = np.zeros((len(lconnect[:,0]),2))
        Yl = np.zeros((len(lconnect[:,0]),2))
        Xl[:,0] = vertices[lconnect[:,0]-1,0]
        Xl[:,1] = vertices[lconnect[:,1]-1,0]
        Yl[:,0] = vertices[lconnect[:,0]-1,1]
        Yl[:,1] = vertices[lconnect[:,1]-1,1]
        Zl = vertices[lconnect[:,1]-1,2]
        FAl = lfacc[lconnect[:,0]-1]
        Chil = lchi[lconnect[:,0]-1]
        Basinl = lbasin[lconnect[:,0]-1]

        X1 = Xl[:,0]
        X2 = Xl[:,1]
        Y1 = Yl[:,0]
        Y2 = Yl[:,1]
        Z = Zl
        FA = FAl
        Chi = Chil
        Basin = Basinl

        self.XY = np.column_stack((X1, Y1))
        if self.ptXY[0] < X1.min() or self.ptXY[0] > X1.max():
            raise RuntimeError('X coordinate of given point is not in the simulation area.')

        if self.ptXY[1] < Y1.min() or self.ptXY[1] > Y1.max():
            raise RuntimeError('Y coordinate of given point is not in the simulation area.')

        distance, index = spatial.KDTree(self.XY).query(self.ptXY)
        self.basinID = Basin[index]
        basinIDs = np.where(Basin == Basin[index])[0]
        self.Basin = Basin

        self.donX = X1[basinIDs]
        self.donY = Y1[basinIDs]
        self.rcvX = X2[basinIDs]
        self.rcvY = Y2[basinIDs]
        self.Z = Z[basinIDs]
        self.FA = FA[basinIDs]
        self.Chi = Chi[basinIDs]

        return
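The lookup above boils down to: build a KDTree on the donor-vertex coordinates, query the user-supplied point, and keep every flow segment whose basin ID matches the hit. A minimal self-contained sketch of that pattern, with made-up coordinates and basin IDs standing in for the HDF5 contents:

import numpy as np
from scipy import spatial

xy = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])  # donor coordinates
basin = np.array([7, 7, 3, 3])                                   # basin ID per segment
pt = (0.9, 0.1)                                                  # point of interest

_, index = spatial.KDTree(xy).query(pt)        # nearest donor vertex
segments = np.where(basin == basin[index])[0]  # every segment in that basin
print(basin[index], segments)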
Example #2
    for line in raw:
        if line != 'EOF' and line != '':
            print(line)
            if count > 5:
                split = line.strip().split(' ')
                if split != []:
                    split = [int(split[1]), int(split[2])]
                    v = g.add_vertex()
                    g.vp.position[v] = split
                    lines.append(split)
            else:
                count+=1
                
lines = np.array(lines)
num_vertices = len(lines)
tree = spatial.KDTree(lines)
distances, hoods_arr = tree.query(lines, num_vertices)

distances = [dist[1:] for dist in distances]
hoods_arr = [nbhd[1:] for nbhd in hoods_arr]

g.properties[("e", "neighborhoods")] = g.new_edge_property("boolean")

neighborhoods_array = [
    nl.NeighborLists(g, vertex, distances[index], hoods_arr[index]).neighborhood \
    for index, vertex in enumerate(g.vertices())] 


           
gt.graph_draw(g, pos=g.vp.position)    
property_map = gt.group_vector_property(neighborhoods_array)
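Note the dist[1:] / nbhd[1:] slicing above: when a tree is queried with its own input points, each point comes back as its own nearest neighbour at distance zero, so the first column is dropped. A compact sketch of the same pattern:

import numpy as np
from scipy import spatial

pts = np.random.rand(10, 2)
tree = spatial.KDTree(pts)
dists, idxs = tree.query(pts, k=4)        # k nearest neighbours, self included
dists, idxs = dists[:, 1:], idxs[:, 1:]   # drop the self-match in column 0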
Example #3
def euclidean_halton_graph(n, radius, bases, lower, upper, source, target,
                           mapFile, car_width, car_length, collision_delta):
    manager = ObstacleManager(mapFile, car_width, car_length, collision_delta)

    G = nx.DiGraph()
    upper = numpy.array(upper)
    lower = numpy.array(lower)
    scale = upper - lower
    offset = lower

    position = []

    numVertices = 0
    haltonIndex = 1

    if source is not None:
        position.append(source)
        numVertices += 1
    if target is not None:
        position.append(target)
        numVertices += 1

    print('[GraphGenerator] Populating nodes...')
    while numVertices < n:
        p = wrap_around(
            numpy.array(
                [halton_sequence_value(haltonIndex, base) for base in bases]))
        p = p * scale + offset

        if manager.get_state_validity(p):
            position.append(p)
            numVertices += 1

        haltonIndex += 1

    state = [" ".join(str(x) for x in p) for p in position]

    for i in range(n):
        node_id = i
        G.add_node(str(node_id), state=state[i])

    print('[GraphGenerator] Generating KD Tree...')
    position = numpy.array(position)
    tree = spatial.KDTree(position)

    print('[GraphGenerator] Populating edges...')
    for i in range(n):
        distances, indices = tree.query(position[numpy.newaxis, i],
                                        k=100,
                                        distance_upper_bound=radius)
        distances = distances.squeeze()
        indices = indices.squeeze()

        edgeCount = 0
        for j in range(indices.shape[0]):
            if indices[j] >= len(position):
                break
            if distances[j] > numpy.finfo(float).eps:
                edgeLength = numpy.linalg.norm(position[i] -
                                               position[indices[j]])
                G.add_edge(str(i), str(indices[j]), length=str(edgeLength))

                edgeCount += 1
        print('[GraphGenerator] %d of %d nodes complete, edges: %d' %
              (i, n, edgeCount))

    print('[GraphGenerator] Graph generation complete')
    return G
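halton_sequence_value and wrap_around are imported from elsewhere and not shown. A plausible minimal version of the Halton helper is the radical inverse of the index in the given base (wrap_around presumably wraps angular state components back into [0, 1) and is omitted here):

def halton_sequence_value(index, base):
    # radical inverse of `index` in `base`: the index-th Halton sample in [0, 1)
    result, f = 0.0, 1.0 / base
    while index > 0:
        result += f * (index % base)
        index //= base
        f /= base
    return result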
Example #4
    if node.less is not None:
        if node.split_dim != 2:  # don't recurse into the lower child when splitting on z
            new_max = [maxes[0], maxes[1], maxes[2]]
            new_max[node.split_dim] = node.split
            draw_rectangle(node.less, depth - 1, mins, new_max, course_json,
                           scale, rotation)

    if node.greater is not None:
        new_min = [mins[0], mins[1], mins[2]]
        new_min[node.split_dim] = node.split
        draw_rectangle(node.greater, depth - 1, new_min, maxes, course_json,
                       scale, rotation)


tree = spatial.KDTree(input_data, leafsize=1)

course_json = ""
with open("flat/course_description/course_description.json", 'r') as f:
    course_json = json.loads(f.read())

    flatten_all = json.loads(
        '{"tool":0,"position":{"x":0.0,"y":"-Infinity","z":0.0},"rotation":{"x":0.0,"y":0.0,"z":0.0},"_orientation":0.0,"scale":{"x":8000.0,"y":1.0,"z":8000.0},"type":72,"value":1.0,"holeId":-1,"radius":0.0,"orientation":0.0}'
    )
    raise_all = json.loads(
        '{"tool":1,"position":{"x":0.0,"y":"-Infinity","z":0.0},"rotation":{"x":0.0,"y":0.0,"z":0.0},"_orientation":0.0,"scale":{"x":8000.0,"y":1.0,"z":8000.0},"type":72,"value":30.0,"holeId":-1,"radius":0.0,"orientation":0.0}'
    )

    # Flatten course first
    course_json["userLayers"]["terrainHeight"] = [flatten_all]
    #course_json["userLayers"]["terrainHeight"].append(raise_all)
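draw_rectangle walks scipy's KDTree node objects directly: tree.tree is the root, inner nodes expose split_dim, split, less and greater, and leaf nodes expose the point indices. A minimal sketch of such a traversal (the attribute names match scipy's pure-Python KDTree):

import numpy as np
from scipy import spatial

def walk(node, depth=0):
    # recursively print the splitting planes of a KDTree
    if isinstance(node, spatial.KDTree.leafnode):
        print(' ' * depth + 'leaf with point indices %s' % node.idx)
    else:
        print(' ' * depth + 'split dim %d at %.3f' % (node.split_dim, node.split))
        walk(node.less, depth + 1)
        walk(node.greater, depth + 1)

tree = spatial.KDTree(np.random.rand(8, 3), leafsize=1)
walk(tree.tree)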
Example #5
def draw_alpha(X, filtration, alpha, draw_balls=True, draw_voronoi_edges=True):
    """
    Draw the Delaunay triangulation in dotted lines, with the alpha faces at
    a particular scale

    Parameters
    ----------
    X: ndarray(N, 2)
        A 2D point cloud
    filtration: list of [(idxs, d)]
        List of simplices in the filtration, listed by idxs, which indexes into
        X, and with an associated scale d at which the simplex enters the filtration
    alpha: float
        The radius/scale up to which to plot balls/simplices
    draw_balls: boolean
        Whether to draw the balls (discs intersected with Voronoi regions)
    draw_voronoi_edges: boolean
        Whether to draw the Voronoi edges showing the boundaries of the alpha balls
    """
    
    # Determine limits of plot
    pad = 0.3
    xlims = [np.min(X[:, 0]), np.max(X[:, 0])]
    xr = xlims[1]-xlims[0]
    ylims = [np.min(X[:, 1]), np.max(X[:, 1])]
    yr = ylims[1]-ylims[0]
    xlims[0] -= xr*pad
    xlims[1] += xr*pad
    ylims[0] -= yr*pad
    ylims[1] += yr*pad

    if draw_balls:
        resol = 2000
        xr = np.linspace(xlims[0], xlims[1], resol)
        yr = np.linspace(ylims[0], ylims[1], resol)
        xpix, ypix = np.meshgrid(xr, yr)
        P = np.ones((xpix.shape[0], xpix.shape[1], 4))
        PComponent = np.ones_like(xpix)
        PBound = np.zeros_like(PComponent)
        # First make balls
        tree = spatial.KDTree(X)
        XPix = np.array([xpix.flatten(), ypix.flatten()]).T
        neighbs = tree.query(XPix, 1)[1].flatten()
        neighbs = np.reshape(neighbs, xpix.shape)
        if draw_voronoi_edges:
            PBound = filters.sobel(neighbs) > 0
        else:
            PBound = np.zeros_like(neighbs)
        for i in range(X.shape[0]):
            # First make the ball part
            ballPart = (xpix-X[i, 0])**2 + (ypix-X[i, 1])**2 <= alpha**2
            # Now make the Voronoi part
            voronoiPart = np.reshape(neighbs == i, ballPart.shape)
            Pi = ballPart*voronoiPart
            PComponent[Pi == 1] = 0
        # Now make Voronoi regions
        P[:, :, 0] = PComponent
        P[:, :, 1] = PComponent
        P[:, :, 3] = 0.2 + 0.8*PBound
        plt.imshow(np.flipud(P), cmap='magma', extent=(xlims[0], xlims[1], ylims[0], ylims[1]))

    # Plot simplices
    patches = []
    for (idxs, d) in filtration:
        if len(idxs) == 2:
            if d < alpha:
                plt.plot(X[idxs, 0], X[idxs, 1], 'k', linewidth=2)
            else:
                plt.plot(X[idxs, 0], X[idxs, 1], 'gray', linestyle='--', linewidth=1)
        elif len(idxs) == 3 and d < alpha:
            patches.append(Polygon(X[idxs, :]))
    ax = plt.gca()
    p = PatchCollection(patches, alpha=0.2, facecolors='C1')
    ax.add_collection(p)
    plt.scatter(X[:, 0], X[:, 1], zorder=0)
    plt.xlim(xlims[0], xlims[1])
    plt.ylim(ylims[0], ylims[1])
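A usage sketch with a hand-built toy filtration (in practice the filtration comes from an alpha-complex library; the simplices and scales below are made up for illustration):

import numpy as np
import matplotlib.pyplot as plt

X = np.array([[0.0, 0.0], [1.0, 0.0], [0.5, 0.8]])
filtration = [
    ([0, 1], 0.50),     # each edge enters at half its length
    ([1, 2], 0.47),
    ([0, 2], 0.47),
    ([0, 1, 2], 0.55),  # the triangle enters at roughly its circumradius
]
draw_alpha(X, filtration, alpha=0.52)
plt.show()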
Example #6
reader = csv.reader(file)

# Index the images by label
img_list = []
for i in range(10):
    file.seek(0)  # Back to top of csv
    next(reader, None)  # Skip the header
    j = 0
    for row in reader:
        if row[1] == labels[i]:
            img_list.append([i, int(row[0])])
            j += 1
            if j == n_samples:
                break
file.close()

vector_list = []
for img in img_list:
    hist = get_hist(img[1])
    vector_list.append([img[1], labels[img[0]], hist])
vector_tree = spatial.KDTree([i[2] for i in vector_list])

vector_averages = []
for i in range(10):
    temp = []
    for _, label, vector in vector_list:
        if label == labels[i]:
            temp.append(vector)
    vector_averages.append([i, np.mean(temp, axis=0)])
ave_vector_tree = spatial.KDTree([i[1] for i in vector_averages])
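With both trees built, classification-style lookups follow the same query pattern (get_hist and labels come from the surrounding script; the image id below is hypothetical):

query = get_hist(4242)                    # hypothetical image id
d, i = vector_tree.query(query)           # nearest individual image
print('closest image label:', vector_list[i][1])

d, i = ave_vector_tree.query(query)       # nearest class-mean histogram
print('closest class:', labels[vector_averages[i][0]])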
Example #7
    for i in range(N):

        size[i] = dist_matrix_size.item(
            (i, current_hour)) + total_add_waste_size.item((i, current_hour))
        weight[i] = dist_matrix_weight.item(
            (i, current_hour)) + total_add_waste_weight.item((i, current_hour))

        if size[i] >= 100:
            size[i] = 100
            bin_full[i] = True
            dist_matrix_weight[i, current_hour:24] = np.repeat(
                dist_matrix_weight[i, current_hour],
                24 - current_hour).reshape((1, 24 - current_hour))
            current_bin_pos = pos[i]
            dist, ind = spatial.KDTree(pos).query(current_bin_pos, 2)
            ind = ind[1]
            mult_matrix_size = np.outer(size, bin_behavior)
            mult_matrix_weight = np.outer(weight, bin_behavior)
            counter = 1

            waste_reposition(N, size[ind], pos[i],
                             mult_matrix_size[i, current_hour],
                             mult_matrix_weight[i, current_hour], ind,
                             current_hour, counter)

            total_add_waste_size = np.cumsum(add_waste_size, axis=1)
            total_add_waste_weight = np.cumsum(add_waste_weight, axis=1)

        bin_name = "12%s" % i
        bin_level = size[i] + random.choice(
Example #8
def main():
    #Input Argument Specifications
    gmArgsParser = argparse.ArgumentParser("Characterize ground motion using seismic hazard analysis and record selection")
    gmArgsParser.add_argument("-filenameBIM", required=True, help="Path to the BIM file")
    gmArgsParser.add_argument("-filenameEVENT", required=True, help="Path to the EVENT file")
    gmArgsParser.add_argument("-scenarioConfig", required=True, help="Path to the earthquake scenario configuration file")
    gmArgsParser.add_argument("-seed", type=int, default=1, help="Seed for random number generation")
    gmArgsParser.add_argument("-getRV", action='store_true', help="Flag showing whether or not this call is to get the random variables definition")
    
    #Parse the arguments
    gmArgs = gmArgsParser.parse_args()


    #Check getRV flag
    if not gmArgs.getRV:
        #We will use the template files so no changes are needed
        #We do not have any random variables for this event for now
        return 0

    #First let's process the arguments
    bimFilePath = gmArgs.filenameBIM
    eventFilePath = gmArgs.filenameEVENT
    scenarioConfigPath = gmArgs.scenarioConfig

    # if "-getRV" in inputArgs:
    #     #We will create an output that only contains empty random variables array
    #     with open(eventFilePath, 'w') as eventFile:
    #         randomVariables = {"RandomVariables":[]}
    #         json.dump(randomVariables, eventFile,  indent=4)

    #     return 0

    #Ensure a hazard cache folder exist
    if not os.path.exists("./HazardCache"):
        os.mkdir("./HazardCache")

    #TODO: we need to hash the hazard cache use that hash to check if computation is needed
    needsCompute = True 
    with open(scenarioConfigPath, 'r') as scenarioConfigFile:
        scenarioConfig = json.load(scenarioConfigFile)
        scenarioConfigFile.seek(0)
        scenarioHash = hashlib.md5(scenarioConfigFile.read().encode('utf-8')).hexdigest()
        if(os.path.exists("./HazardCache/hash")):
            with open("./HazardCache/hash", 'r') as hashFile:
                if(hashFile.read() == scenarioHash):
                    needsCompute = False
                else:
                    print("Scenario is changed and will require recomputation")
    
    recordsFolder = os.path.abspath(scenarioConfig["AppConfig"]["RecordsFolder"])

    if(needsCompute):
        computeScenario(scenarioConfig, scenarioHash, gmArgs.seed)

    
    #We need to read the building location
    with open(bimFilePath, 'r') as bimFile:
        bim = json.load(bimFile)
        location = [bim["GI"]["location"]["latitude"], bim["GI"]["location"]["longitude"]]

    #Now we can start processing the event
    with open("./HazardCache/Records_Selection.json", 'r') as selectionFile:
        recordSelection = json.load(selectionFile)

    with open("./HazardCache/Hazard_Output.json", 'r') as hazardOutputFile:
        hazardOutput = json.load(hazardOutputFile)

    siteLocations = []
    for gm in hazardOutput["GroundMotions"]:
        siteLocations.append([gm["Location"]["Latitude"], gm["Location"]["Longitude"]])

    # we need to find the nearest neighbor
    sitesTree = spatial.KDTree(siteLocations)

    nearest = sitesTree.query(location)
    selectedRecord = recordSelection["GroundMotions"][nearest[1]]
    rsn = selectedRecord["Record"]["Id"]
    scaleFactor = selectedRecord["ScaleFactor"]
    
    createNGAWest2Event(rsn, scaleFactor, recordsFolder, eventFilePath)
Example #9
def run(orientationFolder, homolFolder, imagesFormat,
        numNeighbours, outputFile, outputFolder, num, maltOptions):
    # Check user parameters
    if not os.path.isdir(orientationFolder):
        raise Exception(orientationFolder + ' does not exist')
    includeHomol = homolFolder != ''
    if includeHomol and not os.path.isdir(homolFolder):
        raise Exception(homolFolder + ' does not exist')

    if os.path.isfile(outputFile):
        raise Exception(outputFile + ' already exists!')
    if os.path.isdir(outputFolder):
        raise Exception(outputFolder + ' already exists!')
    # create output folder
    os.makedirs(outputFolder)

    mmLocalChanDescFile = 'MicMac-LocalChantierDescripteur.xml'
    requireLocalChanDescFile = ''
    if os.path.isfile(mmLocalChanDescFile):
        requireLocalChanDescFile = mmLocalChanDescFile

    # Parse number of tiles in X and Y
    nX, nY = [int(e) for e in num.split(',')]

    # Initialize the empty lists of images and 2D points with the x,y
    # positions of the cameras
    images = []
    camera2DPoints = []

    # For each image we get the x,y position of the camera and we add the
    # image and the point to the lists
    orientationFiles = glob.glob(orientationFolder + '/Orientation*')
    for orientationFile in orientationFiles:
        images.append(
            os.path.basename(orientationFile).replace(
                "Orientation-",
                "").replace(
                ".xml",
                ""))
        e = etree.parse(orientationFile).getroot()
        (x, y, _) = [float(c)
                     for c in e.xpath("//Externe")[0].find('Centre').text.split()]
        camera2DPoints.append((x, y))

    if numNeighbours >= len(images):
        raise Exception("numNeighbours >= len(images)")

    # Compute the bounding box of all the camera2DPoints
    minX, minY = numpy.min(camera2DPoints, axis=0)
    maxX, maxY = numpy.max(camera2DPoints, axis=0)

    print("Bounding box: " + ','.join([str(e)
                                       for e in [minX, minY, maxX, maxY]]))
    print("Offset bounding box: " +
          ','.join([str(e) for e in [0, 0, maxX - minX, maxY - minY]]))

    # Compute the size of the tiles in X and Y
    tileSizeX = (maxX - minX) / nX
    tileSizeY = (maxY - minY) / nY

    # Create a KDTree to query nearest neighbours
    kdtree = spatial.KDTree(camera2DPoints)

    # Sample nearest-neighbour distances between cameras as a sanity check
    # that tiles are small enough for the given images
    numSamplePoints = 100
    distances = []
    for camera2DPoint in camera2DPoints[:numSamplePoints]:
        distances.append(kdtree.query(camera2DPoint, 2)[0][1])

    # For each tile first we get a list of images whose camera XY position lays within the tile
    # note: there may be empty tiles
    tilesImages = {}
    for i, camera2DPoint in enumerate(camera2DPoints):
        pX, pY = camera2DPoint
        tileIndex = getTileIndex(pX, pY, minX, minY, maxX, maxY, nX, nY)
        if tileIndex not in tilesImages:
            tilesImages[tileIndex] = [images[i], ]
        else:
            tilesImages[tileIndex].append(images[i])

    # Create output file
    oFile = open(outputFile, 'w')
    rootOutput = etree.Element('ParCommands')

    # For each tile we extend the tilesImages list with the nearest neighbours
    for i in range(nX):
        for j in range(nY):
            k = (i, j)
            (tMinX, tMinY) = (minX + (i * tileSizeX), minY + (j * tileSizeY))
            (tMaxX, tMaxY) = (tMinX + tileSizeX, tMinY + tileSizeY)
            tCenterX = tMinX + ((tMaxX - tMinX) / 2.)
            tCenterY = tMinY + ((tMaxY - tMinY) / 2.)
            if k in tilesImages:
                imagesTile = tilesImages[k]
            else:
                imagesTile = []
            imagesTileSet = set(imagesTile)

            imagesTileSet.update([images[nni] for nni in kdtree.query(
                (tCenterX, tCenterY), numNeighbours)[1]])
            imagesTileSet.update(
                [images[nni] for nni in kdtree.query((tMinX, tMinY), numNeighbours)[1]])
            imagesTileSet.update(
                [images[nni] for nni in kdtree.query((tMinX, tMaxY), numNeighbours)[1]])
            imagesTileSet.update(
                [images[nni] for nni in kdtree.query((tMaxX, tMinY), numNeighbours)[1]])
            imagesTileSet.update(
                [images[nni] for nni in kdtree.query((tMaxX, tMaxY), numNeighbours)[1]])

            if includeHomol:
                imagesTileSetFinal = imagesTileSet.copy()
                # Add to the images for this tile, other images that have
                # tie-points with the current images in the tile
                for image in imagesTileSet:
                    imagesTileSetFinal.update(
                        [e.replace('.dat', '') for e in os.listdir(homolFolder + '/Pastis' + image)])
                imagesTileSet = imagesTileSetFinal

            if len(imagesTileSet) == 0:
                raise Exception('EMPTY TILE!')

            tileName = 'tile_' + str(i) + '_' + str(j)

            # Dump the list of images for this tile
            tileImageListOutputFileName = outputFolder + '/' + tileName + '.list'
            tileImageListOutputFile = open(tileImageListOutputFileName, 'w')
            tileImageListOutputFile.write('\n'.join(sorted(imagesTileSet)))
            tileImageListOutputFile.close()

            childOutput = etree.SubElement(rootOutput, 'Component')

            childOutputId = etree.SubElement(childOutput, 'id')
            childOutputId.text = tileName + '_Matching'

            childOutputImages = etree.SubElement(childOutput, 'requirelist')
            childOutputImages.text = outputFolder + '/' + \
                os.path.basename(tileImageListOutputFileName)

            childOutputRequire = etree.SubElement(childOutput, 'require')
            childOutputRequire.text = orientationFolder + " " + requireLocalChanDescFile

            childOutputCommand = etree.SubElement(childOutput, 'command')
            command = 'echo -e "\n" | mm3d Malt Ortho ".*' + imagesFormat + '" ' + os.path.basename(
                orientationFolder) + ' ' + maltOptions + ' "BoxTerrain=[' + ','.join([str(e) for e in (tMinX, tMinY, tMaxX, tMaxY)]) + ']"'
            command += '; echo -e "\n" | mm3d Tawny Ortho-MEC-Malt'
            command += '; echo -e "\n" | mm3d Nuage2Ply MEC-Malt/NuageImProf_STD-MALT_Etape_8.xml Attr=Ortho-MEC-Malt/Orthophotomosaic.tif Out=' + \
                tileName + '.ply Offs=[' + str(minX) + ',' + str(minY) + ',0]'
            childOutputCommand.text = command

            childOutputOutput = etree.SubElement(childOutput, 'output')
            childOutputOutput.text = tileName + '.ply'

    oFile.write(
        etree.tostring(
            rootOutput,
            pretty_print=True,
            encoding='utf-8').decode('utf-8'))
    oFile.close()
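getTileIndex is referenced above but not shown. A plausible minimal version maps a camera position to the (i, j) tile that contains it:

def getTileIndex(pX, pY, minX, minY, maxX, maxY, nX, nY):
    # clamp so points sitting exactly on the max edge land in the last tile
    i = min(int((pX - minX) * nX / (maxX - minX)), nX - 1)
    j = min(int((pY - minY) * nY / (maxY - minY)), nY - 1)
    return (i, j)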
Example #10
def photoshoot():

    n_sim = 1
    for value in range(n_sim):

        width = 1000
        height = 1000
        n_flock = 1000
        #n_pred = int(n_flock/25)
        n_pred = 5
        elapsed = 0

        flock = [
            Prey(*np.random.rand(2) * 1000, width, height)
            for _ in range(n_flock)
        ]
        predators = [
            Predator(*np.random.rand(2) * 1000, width, height)
            for _ in range(n_pred)
        ]

        fig = plt.figure()
        camera = Camera(fig)

        while flock and predators and len(flock) < 1100 and elapsed < 300:

            start = time.perf_counter()
            color = []
            radius = []

            x = np.empty(len(flock + predators))
            y = np.empty(len(flock + predators))
            u = np.empty(len(flock + predators))
            v = np.empty(len(flock + predators))

            pointmap = create_map(flock)

            locations = np.array(list(pointmap.keys()))
            tree = spatial.KDTree(locations, 15)
            boids = flock + predators  # renamed so the builtin all() is not shadowed

            for boid in boids:
                boid.apply_behaviour(flock, tree, locations, pointmap,
                                     predators, elapsed)

            count = 0
            for boid in boids:
                boid.update()
                boid.edges()

                x[count], y[count] = get_pos(boid.position)
                u[count], v[count] = get_vel(boid.velocity)

                color.append(boid.color)
                radius.append(boid.radius)
                count += 1

            plt.quiver(x, y, u, v, color=color)
            plt.scatter(x, y, c=color, s=radius)
            camera.snap()
            end = time.perf_counter()
            print(len(flock), len(predators), elapsed)
            print(end - start)
            elapsed += 1

        anim = camera.animate()
        anim.save("simulation_{}.mp4".format(value), writer='imagemagick')
Example #11
def talker():
    cmd_publisher = rospy.Publisher('cmd_drive', cmd_drive, queue_size=10)
    data_pub = rospy.Publisher('floats', numpy_msg(Floats), queue_size=10)
    rospy.Subscriber("/IMU", Odometry, ImuCallback)
    rospy.Subscriber("/GPS/fix", NavSatFix, retour_gps)
    rospy.init_node('LQR', anonymous=True)
    r = rospy.Rate(200)  # 200 Hz
    simul_time = rospy.get_param('~simulation_time', '10')
    # == Dynamic Model ======================================================

    # == Steady State   ======================================================

    B = get_model_B_matrix()

    # == LQR control (Gains)   ======================================================

    C = np.array([[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])

    R = np.array([[10000, 0], [0, 10000]])

    Q = np.array([[100, 0, 0, 0], [0, 100, 0, 0], [0, 0, 100, 0],
                  [0, 0, 0, 100]])

    #print(C)

    # == Observer  ======================================================

    #print(xhat)

    dt = 0.005  # sampling period

    Gammax = dt * eye(4, 4)

    Gammaalpha = dt * 0.00001 * eye(4, 4)
    Gammabeta = dt**2 * eye(3, 3) * 0.00001

    while (abs(X) < 0.001):  # busy-wait until the first GPS fix updates the global X
        i = 0
    mat = np.loadtxt('/home/summit/Spido_ws/recorded_files/mat.txt')
    #figure(1)
    #plt.plot(mat[:,1:2],mat[:,2:3])
    mat[:, 1] = mat[:, 1] - mat[0, 1]
    mat[:, 2] = mat[:, 2] - mat[0, 2]
    dpsi = Psi - mat[0, 3]
    #print(dpsi,Psi)
    #figure(2)
    #plt.plot(mat[:,0],mat[:,3])
    mat[:, 3] = mat[:, 3] + dpsi
    RR = np.array([[cos(dpsi), -sin(dpsi), X], [sin(dpsi),
                                                cos(dpsi), Y], [0, 0, 1]])
    FF = np.array([
        np.transpose(mat[:, 1]),
        np.transpose(mat[:, 2]),
        np.ones((1, len(mat)))
    ])
    A = RR.dot(FF)
    mat[:, 1:2] = np.transpose(A[0])
    mat[:, 2:3] = np.transpose(A[1])
    #figure(1)
    #plt.plot(mat[:,1:2],mat[:,2:3])
    #figure(2)
    #plt.plot(mat[:,0],mat[:,3])
    #plt.show()

    Psiref = mat[0, 3]
    Psipref = 0  #mat[0,6]
    xref = mat[0, 1]
    yref = mat[0, 2]
    ey = -sin(Psiref) * (X - xref) + cos(Psiref) * (Y - yref)
    epsi = (Psi - Psiref)
    xhat = np.array([[0], [0], [0], [0]])
    #print(mat)
    #mat[:,3]=mat[:,3]-mat[0,3]+Psi
    #coords=mat[:,1:3]
    line = geom.LineString(mat[:, 1:3])
    t0 = rospy.get_time()
    #pause = rospy.ServiceProxy('/gazebo/pause_physics', Empty)

    #file = open("steerLQR.txt","w")

    cmd = cmd_drive()
    cmd.linear_speed = vit
    while (rospy.get_time() - t0 <= simul_time):
        #plt.plot(X,Y,xref,yref)

        #subprocess.check_call("rosservice call /gazebo/pause_physics", shell=True)
        point = geom.Point(X, Y)
        nearest_pt = line.interpolate(line.project(point))
        distance, index = spatial.KDTree(mat[:, 1:3]).query([nearest_pt.x, nearest_pt.y])
        xref = mat[index, 1]
        yref = mat[index, 2]
        Psiref = mat[index, 3]
        k = mat[index, 7]
        vyref = 0  #mat[index,5]
        Psipref = 0  #vit*k#mat[index,6]
        ey = -sin(Psiref) * (X - xref) + cos(Psiref) * (Y - yref)
        epsi = (Psi - Psiref)
        #vy=-Xp*sin(Psi)+Yp*cos(Psi)
        vy = xhat[0, 0]
        eyy = xhat[2, 0]
        epsih = xhat[3, 0]

        vpsih = xhat[1, 0]

        x = np.array([[vy], [Psip], [ey], [epsi]])
        yd = np.array([[vyref], [Psipref], [0]])

        y = C.dot(x)
        #print('u',u)

        A = get_model_A_matrix(k)
        Vyss = -(k * vit * (A[(0, 1)] * B[(1, 0)] - A[(1, 1)] * B[(0, 0)] - A[
            (0, 1)] * B[(1, 1)] + A[(1, 1)] * B[
                (0, 1)])) / (A[(0, 0)] * B[(1, 0)] - A[(1, 0)] * B[(0, 0)] -
                             A[(0, 0)] * B[(1, 1)] + A[(1, 0)] * B[(0, 1)])
        Vpsiss = vit * k
        eyss = 0
        epsiss = -Vyss / vit

        bfss = -(k * vit * (A[(0, 0)] * A[(1, 1)] - A[(0, 1)] * A[
            (1, 0)])) / (A[(0, 0)] * B[(1, 0)] - A[(1, 0)] * B[(0, 0)] -
                         A[(0, 0)] * B[(1, 1)] + A[(1, 0)] * B[(0, 1)])
        brss = -bfss

        xsss = np.array([[Vyss], [Vpsiss], [eyss], [epsiss]])
        usss = np.array([[bfss], [brss]])
        P = solve_continuous_are(A, B, Q, R)
        #print(P)

        G = np.linalg.inv(R).dot(np.transpose(B)).dot(P)
        #print(G)
        #np.array([[-0.5,4.3e+13],[-0.5,-4.2e+13]])#
        M = pinv(C.dot(pinv(A - B.dot(G))).dot(B))

        u = M.dot(yd) - G.dot(x - xsss) + usss
        #u=M.dot(yd)-G.dot(x)
        #print(u)
        xhat, Gammax = kalman(xhat, Gammax, dt * B.dot(u), y, Gammaalpha,
                              Gammabeta,
                              eye(4, 4) + dt * A, C)

        #print('xhat',xhat)

        #vy1=-Xp*sin(Psi)+Yp*cos(Psi)  # vy: lateral velocity expressed in the vehicle frame

        #;//-xpref*sin(psiref)+ypref*cos(psiref)  # vy: lateral velocity expressed in the vehicle frame
        #subprocess.check_call("rosservice call /gazebo/unpause_physics", shell=True)
        #unpause = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)

        cmd.steering_angle_front = u[0, 0]
        cmd.steering_angle_rear = u[1, 0]
        cmd_publisher.publish(cmd)

        posture = np.array([
            X, Y, Psi, ey, epsi, vy, Psip, u[0, 0], u[1, 0], xref, yref,
            Psiref, vpsih, eyy, epsih
        ],
                           dtype=np.float32)
        data_pub.publish(posture)
        #file.write(' '.join((str(rospy.get_time()-t0), str(u[0,0]), str(u[1,0]))))
        r.sleep()
    cmd.steering_angle_front = 0
    cmd.steering_angle_rear = 0
    aa = vit
    while (aa > 0):
        cmd = cmd_drive()
        aa = aa - 0.1
        if aa < 0:
            aa = 0
        cmd.linear_speed = aa
        cmd_publisher.publish(cmd)
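kalman is imported from elsewhere. Given how it is called above (state estimate, covariance, control term dt*B*u, measurement y, process and measurement noise, discretized A, and C), a plausible one-step predict/correct implementation is:

import numpy as np

def kalman(xhat, Gx, u, y, Galpha, Gbeta, A, C):
    # prediction
    xpred = A.dot(xhat) + u
    Gpred = A.dot(Gx).dot(A.T) + Galpha
    # correction
    S = C.dot(Gpred).dot(C.T) + Gbeta
    K = Gpred.dot(C.T).dot(np.linalg.inv(S))
    xnew = xpred + K.dot(y - C.dot(xpred))
    Gnew = (np.eye(len(xnew)) - K.dot(C)).dot(Gpred)
    return xnew, Gnew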
Example #12
    def metric_coverage_success_rate(self, grasps_list, scores_list,
                                     flex_outcomes_list, gt_grasps_list,
                                     visualize):
        """
        Computes the coverage success rate for grasps of multiple objects.

        Args:
          grasps_list: list of numpy array, each numpy array is the predicted
            grasps for each object. Each numpy array has shape (n, 4, 4) where
            n is the number of predicted grasps for each object.
          scores_list: list of numpy array, each numpy array is the predicted
            scores for each grasp of the corresponding object.
          flex_outcomes_list: list of numpy array, each element of the numpy
            array indicates whether that grasp succeeds in grasping the object
            or not.
          gt_grasps_list: list of numpy array. Each numpy array has shape of
            (m, 4, 4) where m is the number of groundtruth grasps for each
            object.
          visualize: bool. If True, it will plot the curve.
        
        Returns:
          auc: float, area under the curve for the success-coverage plot.
        """
        all_trees = []
        all_grasps = []
        all_object_indexes = []
        all_scores = []
        all_flex_outcomes = []
        visited = set()
        tot_num_gt_grasps = 0
        for i in range(len(grasps_list)):
            print('building kd-tree {}/{}'.format(i, len(grasps_list)))
            gt_grasps = np.asarray(gt_grasps_list[i]).copy()
            all_trees.append(spatial.KDTree(gt_grasps[:, :3, 3]))
            tot_num_gt_grasps += gt_grasps.shape[0]

            for g, s, f in zip(grasps_list[i], scores_list[i],
                               flex_outcomes_list[i]):
                all_grasps.append(np.asarray(g).copy())
                all_object_indexes.append(i)
                all_scores.append(s)
                all_flex_outcomes.append(f)

        all_grasps = np.asarray(all_grasps)

        all_scores = np.asarray(all_scores)
        order = np.argsort(-all_scores)
        num_covered_so_far = 0
        correct_grasps_so_far = 0
        num_visited_grasps_so_far = 0

        precisions = []
        recalls = []
        prev_score = None

        for oindex, index in enumerate(order):
            if oindex % 1000 == 0:
                print(oindex, len(order))

            object_id = all_object_indexes[index]
            close_indexes = all_trees[object_id].query_ball_point(
                all_grasps[index, :3, 3], RADIUS)  # RADIUS is a constant defined elsewhere in the module

            num_new_covered_gt_grasps = 0

            for close_index in close_indexes:
                key = (object_id, close_index)
                if key in visited:
                    continue

                visited.add(key)
                num_new_covered_gt_grasps += 1

            correct_grasps_so_far += all_flex_outcomes[index]
            num_visited_grasps_so_far += 1
            num_covered_so_far += num_new_covered_gt_grasps

            if prev_score is not None and abs(prev_score -
                                              all_scores[index]) < 1e-3:
                precisions[-1] = float(
                    correct_grasps_so_far) / num_visited_grasps_so_far
                recalls[-1] = float(num_covered_so_far) / tot_num_gt_grasps
            else:
                precisions.append(
                    float(correct_grasps_so_far) / num_visited_grasps_so_far)
                recalls.append(float(num_covered_so_far) / tot_num_gt_grasps)
                prev_score = all_scores[index]

        auc = 0
        for i in range(1, len(precisions)):
            auc += (recalls[i] - recalls[i - 1]) * (precisions[i] +
                                                    precisions[i - 1]) * 0.5

        if visualize:
            import matplotlib.pyplot as plt
            plt.plot(recalls, precisions)
            plt.title('auc = {0:02f}'.format(auc))
            plt.ylim([0.0, 1.05])
            plt.xlim([0.0, 1.0])
            plt.show()

        print('auc = {}'.format(auc))
        np.save(
            os.path.join(self._output_folder,
                         '{}_vae+evaluator.npy'.format(self._signature)), {
                             'precisions': precisions,
                             'recalls': recalls,
                             'auc': auc,
                             'cfg': self._cfg
                         })

        return auc
Example #13
        b_mean = np.mean(b_vecs, axis=0)
        b_var = np.var(b_vecs, axis=0)
        for vec_name in vec_names:
            if b in vec_name:
                #print(vec_name)
                norm_vecs.append(
                    (model[vec_names.index(vec_name)] - b_mean) / b_var)

    np.save('../models/norm_by_book_par_vecs.npy', norm_vecs)
else:
    norm_vecs = np.load('../models/norm_by_book_par_vecs.npy')

#model = Doc2Vec.load('../models/par2vec_300_20k.doc2vec')
#norm_vecs = model.docvecs.vectors_docs

tree = spatial.KDTree(norm_vecs)

#b = '../data/BookCorpus/Thriller/James_Bond-1'
#b = '../data/BookCorpus/Thriller/Da_Vinci_Code'
b = '../data/BookCorpus/Romance/Attachments'
#b = '../data/BookCorpus/Fantasy/Wheel_of_Time-12'
#b = '../data/BookCorpus/Science_fiction/Asimov47'

p = 1

distances, closest = tree.query(norm_vecs[vec_names.index(b + '_par_' +
                                                          str(p))],
                                k=10)

i = 0
for c in closest:
Example #14
def deflectionAlongX(y, z, xVec, yDef, zDef, case):
    if case == "bending":
        Df = pd.read_csv('validation_processed/bending_processed.csv',
                         header=0,
                         index_col=0)
    elif case == "jam_bent":
        Df = pd.read_csv('validation_processed/jam_bent_processed.csv',
                         header=0,
                         index_col=0)
    elif case == "jam_straight":
        Df = pd.read_csv('validation_processed/jam_straight_processed.csv',
                         header=0,
                         index_col=0)
    else:
        print("Case not found")
        return

    uniqueYZ = Df.drop_duplicates(subset=['y', 'z'])
    A = np.zeros((uniqueYZ['z'].size, 2))
    A[:, 0] = uniqueYZ['y']
    A[:, 1] = uniqueYZ['z']
    nearestYZ = A[spatial.KDTree(A).query([y, z])[1]]
    print("Nearest (y,z) found is:", nearestYZ)
    nearestY = nearestYZ[0]
    nearestZ = nearestYZ[1]
    print("Plotting deflection in (x,y,z)-direction for case: %s" % (case))

    yFiltered = Df.loc[Df['y'] == nearestY]
    zFiltered = yFiltered.loc[yFiltered['z'] == nearestZ]
    xSort = zFiltered.sort_values(by='x')

    fig, ax = plt.subplots()
    plt.suptitle(
        "Aileron deflection in y-direction VS distance in x-direction for case: %s"
        % (case),
        fontsize=16)
    plt.rcParams["font.size"] = "12"
    plt.rcParams["axes.labelsize"] = "12"
    ax.set_ylabel('y [mm]', fontsize=12.0)
    ax.set_xlabel('z [mm]', fontsize=12.0)
    ax.tick_params(axis='both', which='major', labelsize=12)
    ax.tick_params(axis='both', which='minor', labelsize=12)

    #plt.plot(xSort['x'], xSort['u1Loc1'], label="deflection in x")
    plt.plot(xSort['x'] / 1000,
             xSort['u2Loc1'],
             label="deflection in y (validation)")
    plt.plot(xVec, yDef * 1000, label="deflection in y (numerical)")
    plt.xlabel("x [m]")
    plt.ylabel("Deflection [mm]")
    plt.grid()
    plt.legend()
    ax.autoscale()
    plt.show()

    fig, ax = plt.subplots()
    plt.suptitle(
        "Aileron deflection in z-direction VS distance in x-direction for case: %s"
        % (case),
        fontsize=16)
    plt.rcParams["font.size"] = "12"
    plt.rcParams["axes.labelsize"] = "12"
    ax.set_ylabel('y [mm]', fontsize=12.0)
    ax.set_xlabel('z [mm]', fontsize=12.0)
    ax.tick_params(axis='both', which='major', labelsize=12)
    ax.tick_params(axis='both', which='minor', labelsize=12)
    plt.plot(xVec, zDef * 1000, label="deflection in z (numerical)")
    plt.plot(xSort['x'] / 1000,
             xSort['u3Loc1'],
             label="deflection in z (validation)")
    plt.xlabel("x [m]")
    plt.ylabel("Deflection [mm]")
    plt.grid()
    plt.legend()
    ax.autoscale()
    plt.show()
    return
Example #15
def energy_fusion(x, y, z, p):
    point = np.array([x, y, z]).transpose()
    POINT_N = point.shape[0]
    logging.info("points: {}".format(POINT_N))
    print("points: {}".format(POINT_N))

    tree = spatial.KDTree(point)
    logging.info("build tree finished")
    print('build tree finished')

    dsum = 0
    k = int(POINT_N / 100)  # note: unused; the query below uses the module-level constant K
    for i in range(POINT_N):
        d, index = tree.query(point[i], k=K)
        for dk in d:
            dsum += dk
    dsum = dsum / POINT_N / 10
    print(dsum)
    # dsum = 0.05

    visit = [0] * len(x)
    able = [1] * len(x)

    r_new = [0] * len(x)
    g_new = [0] * len(x)
    b_new = [0] * len(x)

    for i in range(POINT_N):

        if (visit[i] == 0):
            visit[i] = 1
            root = [0] * 5

            root_label = rgb2label(p[i])
            root[root_label] += 1

            queue = []
            all = []
            queue.append(point[i])
            all.append(i)
            while (len(queue) > 0):
                temp = []
                for j in range(len(queue)):
                    d, index = tree.query(queue[j], k=K)
                    dk = np.array(d)
                    ct = 0
                    for k in index:
                        if (dk[ct] < dsum) and (ct > 0):
                            k_label = rgb2label(p[k])
                            root[k_label] += 1

                            if (visit[k] == 0):
                                if rgb2label(p[k]) == rgb2label(p[i]):
                                    temp.append(point[k])
                                    all.append(k)
                                    visit[k] = 1
                        ct += 1
                queue = temp

            for j in range(len(all)):
                # if len(all) < 3:
                #     able[all[j]] = 0
                label = np.argwhere(root == np.amax(root)).flatten().tolist()
                if len(label) == 1:
                    r_new[all[j]] = label_colours[label[0]][2]
                    g_new[all[j]] = label_colours[label[0]][1]
                    b_new[all[j]] = label_colours[label[0]][0]
                else:  # multiple maxima exist
                    l = np.argmax(p[all[j]])
                    r_new[all[j]] = label_colours[l][2]
                    g_new[all[j]] = label_colours[l][1]
                    b_new[all[j]] = label_colours[l][0]

    logging.info("refine finished")
    print('refine finished')

    result_xyz = []
    result_rgb = []
    print('able:', len(able))
    for m in range(len(able)):
        if able[m]:
            result_xyz.append([x[m], y[m], z[m]])
            result_rgb.append([r_new[m], g_new[m], b_new[m]])
    return result_xyz, result_rgb
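rgb2label and label_colours are defined elsewhere. Since p[i] is also fed to np.argmax above, one plausible reading is that p holds a per-point score vector and rgb2label collapses it to a class index:

import numpy as np

def rgb2label(scores):
    # hypothetical helper: collapse a per-point score vector to a label index
    return int(np.argmax(scores))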
Example #16
outfile_index = './MERRA2_index.csv'

# first check if the file exists 
if os.path.exists(outfile_index):
    print('file exists and will be deleted now')
    os.remove(outfile_index)

data_out = pd.DataFrame(columns=['sitelat','sitelon','MERRA2lati','MERRA2lonj'])

count = 0
for i in np.arange(0,n_sites):
#for i in np.arange(0,1):
    
    target_pts = [list_sitelon[i],list_sitelat[i]]
    #find the nearest model grid and return the index 
    distance,index = spatial.KDTree(merra2_grid).query(target_pts)
    #print ('# of distances', distance)
    
    # the nearest model location (in lat and lon index)
    lonlat_ind = np.unravel_index(index, merra2_dim)
    #print ('latlon index is ', lonlat_ind)
     
    #print ('save data', i)
    # .loc with a new row label enlarges the frame, so the deprecated
    # append()/drop(0, 1) round-trip is not needed
    data_out.loc[count, ['sitelat','sitelon','MERRA2lati','MERRA2lonj']] = [
        list_sitelat[i], list_sitelon[i], lonlat_ind[0], lonlat_ind[1]]
    count = count + 1
    print('ground lat-lon and MERRA-2 lat-lon are:', list_sitelat[i], list_sitelon[i],
          lat_merra2_2d[lonlat_ind[0], lonlat_ind[1]], lon_merra2_2d[lonlat_ind[0], lonlat_ind[1]])

print (data_out)
data_out.to_csv(outfile_index,na_rep='NaN', index=False)
Example #17

def calculateNeighbours(self, sigle_bin, deep):
    tree = spatial.KDTree(self._getCoord())
    pts = np.array(
        [sigle_bin["coordinates"]["lat"], sigle_bin["coordinates"]["lng"]])
    distance, location = tree.query(pts, k=deep)
    return location[-1:][0]
Example #18
def __init__(self, x):
    # scipy's KDTree needs 2-D data, so the 1-D input is padded with a zero column
    # (the original zip(x, dummy) returns a generator under Python 3 and breaks KDTree)
    dummy = np.zeros_like(x)
    self.tree = spatial.KDTree(np.column_stack((x, dummy)))
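A usage sketch of the padding trick (the class this __init__ belongs to is not shown, so Nearest1D is a hypothetical name); query points must be padded the same way:

import numpy as np

nn = Nearest1D(np.array([1.0, 3.0, 7.0]))  # hypothetical wrapper class
dist, idx = nn.tree.query((2.9, 0.0))      # pad the query with the same zero
print(idx)                                 # -> 1, the value 3.0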
Example #19
    [12, 16, 16],
    [68, 76, 53],
    [228, 178, 79],
    [99, 136, 155],
    [229, 228, 229],
    [205, 205, 207],
    [172, 175, 179],
    [118, 102, 70],
    [166, 144, 101],
    [204, 204, 203],
    [177, 179, 179],
    [251, 251, 251],
    [47, 59, 60],
    [13, 17, 18],
]
a = spatial.KDTree(RGB)  # nearest-neighbour lookup over the palette
for pt in rgb_list:
    b = a.query(pt)

    NearestRGB = RGB[b[1]]  # reuse the query result instead of rebuilding the tree

    s = '#' + format(NearestRGB[0], 'x').zfill(2) + format(
        NearestRGB[1], 'x').zfill(2) + format(NearestRGB[2], 'x').zfill(2)
    ColorHex = s.upper()  # "#8B7355"  # "#8B7355"
    ColorDiff = '(' + '{0:+d}'.format(
        NearestRGB[0] - pt[0]) + ',' + '{0:+d}'.format(
            NearestRGB[1] - pt[1]) + ',' + '{0:+d}'.format(NearestRGB[2] -
                                                           pt[2]) + ')'
    try:
        ColorName = HexNameDict[ColorHex]
        chines_name = color_name_zh[ColorHex]
Example #20
def main():
    # Initialize data
    # dt is a data container containing rows/labels
    dt = np.dtype([('labels', np.str_, 1), ('data', np.int16, (16, ))])
    testData = np.zeros((5000, 1), dt)
    trainData = np.zeros((15000, 1), dt)

    counter = 0
    # Read in data
    with open('letter-recognition.data') as f:
        for line in f:
            content = line.split(',')

            label = content.pop(0)
            data = np.int16(content)

            if counter < 15000:
                trainData.put(counter, [(label, data)])
            else:
                testData.put(counter - 15000, [(label, data)])

            counter = counter + 1
        # end for line in f
    # end with open

    # as per assignment
    d = [100, 1000, 2000, 5000, 10000, 15000]
    k = [1, 3, 5, 7, 9]

    # KNN with various parameters (sample size, number of neighbors)
    for samples in d:
        sampleData = trainData[random.sample(range(15000), samples)]
        for neighbors in k:
            predictions = knn(neighbors, sampleData, testData)
            performance = (
                sum(np.int16(predictions == np.squeeze(testData['labels']))) /
                5000.0) * 100
            print "KNN With %d sample size and %d neighbors. %.4f%% accuracy\n" % (
                samples, neighbors, performance)
        # end for neighbors
    # end for samples

    # 1-NN using KDTree
    predictions = []
    # build KDTree
    tree = spatial.KDTree(np.squeeze(trainData['data']))
    # this was slow no matter what I tried..
    for i in range(0, len(testData)):
        dist, ind = tree.query(np.squeeze(testData['data'][i]))
        predictions.append(trainData['labels'][ind][0])
    # end for i in range
    performance = (sum(np.int16(predictions == np.squeeze(testData['labels'])))
                   / 5000.0) * 100
    print "1-NN Using KDTree. %.4f%% accuracy\n" % (performance)

    # 1-NN using condensed training set
    condensedData = condense(trainData)
    print "Condensed data %d large\n" % (len(condensedData))
    predictions = knn(1, condensedData, testData)
    performance = (sum(np.int16(predictions == np.squeeze(testData['labels'])))
                   / 5000.0) * 100
    print "1-NN Using KDTree. %.4f%% accuracy\n" % (performance)
Example #21
            #Setup for getting nearest neighbors
            if cIdx == 0:

                #Get coordinates for nans
                nanBool = np.isnan(cData)
                nNan = np.sum(nanBool)
                cNanIdx = np.array(np.where(nanBool)).T

                #Get coordinates for useable data
                cUseIdx = np.array(
                    np.where(
                        np.logical_and(cData != 0.0,
                                       np.logical_not(nanBool)))).T

                #Make tree for finding nearest neighbours
                tree = spat.KDTree(cUseIdx)

                #Get 6 nearest neighbors for each nan
                nanD, nanNe = tree.query(cNanIdx, 6)

                #Convert neighbors to image coordinates
                interpIdx = cUseIdx[nanNe]

            #Loop through nans
            for nIdx in range(nNan):

                #Get values for neighbors
                neVal = cData[interpIdx[nIdx, :, 0], interpIdx[nIdx, :, 1],
                              interpIdx[nIdx, :, 2]]

                #Calculate distance weighted average
Example #22
    # add edge from G_temp to G
    # so that degree of nodes in G is <= 25% of nodes
    for edge in G_temp.edges():
        if edge not in G.edges():
            G.add_edge(edge[0], edge[1])

    # get node positions from G_temp
    positions = []
    for node in G_temp.nodes():
        x, y = G.nodes[node]['pos']  # networkx 2.x renamed G.node to G.nodes
        positions.append([x, y])

    # r is radius
    r = 1.0 / (nnodes - 5)
    kdtree = spatial.KDTree(positions)
    pairs = kdtree.query_pairs(r)

    # if nodes are close (inside r) but they don't have edges, add an edge between them
    for edge in pairs:
        if edge not in G.edges():
            G.add_edge(edge[0], edge[1])

    # draw real network
    title = '<br>Network graph'
    draw_Graph(G, "networkx.png", title)
    print("\nNetwork is saves in ", 'networkx.png')

    # get real network info for clustering
    edge_trace = get_edge_trace(G)
    node_trace = get_node_trace(G)
Example #23

    for test_digit in digits:
        for utterance in range(1, 3):

            n_frames = n_frame_dict[test_digit][test_speaker]
            test_mat = codeBook[test_digit][test_speaker][sum(n_frames[0:(
                utterance - 1)]):sum(n_frames[0:utterance])]

            sum_dist = np.zeros(10)
            for digit in digits:
                for l in range(len(test_mat)):
                    test_vec = np.asarray(test_mat)[l, :]
                    min_dist = float('Inf')
                    for speaker in train_speakers:
                        temp_list = codeBook[digit][speaker]
                        curr_dist, index = spatial.KDTree(temp_list).query(
                            test_vec)
                        if (curr_dist < min_dist):
                            min_dist = curr_dist

                    sum_dist[digits.index(digit)] += min_dist

            pred_digit = np.argmin(sum_dist)
            print("For ", test_speaker, " predicted digit = ", pred_digit,
                  " ground truth = ", test_digit)
            confusion_matrix[digits.index(test_digit), pred_digit] += 1

    np.save('BOF_confusion_matrix', confusion_matrix)

wer = 1 - np.trace(confusion_matrix) / 640
print(wer)
Example #24

contents, extracted_contents = list(), list()
flag = True
with open(sys.argv[1], 'r') as f:
    for line in f:
        sline = line.split()
        contents.append([float(sline[0]), float(sline[1]), float(sline[2])])
        if flag:
            extracted_contents.append(contents[-1])
        flag = not flag  # Switch between True and False

print(f'{"Method":<10}{"Time/s":>10}')

# time spent building the KD-tree; the tree is built from extracted_contents_a
start = time.time()
extracted_contents_a = np.array(extracted_contents)
tree = spatial.KDTree(extracted_contents_a[:, 0:2])
t0 = time.time() - start

# fixed radius
radius = 50
for j in range(8, 11):
    start = time.time()
    with open('f' + str(j) + '.txt', 'w') as f:
        for i in range(len(contents)):
            if i % 2:
                cur = contents[i]
                if j == 8:
                    z = LI(i)
                    m_name = 'LI'
                elif j == 9:
                    z = IDW(i)
Example #25
nPointsOut = 200
print("Number of points: ", nPoints)
in_mesh = np.random.random((nPoints, 2))

haltonPoints = halton_sequence(nPoints, 2)
for i in range(0, nPoints):
    in_mesh[i, 0] = haltonPoints[0][i]
    in_mesh[i, 1] = haltonPoints[1][i]

haltonPoints = halton_sequence(nPointsOut, 2)
out_mesh = np.random.random((nPointsOut, 2))
#for i in range(0,nPointsOut):
#	out_mesh[i,0] = haltonPoints[0][i] + 0.1*randint(0, 1)
#	out_mesh[i,1] = haltonPoints[1][i] + 0.1*randint(0, 1)

tree = spatial.KDTree(list(zip(in_mesh[:, 0], in_mesh[:, 1])))
nearest_neighbors = []
shape_params = []

plt.scatter(in_mesh[:, 0], in_mesh[:, 1], label="In Mesh", s=2)
plt.scatter(out_mesh[:, 0], out_mesh[:, 1], label="Out Mesh", s=2)
plt.show()

for j in range(0, nPoints):
    queryPt = (in_mesh[j, 0], in_mesh[j, 1])
    nnArray = tree.query(queryPt, 2)
    #print(nnArray[0][1])
    nearest_neighbors.append(nnArray[0][1])
    shape_params.append(0)

for i in range(0, 1):
Example #26
		print(file)
		v, e = read_graph(file)

		uf = UnionFind(len(v))
		for s, t in e:
			uf.union(s, t)

		dv = {uf_id: set() for uf_id in set(uf._id)}
		de = {uf_id: [] for uf_id in set(uf._id)}
		for s, t in e:
			assert(uf.find(s, t))
			de[uf._root(s)].append((s, t))
			dv[uf._root(s)].add(s)
			dv[uf._root(s)].add(t)

		kd_tree = spatial.KDTree(v)
		valid_uf_id = set([uf_id for uf_id in dv if len(dv[uf_id]) >= 3])
		pairs = kd_tree.query_pairs(r = 128)
		pairs = [(i, j) for i, j in pairs if uf._root(i) != uf._root(j)]
		pairs = [(i, j) for i, j in pairs if uf._root(i) in valid_uf_id and  uf._root(j) in valid_uf_id]
		pairs.extend([(j, i) for i, j in pairs])

		e_rec = []
		for uf_id in valid_uf_id:
			e_rec.extend(de[uf_id])
		e_rec.extend(pairs)
		e_rec = [(v[s], v[t]) for s, t in e_rec]
		v_rec = set()
		for s, t in e_rec:
			v_rec.add(s)
			v_rec.add(t)
Example #27
 def __trainLocal__(self,featureVals,targetVals):
   """
     Trains ROM.
     @ In, featureVals, np.ndarray, feature values
     @ In, targetVals, np.ndarray, target values
   """
   #check to make sure ROM was initialized
   if not self.initialized:
     self.raiseAnError(RuntimeError,'ROM has not yet been initialized!  Has the Sampler associated with this ROM been used?')
   self.raiseADebug('training',self.features,'->',self.target)
   self.featv, self.targv = featureVals,targetVals
   self.polyCoeffDict = {key: dict({}) for key in self.target}
   #check equality of point space
   self.raiseADebug('...checking required points are available...')
   fvs = []
   tvs = {key: list({}) for key in self.target}
   sgs = list(self.sparseGrid.points())
   missing=[]
   kdTree = spatial.KDTree(featureVals)
   #TODO this is slowest loop in this algorithm, by quite a bit.
   for pt in sgs:
     #KDtree way
     distances,idx = kdTree.query(pt,k=1,distance_upper_bound=1e-9) #FIXME how to set the tolerance generically?
      #KDTree reports a "not found" as an infinite distance with index len(data)
     if idx >= len(featureVals):
       found = False
     else:
       found = True
       point = tuple(featureVals[idx])
     #end KDTree way
     if found:
       fvs.append(point)
       for cnt, target in enumerate(self.target):
         tvs[target].append(targetVals[idx,cnt])
     else:
       missing.append(pt)
   if len(missing)>0:
     msg='\n'
     msg+='DEBUG missing feature vals:\n'
     for i in missing:
       msg+='  '+str(i)+'\n'
     self.raiseADebug(msg)
     self.raiseADebug('sparse:',sgs)
     self.raiseADebug('solns :',fvs)
     self.raiseAnError(IOError,'input values do not match required values!')
   #make translation matrix between lists, also actual-to-standardized point map
   self.raiseADebug('...constructing translation matrices...')
   translate={}
   for i in range(len(fvs)):
     translate[tuple(fvs[i])]=sgs[i]
   standardPoints = {}
   for pt in fvs:
     stdPt = []
     for i,p in enumerate(pt):
       varName = self.sparseGrid.varNames[i]
       stdPt.append( self.distDict[varName].convertToQuad(self.quads[varName].type,p) )
     standardPoints[tuple(pt)] = stdPt[:]
   #make polynomials
   self.raiseADebug('...constructing polynomials...')
   self.norm = np.prod(list(self.distDict[v].measureNorm(self.quads[v].type) for v in self.distDict.keys()))
   for i,idx in enumerate(self.indexSet):
     idx=tuple(idx)
     for target in self.target:
       self.polyCoeffDict[target][idx]=0
       wtsum=0
       for pt,soln in zip(fvs,tvs[target]):
         tupPt = tuple(pt)
         stdPt = standardPoints[tupPt]
         wt = self.sparseGrid.weights(translate[tupPt])
         self.polyCoeffDict[target][idx]+=soln*self._multiDPolyBasisEval(idx,stdPt)*wt
       self.polyCoeffDict[target][idx]*=self.norm
   self.amITrained=True
   self.raiseADebug('...training complete!')
Example #28
def compute_grid_dispersion(df_indices,
                            df_osm_built,
                            kwargs={
                                "radius_search": 750,
                                "use_median": True,
                                "K_nearest": 50
                            }):
    """ 
	Creates grid and calculates dispersion indices.

	Parameters
	----------
	df_indices : geopandas.GeoDataFrame
		data frame containing the (x,y) reference points to calculate indices
	df_osm_built : geopandas.GeoDataFrame
		data frame containing the building's geometries
	kw_args: dict
		additional keyword arguments for the indices calculation
			radius_search: int
				circle radius to consider the dispersion calculation at a local point
			use_median : bool
				denotes whether the median or mean should be used to calculate the indices
			K_nearest : int
				number of neighboring buildings to consider in evaluation

	Returns
	----------
	geopandas.GeoDataFrame
		data frame with the added column for dispersion indices
	"""
    log("Dispersion calculation")
    start = time.time()

    # Get radius search: circle radius to consider the dispersion calculation at a local point
    radius_search = kwargs["radius_search"]
    # Use the median or the mean computation?
    use_median = kwargs["use_median"]

    # Assign dispersion calculation method
    if use_median:
        _calculate_dispersion = closest_building_distance_median
    else:
        _calculate_dispersion = closest_building_distance_average

    # Calculate the closest distance for each building within K_nearest centroid buildings
    _apply_polygon_closest_distance_neighbor(df_osm_built,
                                             K_nearest=kwargs["K_nearest"])

    # For dispersion calculation approximation, create KDTree with buildings centroid
    coords_data = [
        point.coords[0]
        for point in df_osm_built.loc[df_osm_built.closest_d.notnull()].
        geometry.apply(lambda x: x.centroid)
    ]
    # Create KDTree
    tree = spatial.KDTree(coords_data)

    # Compute dispersion indices
    index_column = "dispersion"
    df_indices[index_column] = df_indices.geometry.apply(
        lambda x: _calculate_dispersion(x, tree, df_osm_built.closest_d,
                                        radius_search))

    # Remove added column
    df_osm_built.drop('closest_d', axis=1, inplace=True)

    end = time.time()
    log("Dispersion calculation time: " + str(end - start))
Example #29
# what is peak of label below "min peak elev"
# do we need this? no.

from scipy import spatial
# peaktree = np.array([peaks_xy[0], peaks_xy[1]]).transpose()
# tree = spatial.KDTree(peaktree, leafsize=10)
# tree.query([0, 0])

peaklist.loc[:, "iso_expected"] = 0

# how do we determine isolation?
# ugly: threshold above peak, find nearest neighbor
# cleaner: go through all layers again
for ll in range(1, len(topo_elev)):
    above_thresh = np.array(list(zip(*np.where(elev > topo_elev[ll]))))
    tree = spatial.KDTree(above_thresh, leafsize=10)
    query_list = np.array(
        list(
            zip(*[
                peaklist.peak_x[peaklist.band_below == ll - 1],
                peaklist.peak_y[peaklist.band_below == ll - 1]
            ])))
    distance, index = tree.query(query_list)
    peaklist.loc[peaklist.band_below == ll - 1, "iso_expected"] = distance
    print(ll)
    # build nntree for each threshold, seed with peaks just below layer

# interpret prominence_max to layers (floor)
prominence_max_steps = int(prominence_max / z_step)
Example #30

def sliding_contour_finder(image,
                           stepsize,
                           winW,
                           winH,
                           neighborhood,
                           border_contour,
                           skip_flood=False,
                           debug=False,
                           **kwargs):
    """Uses a sliding-window approach to find contours across a large image. Uses KDTree algorithm to
    remove duplicated contours from overlapping windows.

    Parameters
    ----------
    image : <numpy.ndarray> Query image
    stepsize : <int> Slide step size in pixels (currently the same in x and y directions)
    winW : <int> Window width in pixels
    winH : <int> Window height in pixels
    neighborhood : <int> Neighborhood size in pixels determining a unique contour
    **kwargs : Kwargs passed to `mcf`

    Returns
    -------
    contours : <list> A list of contours"""

    """Create windows for mini contour finder"""
    if debug: print("Creating windows...")

    # Create image of border
    clone = image.copy()
    blank = np.zeros(clone.shape[0:2], dtype=np.uint8)
    border_mask = cv2.drawContours(blank.copy(), border_contour, 0, (255), -1)
    # mask input image (leaves only the area inside the border contour)
    cutout = cv2.bitwise_and(clone, clone, mask=border_mask)

    n_windows = len(
        list(
            sliding_window(image=cutout.copy(),
                           stepSize=stepsize,
                           windowSize=(winW, winH))))
    windows = sliding_window(image=cutout.copy(),
                             stepSize=stepsize,
                             windowSize=(winW, winH))

    contours = []
    moments = []
    for i, (x, y, window) in tqdm(enumerate(windows),
                                  total=n_windows,
                                  desc='Windows'):
        if debug:
            print(("Window {}, x0: {}, y0: {}, shape: {}".format(
                i, x, y, np.shape(window))))
        if window.shape[0] != winH or window.shape[1] != winW: continue
        if window.sum() == 0: continue
        """Running mini contour finder in window"""
        if debug: print("Running mini contour finder...")
        window_contours = mcf(window, skip_flood=skip_flood, **kwargs)
        if debug:
            print("Found {} contours in window {}".format(
                len(window_contours), i))
        """Remove overlapping contours"""
        if debug: print("Refining contours...")
        for c in window_contours:
            c[:, :, 0] += x
            c[:, :, 1] += y

            M = cv2.moments(c)
            if M["m00"] != 0:
                cX = int((M["m10"] / M["m00"]))  # moment X
                cY = int((M["m01"] / M["m00"]))  # moment Y
            else:
                cX, cY = 0, 0

            if len(moments) == 0:
                contours.append(c)
                moments.append([cX, cY])
            else:  # if previous moments exist, find the distance and index of the nearest neighbor
                # note: the KDTree is rebuilt on every contour because `moments` keeps growing
                distance, index = spatial.KDTree(moments).query([cX, cY])
                if distance > neighborhood:  # add point if moment falls outside of neighborhood
                    contours.append(c)
                    moments.append([cX, cY])

    if debug: print("Found {} non-overlapping contours".format(len(contours)))

    return contours