Example #1
	parser.add_argument("--eps", help='epsilon value for DBSCAN (default: 0.00005)', type=float, default=0.00005)
	args = parser.parse_args()

	method = args.meth
	if method == "kmeans":
		sys.stderr.write("Clustering with K-Means method...\n")
		options = {'init':'random', 'n_clusters':args.n, 'n_jobs':-1, 'n_init':10}
	elif method == "affinity":
		sys.stderr.write("Clustering with Affinity Propagation method...\n")
		options = {'preference':-50}
	elif method == "dbscan":
		sys.stderr.write("Clustering with DBScan method...\n")
		options = {'eps':args.eps}
	elif method == "ward":
		sys.stderr.write("Clustering with Ward method...\n")
		options = {'n_clusters':args.n}
	elif method == "meanshift":
		sys.stderr.write("Clustering with Mean Shift method...\n")
		options = {'bandwidth':bandwidth, 'bin_seeding':True}
	else:
		sys.stderr.write("Error: Unsupported clustering method.\n")
		sys.exit(1)

	# Load file
	pcd_list = ut.loadPoints(args.file, [2,3,4])

	# Cluster the points
	clusters = cluster(pcd_list, method, options)
	# Show the clusters
	viewer(clusters, draw_elevation=False, c=clusters[:,2])
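The cluster(pcd_list, method, options) helper is defined elsewhere in the project and is not part of this excerpt. A minimal sketch of what such a dispatcher could look like with scikit-learn, assuming the points form an (N, 3) array and that the result carries the cluster label as an extra last column (the method names and option keys come from the snippet above; everything else is an assumption):

import numpy as np
from sklearn import cluster as skcluster

def cluster(points, method, options):
    # Hypothetical sketch: pick a scikit-learn estimator by name and
    # append the predicted cluster label to each point.
    estimators = {
        'kmeans': skcluster.KMeans,
        'affinity': skcluster.AffinityPropagation,
        'dbscan': skcluster.DBSCAN,
        'ward': skcluster.AgglomerativeClustering,  # Ward is its default linkage
        'meanshift': skcluster.MeanShift,
    }
    X = np.asarray(points, dtype=float)
    labels = estimators[method](**options).fit_predict(X)
    return np.column_stack((X, labels))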
Example #2
    parser = argparse.ArgumentParser()
    parser.add_argument("input", help='path to the input file', type=str)
    parser.add_argument("--degree",
                        "-d",
                        help='degree of the fitting polynomials (default: 3)',
                        type=int,
                        default=3)
    parser.add_argument("--slice",
                        "-s",
                        help='size of the chunks (default: 20000)',
                        type=int,
                        default=20000)
    args = parser.parse_args()

    # Load input file
    pcd_list = np.array(ut.loadPoints(args.input, [0, 1, 2]))

    # Compute the number of iterations to do given the slice size
    to_process = len(pcd_list)
    slice_size = args.slice
    iterations = to_process // slice_size
    # add one iteration for the final partial chunk, if any
    if iterations == 0 or to_process % slice_size != 0:
        iterations += 1

    # Initialize the iteration variable
    i = 0

    # Get the matplotlib object that shows the point cloud
    plt = viewer(pcd_list, get=True)
    # Compute the fitting polynomials and plot them
    for i in range(iterations):
Example #3
        'lidar_id': 1,
        'lat': 2,
        'lon': 3,
        'ele': 4,
        'val': 5
    }
    fields_out = {
        0: 'id',
        1: 'lidar_id',
        2: 'lat',
        3: 'lon',
        4: 'ele',
        5: 'val'
    }

    for c in chunks:
        pcd_list = ut.loadPoints(c, fields_in)
        # filter on elevation by getting the points between the 35th
        # percentile and the 65th percentile for each chunk
        elevations = [pcd["ele"] for pcd in pcd_list]
        options["minele"] = np.percentile(elevations, 35)
        options["maxele"] = np.percentile(elevations, 65)

        filtered_list += filterPoints(pcd_list, options)
        i += 1.
        sys.stderr.write("%.2f\r" % float(100 * i / l))

    sys.stderr.write("\nDone\n")

    ut.savePoints(filtered_list, args.output, fields_out)
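filterPoints(pcd_list, options) is project code that does not appear in this excerpt. Judging only from the option keys used here and in Example #7 (minval, minele, maxele) and from the dict-per-point layout returned by ut.loadPoints, a minimal sketch could be the following (an assumption, not the project's actual helper; it also assumes the loader returns numeric 'val' and 'ele' fields):

def filterPoints(pcd_list, options):
    # Hypothetical sketch: keep points whose intensity reaches 'minval'
    # and whose elevation lies within ['minele', 'maxele'], when those
    # bounds are present in the options dict.
    kept = []
    for pcd in pcd_list:
        if 'minval' in options and pcd['val'] < options['minval']:
            continue
        if 'minele' in options and pcd['ele'] < options['minele']:
            continue
        if 'maxele' in options and pcd['ele'] > options['maxele']:
            continue
        kept.append(pcd)
    return kept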
Example #4
if "__main__" == __name__:
    """
    Compute fitting polynomials for a given point cloud and plot them over the point cloud.

    input: input file containing the point cloud
    degree: degree of the fitting polynomials
    slice: size of the chunks to slice the data in order to process it faster
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("input", help='path to the input file', type=str)
    parser.add_argument("--degree", "-d", help='degree of the fitting polynoms (default: 3)', type=int, default=3)
    parser.add_argument("--slice", "-s", help='size of the chunks (default: 20000)', type=int, default=20000)
    args = parser.parse_args()

    # Load input file
    pcd_list = np.array(ut.loadPoints(args.input, [0,1,2]))

    # Compute the number of iterations to do given the slice size
    to_process = len(pcd_list)
    slice_size = args.slice
    iterations = to_process//slice_size
    # add one iteration for the final partial chunk, if any
    if iterations == 0 or to_process%slice_size != 0:
        iterations += 1

    # Initialize the iteration variable
    i = 0
    
    # Get the matplotlib object that shows the point cloud
    plt = viewer(pcd_list, get=True)
    # Compute the fitting polynomials and plot them
    for i in range(iterations):
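        # The loop body is cut off in this excerpt. The lines below are a
        # minimal sketch of what it could do, not the project's actual code:
        # slice the current chunk and fit/plot a polynomial of the requested
        # degree with numpy on top of the figure returned by viewer(get=True).
        begin = i * slice_size
        end = min((i + 1) * slice_size, to_process)
        chunk = pcd_list[begin:end]
        coeffs = np.polyfit(chunk[:, 0], chunk[:, 1], args.degree)
        xs = np.linspace(chunk[:, 0].min(), chunk[:, 0].max(), 100)
        plt.plot(xs, np.polyval(coeffs, xs), color='red')

    plt.show()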
Example #5
        ax.scatter(np_chunk[:, 0], np_chunk[:, 1], c=c, s=s, edgecolors='none')
    plt.xlabel('Latitude')
    plt.ylabel('Longitude')

    if get:
        return plt
    else:
        plt.show()


if "__main__" == __name__:
    parser = argparse.ArgumentParser()
    parser.add_argument("file", help='file to display', type=str)
    parser.add_argument("--w3d",
                        help='flag to show the points in a 3D graph',
                        action='store_true')
    parser.add_argument("x",
                        help='field of the file that contains the x value',
                        type=int)
    parser.add_argument("y",
                        help='field of the file that contains the y value',
                        type=int)
    parser.add_argument("z",
                        help='field of the file that contains the z value',
                        type=int)
    args = parser.parse_args()

    field = [args.x, args.y, args.z]
    chunk = ut.loadPoints(args.file, field)
    viewer(chunk, draw_elevation=args.w3d)
Example #6
			largest_c += getCluster(clusters, s[0])
		else:
			break

	return largest_c


if "__main__" == __name__:
	parser = argparse.ArgumentParser()
	parser.add_argument("input", help='input file that contains the point cloud', type=str)
	parser.add_argument("output", help='path to the output file', type=str)
	parser.add_argument("--slice", "-s", help='slice size (default: 15000)', type=int, default=15000)
	args = parser.parse_args()

	# Load input file
	pcd_list = ut.loadPoints(args.input, [2,3,4])

	# Compute the number of iterations to do given the slice size
	to_process = len(pcd_list)
	slice_size = args.slice
	iterations = to_process//slice_size
	# add one iteration for the final partial chunk, if any
	if iterations == 0 or to_process%slice_size != 0:
		iterations += 1
	largest_clusters = []
	i = 0

	for i in range(iterations):
		begin = i*slice_size
		if (i+1)*slice_size < to_process:
			end = (i+1)*slice_size
		else:
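getCluster() is defined elsewhere in the project and is not shown in this excerpt. Judging from the call getCluster(clusters, s[0]) and the fact that its result is concatenated into a list, a minimal sketch could be the following (it assumes the cluster label sits in the last column of the clusters array; an assumption, not the project's actual helper):

def getCluster(clusters, label):
	# Hypothetical sketch: return the points whose last column equals
	# the requested cluster label, as a list of rows.
	clusters = np.asarray(clusters)
	return list(clusters[clusters[:, -1] == label])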
Example #7
	# initialize options for the filter
	options = {}

	# filter on the intensity of the points
	# let's keep points that have an intensity value > 190

	options["minval"] = 190

	sys.stderr.write("Loading and filtering " + str(int(l)) + " chunks...\n")

	filtered_list = []
	fields_in = {'id':0, 'lidar_id':1, 'lat':2, 'lon':3, 'ele':4, 'val': 5}
	fields_out = {0:'id', 1:'lidar_id', 2:'lat', 3:'lon', 4:'ele', 5:'val'}

	for c in chunks:
		pcd_list = ut.loadPoints(c, fields_in)
		# filter on elevation by getting the points between the 35th
		# percentile and the 65th percentile for each chunk
		elevations = [pcd["ele"] for pcd in pcd_list]
		options["minele"] = np.percentile(elevations, 35)
		options["maxele"] = np.percentile(elevations, 65)

		filtered_list += filterPoints(pcd_list, options)
		i += 1.
		sys.stderr.write("%.2f\r" % float(100*i/l))

	sys.stderr.write("\nDone\n")

	ut.savePoints(filtered_list, args.output, fields_out)
Example #8
		ax = fig.add_subplot(111, projection='3d')
		ax.scatter(np_chunk[:,0], np_chunk[:,1], np_chunk[:,2], s=s)
	else:
		ax = fig.add_subplot(111)
		ax.grid(True,linestyle='-',color='0.75')
		if c is None:
			c = np_chunk[:,2]
		elif isinstance(c, int):
			c = [1]*len(np_chunk[:,0])
		ax.scatter(np_chunk[:,0], np_chunk[:,1], c=c, s=s, edgecolors='none')
	plt.xlabel('Latitude')
	plt.ylabel('Longitude')

	if get:
		return plt
	else:
		plt.show()

if "__main__" == __name__:
	parser = argparse.ArgumentParser()
	parser.add_argument("file", help='file to display', type=str)
	parser.add_argument("--w3d", help='flag to show the points in a 3D graph', action='store_true')
	parser.add_argument("x", help='field of the file that contains the x value', type=int)
	parser.add_argument("y", help='field of the file that contains the y value', type=int)
	parser.add_argument("z", help='field of the file that contains the z value', type=int)
	args = parser.parse_args()

	field = [args.x, args.y, args.z]
	chunk = ut.loadPoints(args.file, field)
	viewer(chunk, draw_elevation=args.w3d)
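The get flag is what lets the fitting scripts above draw on top of the point cloud: with get=True the function returns the matplotlib pyplot module instead of calling show(), so the caller can keep adding to the figure. A minimal usage sketch (the file name is a placeholder):

chunk = ut.loadPoints("cloud.txt", [2, 3, 4])  # hypothetical input file
plt = viewer(chunk, get=True)                  # build the scatter plot
plt.title("Filtered point cloud")              # add to the figure...
plt.show()                                     # ...then display it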
Example #9
        sys.stderr.write("Clustering with K-Means method...\n")
        options = {
            'init': 'random',
            'n_clusters': args.n,
            'n_jobs': -1,
            'n_init': 10
        }
    elif method == "affinity":
        sys.stderr.write("Clustering with Affinity Propagation method...\n")
        options = {'preference': -50}
    elif method == "dbscan":
        sys.stderr.write("Clustering with DBScan method...\n")
        options = {'eps': args.eps}
    elif method == "ward":
        sys.stderr.write("Clustering with Ward method...\n")
        options = {'n_clusters': args.n}
    elif method == "meanshift":
        sys.stderr.write("Clustering with Mean Shift method...\n")
        options = {'bandwidth': bandwidth, 'bin_seeding': True}
    else:
        sys.stderr.write("Error: Unsupported clustering method.\n")
        sys.exit(1)

    # Load file
    pcd_list = ut.loadPoints(args.file, [2, 3, 4])

    # Cluster the points
    clusters = cluster(pcd_list, method, options)
    # Show the clusters
    viewer(clusters, draw_elevation=False, c=clusters[:, 2])
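In this snippet (and in Example #1) the bandwidth variable passed to Mean Shift is computed outside the excerpt. One common way to obtain it is scikit-learn's estimate_bandwidth helper; a minimal sketch, where X stands for the (N, 3) array of points and the quantile value is an arbitrary assumption:

from sklearn.cluster import estimate_bandwidth

# estimate a Mean Shift bandwidth from (a sample of) the point cloud
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)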