def plan_path(self):

        print("Searching for a path ...")
        TARGET_ALTITUDE = 5
        SAFETY_DISTANCE = 5

        self.target_position[2] = TARGET_ALTITUDE

        # TODO: read lat0, lon0 from colliders into floating point values
        data = np.genfromtxt('colliders.csv', delimiter=',', dtype='str', max_rows=1)
        _, lat0 = data[0].strip().split(' ')
        _, lon0 = data[1].strip().split(' ')
        # TODO: set home position to (lon0, lat0, 0)
        self.set_home_position(float(lon0), float(lat0), 0)
        # TODO: retrieve current global position
        curr_glb_pos = [self._longitude, self._latitude, self._altitude]
        # TODO: convert to current local position using global_to_local()
        self._north, self._east, self._down = global_to_local(curr_glb_pos, self.global_home)
        print('global home {0}, position {1}, local position {2}'.format(self.global_home, self.global_position,
                                                                         self.local_position))
        # Read in obstacle map
        data = np.loadtxt('colliders.csv', delimiter=',', dtype='float64', skiprows=2)

        sampler = Sampler(data, SAFETY_DISTANCE)
        self.polygons = sampler._polygons
        nodes = sampler.sample(3)
        print('nodes_len: ', len(nodes))
        xx = [[p[0], p[1], TARGET_ALTITUDE, 0] for p in nodes]
        data = msgpack.dumps(xx)
        print(data)
        self.connection._master.write(data)
Example #2
def get_path(start=(0, 0, 6), goal=(331, 116, 19)):

    data = np.loadtxt('colliders.csv',
                      delimiter=',',
                      dtype='float64',
                      skiprows=2)
    north_offset = -316
    east_offset = -445
    sampler = Sampler(data)
    print("AAA")
    polygons = sampler._polygons
    print("BBB")
    nodes = sampler.sample(3000)
    # (339, 380) (441, 519)
    # start = (0, 0, 6)
    # goal = (331, 116, 19)
    nodes += [start, goal]
    print(nodes)
    g = create_graph(nodes, k=10, polygons=polygons)
    path, _ = a_star_graph(g, heuristic, start, goal)
    waypoints = [[int(p[0]), int(p[1]), int(p[2]), 0] for p in path]
    path_dict = {'path': waypoints}

    print(waypoints)
    with open('path.json', 'w') as json_file:
        json.dump(path_dict, json_file)

    with open('path.json', 'r') as json_file2:
        dta = json.load(json_file2)

    print(dta['path'])
    return waypoints
Example #3
    def expectation(self):
        if self.sampler is None:
            self.sampler = Sampler(self.dbm, self.sample_size, self.initial_update, self.update_time)
        
        values = self.sampler.sampling()

        expectations = [None for i in self.dbm.weights]
        signal_up = [None for i in self.dbm.weights]
        signal_down = [None for i in self.dbm.weights]
        multiply_up = [None for i in self.dbm.weights]
        multiply_down = [None for i in self.dbm.weights]
        for i,_ in enumerate(self.dbm.weights):
            signal_up[i] = self.dbm.signal(values[i], i+1)
            signal_down[i] = self.dbm.signal(values[i+1], -(i+1))
            multiply_up[i] = signal_up[i][:, tf.newaxis, :] - values[i][:, :, tf.newaxis] * self.dbm.weights[i]
            multiply_down[i] = signal_down[i][:, :, tf.newaxis] - values[i+1][:, tf.newaxis, :] * self.dbm.weights[i]
        
        # expectation[0]
        x = multiply_up[0] + self.mariginalize2(multiply_up[1], self.dbm.weights[1], axis=2)[:, tf.newaxis, :]
        y = multiply_down[0]
        z = self.dbm.weights[0][tf.newaxis, :, :]
        expectations[0] = self.mariginalize(x, y, z)

        # expectation[1]
        x = multiply_down[1] + self.mariginalize2( multiply_down[0], self.dbm.weights[0], axis=1)[:, :, tf.newaxis]
        y = multiply_up[1]
        z = self.dbm.weights[1][tf.newaxis, :, :]
        expectations[1] = self.mariginalize(x, y, z)

        return expectations
def create_graph_with_nodes(data, lat0, lon0, alt):
    print('[start]create_graph_with_nodes')

    sampler = Sampler(data)

    global polygons
    polygons = sampler._polygons
    nodes = sampler.sample(300)

    graph = create_graph(nodes, 10)
    print("Number of edges", len(graph.edges))
    grid = create_grid(data, sampler._zmax, 1)

    nmin = np.min(data[:, 0])
    emin = np.min(data[:, 1])

    start = (lat0 + nmin, lon0 + emin, alt)

    start_closest = closest_point(graph, start)
    k = np.random.randint(len(graph.nodes))
    print(k, len(graph.nodes))
    goal = list(graph.nodes)[k]
    print('start {0}'.format(start_closest))
    print('goal {0}'.format(goal))

    print('[end]create_graph_with_nodes')
    return grid, graph, start_closest, goal
def create_graph_probabilistic(data,
                               drone_altitude,
                               safety_distance,
                               k=10,
                               sample_size=300):

    sampler = Sampler(data, drone_altitude, safety_distance)
    polygons = sampler._polygons
    print("Polygons", len(polygons))
    nodes = sampler.sample(sample_size)
    print("Data", len(data))
    print("Probabilistic node length", len(nodes))
    g = nx.Graph()
    tree = KDTree(nodes)
    for n1 in nodes:
        # try to connect each node to its k nearest neighbors
        idxs = tree.query([n1], k, return_distance=False)[0]

        for idx in idxs:
            n2 = nodes[idx]
            if n2 == n1:
                continue

            if can_connect(n1, n2, polygons):
                g.add_edge(n1, n2, weight=1)

    return g
Example #6
    def expectation(self):
        if self.sampler is None:
            self.sampler = Sampler(self.dbm, self.sample_size, self.initial_update, self.update_time)
        
        values = self.sampler.sampling()
        weight = self.dbm.weight_matrix(values)

        return weight
    def plan_path(self):
        self.flight_state = States.PLANNING

        print("Searching for a path ...")
        TARGET_ALTITUDE = 5
        SAFETY_DISTANCE = 5

        self.target_position[2] = TARGET_ALTITUDE

        # TODO: read lat0, lon0 from colliders into floating point values
        data = np.genfromtxt('colliders.csv',
                             delimiter=',',
                             dtype='str',
                             max_rows=1)
        _, lat0 = data[0].strip().split(' ')
        _, lon0 = data[1].strip().split(' ')
        # TODO: set home position to (lon0, lat0, 0)
        self.set_home_position(float(lon0), float(lat0), 0)
        # TODO: retrieve current global position
        curr_glb_pos = [self._longitude, self._latitude, self._altitude]
        # TODO: convert to current local position using global_to_local()
        self._north, self._east, self._down = global_to_local(
            curr_glb_pos, self.global_home)
        print('global home {0}, position {1}, local position {2}'.format(
            self.global_home, self.global_position, self.local_position))
        # Read in obstacle map
        data = np.loadtxt('colliders.csv',
                          delimiter=',',
                          dtype='float64',
                          skiprows=2)

        sampler = Sampler(data, SAFETY_DISTANCE)
        self.polygons = sampler._polygons
        nodes = sampler.sample(300)
        print('nodes_len: ', len(nodes))

        g = self.create_graph(nodes, 10)
        print('graph_edges: ', len(g.edges))
        start = self.local_position
        goal = global_to_local([-122.396428, 37.795128, TARGET_ALTITUDE],
                               self.global_home)

        start = self.find_closest_node(g.nodes, start)
        goal = self.find_closest_node(g.nodes, goal)
        path, cost = a_star_for_graph(g, heuristic, start, goal)
        print('a_star_path: ', path)
        path = self.prune_path(path)
        print('prune_path: ', path)
        if len(path) > 0:
            # Convert path to waypoints
            waypoints = [[p[0], p[1], TARGET_ALTITUDE, 0] for p in path]
            # Set self.waypoints
            self.waypoints = waypoints
            # TODO: send waypoints to sim (this is just for visualization of waypoints)
            self.send_waypoints()
Example #8
def sample_nodes(data, grid_start, grid_goal, debug):
    start_time = time.time()
    sampler = Sampler(data)
    # Extract all the polygons
    polygons = sampler._polygons
    nodes = sampler.sample(grid_start, grid_goal, NUM_SAMPLES)
    stop_time = time.time()
    if debug:
        print("Time taken to build Sampler is: {0:5.2f}s".format(stop_time -
                                                                 start_time))
    return nodes, polygons
def nonbatch(task, method, N, M):
    simulation_object = create_env(task)
    d = simulation_object.num_of_features
    lower_input_bound = [x[0] for x in simulation_object.feed_bounds]
    upper_input_bound = [x[1] for x in simulation_object.feed_bounds]

    w_sampler = Sampler(d)
    psi_set = []
    s_set = []
    input_A = np.random.uniform(low=2 * lower_input_bound,
                                high=2 * upper_input_bound,
                                size=(2 * simulation_object.feed_size))
    input_B = np.random.uniform(low=2 * lower_input_bound,
                                high=2 * upper_input_bound,
                                size=(2 * simulation_object.feed_size))
    psi, s = get_feedback(simulation_object, input_A, input_B)
    psi_set.append(psi)
    s_set.append(s)
    for i in range(1, N):
        w_sampler.A = psi_set
        w_sampler.y = np.array(s_set).reshape(-1, 1)
        w_samples = w_sampler.sample(M)
        mean_w_samples = np.mean(w_samples, axis=0)
        print('w-estimate = {}'.format(mean_w_samples /
                                       np.linalg.norm(mean_w_samples)))
        input_A, input_B = run_algo(method, simulation_object, w_samples)
        psi, s = get_feedback(simulation_object, input_A, input_B)
        psi_set.append(psi)
        s_set.append(s)
    w_sampler.A = psi_set
    w_sampler.y = np.array(s_set).reshape(-1, 1)
    w_samples = w_sampler.sample(M)
    mean_w_samples = np.mean(w_samples, axis=0)
    print('w-estimate = {}'.format(mean_w_samples /
                                   np.linalg.norm(mean_w_samples)))
Example #10
def nonbatch(task, method, N, M):
    simulation_object = create_env(task)
    d = simulation_object.num_of_features
	
    w_true = 2*np.random.rand(d)-1
    w_true = w_true / np.linalg.norm(w_true)
    print('If in automated mode: true w = {}'.format(w_true/np.linalg.norm(w_true)))
	
    lower_input_bound = [x[0] for x in simulation_object.feed_bounds]
    upper_input_bound = [x[1] for x in simulation_object.feed_bounds]

    w_sampler = Sampler(d)
    psi_set = []
    s_set = []
    for i in range(N):
        w_sampler.A = psi_set
        w_sampler.y = np.array(s_set).reshape(-1,1)
        w_samples = w_sampler.sample(M)
        mean_w_samples = np.mean(w_samples,axis=0)
        print('Samples so far: ' + str(i))
        print('w estimate = {}'.format(mean_w_samples/np.linalg.norm(mean_w_samples)))
        print('Alignment = {}'.format(mean_w_samples.dot(w_true)/np.linalg.norm(mean_w_samples)))
        input_A, input_B = run_algo(method, simulation_object, w_samples)
        psi, s = get_feedback(simulation_object, input_A, input_B, w_true)
        psi_set.append(psi)
        s_set.append(s)
    w_sampler.A = psi_set
    w_sampler.y = np.array(s_set).reshape(-1,1)
    w_samples = w_sampler.sample(M)
    mean_w_samples = np.mean(w_samples, axis=0)
    print('Samples so far: ' + str(N))
    print('w estimate = {}'.format(mean_w_samples/np.linalg.norm(mean_w_samples)))
    print('Alignment = {}'.format(mean_w_samples.dot(w_true)/np.linalg.norm(mean_w_samples)))
Example #11
class first_smci:
    def __init__(self, dbm, sample_size=500, initial_update=1000, update_time=1):
        self.sampler = None
        self.dbm = dbm
        self.initial_update = initial_update
        self.update_time = update_time
        self.sample_size = sample_size
        self.mariginalize = self.dbm.propagation.first_smci_marginalize
    
    def expectation(self):
        if self.sampler is None:
            self.sampler = Sampler(self.dbm, self.sample_size, self.initial_update, self.update_time)
        
        values = self.sampler.sampling()

        signals = [None for i in self.dbm.layers]
        signals[0] = self.dbm.signal(values[1], -1)
        for i in range(1, len(self.dbm.layers)-1):
            signals[i] = self.dbm.signal(values[i-1], i) + self.dbm.signal(values[i+1], -(i+1))
        signals[-1] = self.dbm.signal(values[-2], len(self.dbm.weights))

        multiply_up = [None for i in self.dbm.weights]
        multiply_down = [None for i in self.dbm.weights]
        for i,_ in enumerate(self.dbm.weights):
            multiply_up[i] = values[i][:, :, tf.newaxis] * self.dbm.weights[i]
            multiply_down[i] = values[i+1][:, tf.newaxis, :] * self.dbm.weights[i]
        
        expectations = [None for i in self.dbm.weights]
        for i,_ in enumerate(self.dbm.weights):
            expectations[i] = self.mariginalize( signals[i][:, :, tf.newaxis]-multiply_down[i], signals[i+1][:, tf.newaxis, :]-multiply_up[i], self.dbm.weights[i])

        return expectations
def batch(task, method, N, M, b):
    if N % b != 0:
        print('N must be divisible by b')
        exit(0)
    B = 20 * b

    simulation_object = create_env(task)
    d = simulation_object.num_of_features
    lower_input_bound = [x[0] for x in simulation_object.feed_bounds]
    upper_input_bound = [x[1] for x in simulation_object.feed_bounds]

    w_sampler = Sampler(d)
    psi_set = []
    s_set = []
    inputA_set = np.random.uniform(low=2 * lower_input_bound,
                                   high=2 * upper_input_bound,
                                   size=(b, 2 * simulation_object.feed_size))
    inputB_set = np.random.uniform(low=2 * lower_input_bound,
                                   high=2 * upper_input_bound,
                                   size=(b, 2 * simulation_object.feed_size))
    for j in range(b):
        input_A = inputA_set[j]
        input_B = inputB_set[j]
        psi, s = get_feedback(simulation_object, input_A, input_B)
        psi_set.append(psi)
        s_set.append(s)
    i = b
    while i < N:
        w_sampler.A = psi_set
        w_sampler.y = np.array(s_set).reshape(-1, 1)
        w_samples = w_sampler.sample(M)
        mean_w_samples = np.mean(w_samples, axis=0)
        print('w-estimate = {}'.format(mean_w_samples /
                                       np.linalg.norm(mean_w_samples)))
        print('Samples so far: ' + str(i))
        inputA_set, inputB_set = run_algo(method, simulation_object, w_samples,
                                          b, B)
        for j in range(b):
            input_A = inputA_set[j]
            input_B = inputB_set[j]
            psi, s = get_feedback(simulation_object, input_B, input_A)
            psi_set.append(psi)
            s_set.append(s)
        i += b
    w_sampler.A = psi_set
    w_sampler.y = np.array(s_set).reshape(-1, 1)
    w_samples = w_sampler.sample(M)
    mean_w_samples = np.mean(w_samples, axis=0)
    print('w-estimate = {}'.format(mean_w_samples /
                                   np.linalg.norm(mean_w_samples)))
Example #13
def nonbatch(task, criterion, query_type, epsilon, M):
	simulation_object = create_env(task)		
	d = simulation_object.num_of_features

	true_delta = 1 # make this None if you will also learn delta, and change the samplers below from sample_given_delta to sample (and of course remove the true_delta argument)

	lower_input_bound = [x[0] for x in simulation_object.feed_bounds]
	upper_input_bound = [x[1] for x in simulation_object.feed_bounds]

	w_sampler = Sampler(d)
	i = 0
	score = np.inf
	while score >= epsilon:
		w_samples, delta_samples = w_sampler.sample_given_delta(M, query_type, true_delta)
		mean_w_samples = np.mean(w_samples,axis=0)
		print('w-estimate = {}'.format(mean_w_samples/np.linalg.norm(mean_w_samples)))
		input_A, input_B, score = run_algo(criterion, simulation_object, w_samples, delta_samples)
		if criterion == 'information':
			print('Expected info gain = {}'.format(score))
		elif criterion == 'volume':
			print('Expected volume removal (meaningless scale) = {}'.format(score/M))
		if score > epsilon:
			phi_A, phi_B, s = get_feedback(simulation_object, input_A, input_B, query_type)
			w_sampler.feed(phi_A, phi_B, [s])
			i += 1
	w_samples, delta_samples = w_sampler.sample_given_delta(M, query_type, true_delta)
	mean_w_samples = np.mean(w_samples,axis=0)
	print('w-estimate = {}'.format(mean_w_samples/np.linalg.norm(mean_w_samples)))
Example #14
    def expectation(self):
        if self.sampler is None:
            self.sampler = Sampler(self.dbm, self.sample_size, self.initial_update, self.update_time)
        
        values = self.sampler.sampling()

        signals = [None for i in self.dbm.layers]
        signals[0] = self.dbm.signal(values[1], -1)
        for i in range(1, len(self.dbm.layers)-1):
            signals[i] = self.dbm.signal(values[i-1], i) + self.dbm.signal(values[i+1], -(i+1))
        signals[-1] = self.dbm.signal(values[-2], len(self.dbm.weights))

        multiply_up = [None for i in self.dbm.weights]
        multiply_down = [None for i in self.dbm.weights]
        for i,_ in enumerate(self.dbm.weights):
            multiply_up[i] = values[i][:, :, tf.newaxis] * self.dbm.weights[i]
            multiply_down[i] = values[i+1][:, tf.newaxis, :] * self.dbm.weights[i]
        
        expectations = [None for i in self.dbm.weights]
        for i,_ in enumerate(self.dbm.weights):
            expectations[i] = self.mariginalize( signals[i][:, :, tf.newaxis]-multiply_down[i], signals[i+1][:, tf.newaxis, :]-multiply_up[i], self.dbm.weights[i])

        return expectations
Example #15
class four_layer_second_smci:
    def __init__(self, dbm, sample_size=500, initial_update=1000, update_time=1):
        if len(dbm.layers) != 4:
            raise ValueError("2-SMCI supports only 4-layered DBM.")

        self.sampler = None
        self.dbm = dbm
        self.initial_update = initial_update
        self.update_time = update_time
        self.sample_size = sample_size
        self.mariginalize = self.dbm.propagation.first_smci_marginalize
        self.mariginalize2 = self.dbm.propagation.second_smci_marginalize
    
    def expectation(self):
        if self.sampler is None:
            self.sampler = Sampler(self.dbm, self.sample_size, self.initial_update, self.update_time)
        
        values = self.sampler.sampling()

        expectations = [None for i in self.dbm.weights]
        signal_up = [None for i in self.dbm.weights]
        signal_down = [None for i in self.dbm.weights]
        multiply_up = [None for i in self.dbm.weights]
        multiply_down = [None for i in self.dbm.weights]
        for i,_ in enumerate(self.dbm.weights):
            signal_up[i] = self.dbm.signal(values[i], i+1)
            signal_down[i] = self.dbm.signal(values[i+1], -(i+1))
            multiply_up[i] = signal_up[i][:, tf.newaxis, :] - values[i][:, :, tf.newaxis] * self.dbm.weights[i]
            multiply_down[i] = signal_down[i][:, :, tf.newaxis] - values[i+1][:, tf.newaxis, :] * self.dbm.weights[i]

        # expectation[0]
        x = multiply_up[0] + self.mariginalize2( signal_down[2][:, tf.newaxis, :] + multiply_up[1], self.dbm.weights[1], axis=2)[:, tf.newaxis, :]
        y = multiply_down[0]
        z = self.dbm.weights[0][tf.newaxis, :, :]
        expectations[0] = self.mariginalize(x, y, z)

        # expectation[1]
        x = multiply_down[1] + self.mariginalize2( multiply_down[0], self.dbm.weights[0], axis=1)[:, :, tf.newaxis]
        y = multiply_up[1] + self.mariginalize2( multiply_up[2], self.dbm.weights[2], axis=2)[:, tf.newaxis, :]
        z = self.dbm.weights[1][tf.newaxis, :, :]
        expectations[1] = self.mariginalize(x, y, z)

        # expectation[2]
        x = multiply_up[2]
        y = multiply_down[2] + self.mariginalize2( signal_up[0][:, :, tf.newaxis] + multiply_down[1], self.dbm.weights[1], axis=1)[:, :, tf.newaxis]
        z = self.dbm.weights[2][tf.newaxis, :, :]
        expectations[2] = self.mariginalize(x, y, z)

        return expectations
 def read_ply(self, file_name):
     num_samples = self.num_samples // len(self.files_list)
     if self.file_index == len(self.files_list) - 1:
         num_samples = num_samples + (self.num_samples - (num_samples * len(self.files_list)))
     
     root, ext = os.path.splitext(file_name)
     if not os.path.isfile(root + ".npy"):
         ply = PlyData.read(file_name)
         vertex = ply['vertex']
         (x, y, z) = (vertex[t] for t in ('x', 'y', 'z'))
         points = list(zip(x.ravel(), y.ravel(), z.ravel()))
         np.save(root + ".npy", points)
     else:
         points = np.load(root + ".npy")
     
     #load normals
     if os.path.isfile(root + "_normals" + ".ply"):
         if not os.path.isfile(root + "_normals" + ".npy"):
             ply1 = PlyData.read(root + "_normals" + ".ply")
             vertex = ply1['vertex']
             (nx, ny, nz) = (vertex[t] for t in ('nx', 'ny', 'nz'))
             self.normals = np.asarray(list(zip(nx.ravel(), ny.ravel(), nz.ravel())))
             np.save(root + "_normals" + ".npy", self.normals)
         else:
             self.normals = np.load(root + "_normals" + ".npy")
     
     if self.add_noise:
         self.data = utils.add_noise_normal(points, std=self.nois_std)
     else:
         self.data = np.asarray(points)
     
     self.pc_diameter = utils.get_pc_diameter(self.data)
     self.l = self.relL*self.pc_diameter
     
     rot = utils.angle_axis_to_rotation(self.rotation_angle, self.rotation_axis)
     self.data = utils.transform_pc(self.data, rot)
     
     #plotutils.show_pc(self.data)
     #mlab.show()
             
     #TODO: better sampling
     print "sampling file: ", file_name
     self.samples, self.sample_indices = Sampler.sample(self.data, -1, min_num_point=-1, file_name=file_name, sampling_algorithm=self.sampling_algorithm)
     #self.samples, self.sample_indices = Sampler.sample(self.data, -1, num_samples, file_name=file_name, sampling_algorithm=self.sampling_algorithm)
     #self.samples = self.samples[0:num_samples]
     #self.sample_indices = self.sample_indices[0:num_samples]
     
     self.tree = spatial.KDTree(self.data)
     return self.data
def nonbatch(task, method, N, M, checkpoints=None):
    if checkpoints is None:
        checkpoints = []
    checkpointed_weights = []
    simulation_object = create_env(task)
    d = simulation_object.num_of_features
    lower_input_bound = [x[0] for x in simulation_object.feed_bounds]
    upper_input_bound = [x[1] for x in simulation_object.feed_bounds]

    w_sampler = Sampler(d)
    psi_set = []
    s_set = []
    input_A = np.random.uniform(low=2 * lower_input_bound,
                                high=2 * upper_input_bound,
                                size=(2 * simulation_object.feed_size))
    input_B = np.random.uniform(low=2 * lower_input_bound,
                                high=2 * upper_input_bound,
                                size=(2 * simulation_object.feed_size))
    psi, s = get_feedback_auto(
        simulation_object, input_A,
        input_B)  # psi is the difference, s is the 1 or -1 signal
    psi_set.append(psi)
    s_set.append(s)
    for i in range(1, N):
        w_sampler.A = psi_set
        w_sampler.y = np.array(s_set).reshape(-1, 1)
        w_samples = w_sampler.sample(M)
        mean_w_samples = np.mean(w_samples, axis=0)
        print('w-estimate = {}'.format(mean_w_samples /
                                       np.linalg.norm(mean_w_samples)))
        if i in checkpoints:
            checkpointed_weights.append(mean_w_samples /
                                        np.linalg.norm(mean_w_samples))
            print("Weights saved at iteration {}".format(i))
        input_A, input_B = run_algo(method, simulation_object, w_samples)
        psi, s = get_feedback_auto(simulation_object, input_A, input_B)
        psi_set.append(psi)
        s_set.append(s)
    w_sampler.A = psi_set
    w_sampler.y = np.array(s_set).reshape(-1, 1)
    w_samples = w_sampler.sample(M)
    mean_w_samples = np.mean(w_samples, axis=0)
    checkpointed_weights.append(mean_w_samples /
                                np.linalg.norm(mean_w_samples))
    print('w-estimate = {}'.format(mean_w_samples /
                                   np.linalg.norm(mean_w_samples)))
    return checkpointed_weights
Example #18
class montecarlo:
    def __init__(self, dbm, sample_size=500, initial_update=1000, update_time=1):
        self.sampler = None
        self.dbm = dbm
        self.initial_update = initial_update
        self.update_time = update_time
        self.sample_size = sample_size

    def expectation(self):
        if self.sampler is None:
            self.sampler = Sampler(self.dbm, self.sample_size, self.initial_update, self.update_time)
        
        values = self.sampler.sampling()
        weight = self.dbm.weight_matrix(values)

        return weight
Example #19
    def read_ply(self, file_name, num_samples=1000, sample_class_start=0, add_noise=False,
                 noise_prob=0.3, noise_factor=0.02, noise_std=0.1, sampling_algorithm=SampleAlgorithm.Uniform,
                 rotation_axis=[0, 0, 1], rotation_angle=0):
         
        root, ext = os.path.splitext(file_name)
        if not os.path.isfile(root + ".npy"):
            ply = PlyData.read(file_name)
            vertex = ply['vertex']
            (x, y, z) = (vertex[t] for t in ('x', 'y', 'z'))
            points = list(zip(x.ravel(), y.ravel(), z.ravel()))
            np.save(root + ".npy", points)
        else:
            points = np.load(root + ".npy")
        
        #load normals
        if os.path.isfile(root + "_normals" + ".ply"):
            if not os.path.isfile(root + "_normals" + ".npy"):
                ply1 = PlyData.read(root + "_normals" + ".ply")
                vertex = ply1['vertex']
                (nx, ny, nz) = (vertex[t] for t in ('nx', 'ny', 'nz'))
                self.normals = np.asarray(list(zip(nx.ravel(), ny.ravel(), nz.ravel())))
                np.save(root + "_normals" + ".npy", self.normals)
            else:
                self.normals = np.load(root + "_normals" + ".npy")
        
        if add_noise:
            print "adding noise to model.."
            mr = utils.model_resolution(np.array(points))
            #mr = 0.404
            print "model resolution: ", mr
            self.data = utils.add_noise_normal(np.array(points), mr, noise_std)
        else:
            self.data = np.asarray(points)
        rot = utils.angle_axis_to_rotation(rotation_angle, rotation_axis)
        self.data = utils.transform_pc(self.data, rot)
        #plotutils.show_pc(self.data)
        #mlab.show()
        # TODO: better sampling
        self.samples, self.sample_indices = Sampler.sample(self.data, -1, num_samples-1, file_name=file_name, pose=rot, sampling_algorithm=sampling_algorithm)
        self.tree = spatial.KDTree(self.data) 
        self.sample_class_start = sample_class_start
        self.sample_class_current = sample_class_start
        self.num_samples = self.samples.shape[0]
        print "num samples: ", self.num_samples
        logging.basicConfig(filename='example.log',level=logging.DEBUG)
        return self.data
Example #20
def batch(task, method, N, M, b):
    if N % b != 0:
        print('N must be divisible by b')
        exit(0)
    B = 20*b

    simulation_object = create_env(task)
    d = simulation_object.num_of_features
	
    w_true = 2*np.random.rand(d)-1
    w_true = w_true / np.linalg.norm(w_true)
    print('If in automated mode: true w = {}'.format(w_true/np.linalg.norm(w_true)))
	
    lower_input_bound = [x[0] for x in simulation_object.feed_bounds]
    upper_input_bound = [x[1] for x in simulation_object.feed_bounds]

    w_sampler = Sampler(d)
    psi_set = []
    s_set = []
    i = 0
    while i < N:
        w_sampler.A = psi_set
        w_sampler.y = np.array(s_set).reshape(-1,1)
        w_samples = w_sampler.sample(M)
        mean_w_samples = np.mean(w_samples,axis=0)
        print('Samples so far: ' + str(i))
        print('w estimate = {}'.format(mean_w_samples/np.linalg.norm(mean_w_samples)))
        print('Alignment = {}'.format(mean_w_samples.dot(w_true)/np.linalg.norm(mean_w_samples)))
        inputA_set, inputB_set = run_algo(method, simulation_object, w_samples, b, B)
        for j in range(b):
            input_A = inputA_set[j]
            input_B = inputB_set[j]
            psi, s = get_feedback(simulation_object, input_B, input_A, w_true)
            psi_set.append(psi)
            s_set.append(s)
        i += b
    w_sampler.A = psi_set
    w_sampler.y = np.array(s_set).reshape(-1,1)
    w_samples = w_sampler.sample(M)
    mean_w_samples = np.mean(w_samples, axis=0)
    print('Samples so far: ' + str(N))
    print('w estimate = {}'.format(mean_w_samples/np.linalg.norm(mean_w_samples)))
    print('Alignment = {}'.format(mean_w_samples.dot(w_true)/np.linalg.norm(mean_w_samples)))
def find_threshold(num_weighted_samples,
                   num_random_samples,
                   reward_values,
                   num_membership_queries=0,
                   task='driver',
                   method="nonbatch"):
    # first, sample the trajectories from the distribution
    simulation_object = create_env(task)
    d = simulation_object.num_of_features
    lower_input_bound = [x[0] for x in simulation_object.feed_bounds]
    upper_input_bound = [x[1] for x in simulation_object.feed_bounds]

    w_sampler = Sampler(d)
    # set the reward weights of the sampler
    # set the number of membership queries as a log function of the total # of samples
    num_membership_queries = max(
        num_membership_queries,
        int(math.ceil(math.log(num_weighted_samples + num_random_samples))))
    reward_traj_set = collect_trajectories(simulation_object, method,
                                           num_weighted_samples, reward_values)
    random_traj_set = collect_trajectories(simulation_object, "random",
                                           num_random_samples, reward_values)
    w_true = np.array([0.56687795, -0.51010378, 0.5178173, 0.38769675])

    svm_reward_set = reward_traj_set[:num_membership_queries] + collect_trajectories(
        simulation_object, method, num_weighted_samples, reward_values)
    # adding n more samples to the svm dataset --> n + log(n) samples

    svm_random_set = random_traj_set[:num_membership_queries] + collect_trajectories(
        simulation_object, "random", num_weighted_samples, reward_values)
    # adding n more samples to the svm dataset --> n + log(n) samples

    full_traj_set = reward_traj_set + random_traj_set
    # sort the trajectories by reward
    sorted_lattice = lattice.sort_on_rewards(full_traj_set)

    #test set trajectories sampled from w_true
    f_reward = open('reward_test_set.obj', 'rb')
    reward_traj_set_test = pickle.load(f_reward)
    f_reward.close()
    #test set trajectories sampled randomly
    f_random = open('random_test_set.obj', 'rb')
    random_traj_set_test = pickle.load(f_random)
    f_random.close()

    #get data and labels for the test set
    x = []
    y = []
    r = []
    reward_traj_set_test = reward_traj_set_test + random_traj_set_test
    for node in reward_traj_set_test:
        #print(node.reward_value)
        x.append(node.features)
        reward = np.sum(np.dot(w_true, node.features))
        r.append(reward)
        if reward < 0.74:
            y.append(0)
        else:
            y.append(1)
    print(y)

    #now, begin getting membership query feedback on things
    bsearch_reward_bound, labeled_data = membership_threshold(
        sorted_lattice, simulation_object, get_labels=True)
    svm_bsearch_coeff, svm_bsearch_inter, clssfr_bsearch = svm_threshold(
        svm_reward_set, simulation_object, labeled_samples=labeled_data)
    svm_reward_coeff, svm_reward_inter, clssfr_reward = svm_threshold(
        svm_reward_set, simulation_object)
    svm_random_coeff, svm_random_inter, clssfr_random = svm_threshold(
        svm_random_set, simulation_object)
    # finished process
    print("Reward boundary retrieved from binary search method is {}".format(
        bsearch_reward_bound))
    print(
        "SVM coefficient and intercept for same queries as binary search are: {} and {}"
        .format(svm_bsearch_coeff, svm_bsearch_inter))
    print(
        "SVM coefficient and intercept for reward-sampled queries are: {} and {}"
        .format(svm_reward_coeff, svm_reward_inter))
    print(
        "SVM coefficient and intercept for random-sampled queries are: {} and {}"
        .format(svm_random_coeff, svm_random_inter))
    print("Reward weights for task are {}".format(reward_values))

    acc_bsearch = get_accuracy(r,
                               y,
                               reward_bound=bsearch_reward_bound,
                               clssfr=None)
    acc_svm_learnt = get_accuracy(x,
                                  y,
                                  reward_bound=None,
                                  clssfr=clssfr_reward)
    acc_svm_random = get_accuracy(x,
                                  y,
                                  reward_bound=None,
                                  clssfr=clssfr_random)

    print("Accuracy for binary search is ", acc_bsearch)
    print("Accuracy for svm with reward-sampled queries is ", acc_svm_learnt)
    print("Accuracy for svm with randomly-sampled queries is ", acc_svm_random)
Example #22
     if args.cell_type == 1:
         rh = np.zeros([1, args.internal_size * (args.layers * 2)])
     else:
         rh = np.zeros([1, args.internal_size * (args.layers)])
     out_str = ""
     for k in range(1000):
         ryo, rh = sess.run([Yo, H],
                            feed_dict={
                                X: ry,
                                dropout_prob: 1.0,
                                initial_state: rh,
                                batchsize: 1
                            })
         char_int_list = []
         for i in range(1):
             char_int = Sampler.sample_by_prop(ryo[i], VOCAB_SIZE, 0)
             out_str += str(translation["INT_TO_CHAR"][char_int])
             char_int_list.append(char_int)
         ry = np.array([char_int_list])
     with open(
             args.out_folder + "samples/sample_" + timestamp +
             "_epoch_" + str(epoch + 1) + ".txt", "wb") as fd:
         fd.write(
             b"======================================== SAMPLE ========================================\n"
         )
         fd.write(out_str.encode())
         fd.write(
             b"\n====================================== END SAMPLE ======================================\n"
         )
 if (epoch + 1) % 10 == 0 and LAST_DIVISION != (epoch + 1):
     learn_rate /= 2  # from the seq2seq paper but a wee bit later epoch wise
Example #23
    enc_dec.build_trainer(src, src_mask, trg, trg_mask)
    enc_dec.build_sampler()

    if configuration['reload']:
        enc_dec.load()

    sample_search = BeamSearch(enc_dec=enc_dec,
                               configuration=configuration,
                               beam_size=1,
                               maxlen=configuration['seq_len_src'], stochastic=True)
    valid_search = BeamSearch(enc_dec=enc_dec, 
                              configuration=configuration,
                              beam_size=configuration['beam_size'],
                              maxlen=3*configuration['seq_len_src'], stochastic=False)

    sampler = Sampler(sample_search, **configuration)
    bleuvalidator = BleuValidator(valid_search, **configuration)

    # train function
    train_fn = enc_dec.train_fn
    if configuration.get('with_layernorm', False):
        update_fn = enc_dec.update_fn

    # train data
    ds = DStream(**configuration)

    # valid data
    vs = get_devtest_stream(data_type='valid', input_file=None, **configuration)

    # main_loop
    # modified by Zhaopeng Tu, 2016-07-14
Example #24
def main(config, tr_stream, dev_stream):
    # Create Theano variables
    logger.info('Creating theano variables')
    source_char_seq = tensor.lmatrix('source_char_seq')
    source_sample_matrix = tensor.btensor3('source_sample_matrix')
    source_char_aux = tensor.bmatrix('source_char_aux')
    source_word_mask = tensor.bmatrix('source_word_mask')
    target_char_seq = tensor.lmatrix('target_char_seq')
    target_char_aux = tensor.bmatrix('target_char_aux')
    target_char_mask = tensor.bmatrix('target_char_mask')
    target_sample_matrix = tensor.btensor3('target_sample_matrix')
    target_word_mask = tensor.bmatrix('target_word_mask')
    target_resample_matrix = tensor.btensor3('target_resample_matrix')
    target_prev_char_seq = tensor.lmatrix('target_prev_char_seq')
    target_prev_char_aux = tensor.bmatrix('target_prev_char_aux')
    target_bos_idx = tr_stream.trg_bos
    target_space_idx = tr_stream.space_idx['target']

    # Construct model
    logger.info('Building RNN encoder-decoder')

    encoder = BidirectionalEncoder(config['src_vocab_size'],
                                   config['enc_embed'],
                                   config['src_dgru_nhids'],
                                   config['enc_nhids'],
                                   config['src_dgru_depth'],
                                   config['bidir_encoder_depth'])

    decoder = Decoder(config['trg_vocab_size'], config['dec_embed'],
                      config['trg_dgru_nhids'], config['trg_igru_nhids'],
                      config['dec_nhids'], config['enc_nhids'] * 2,
                      config['transition_depth'], config['trg_igru_depth'],
                      config['trg_dgru_depth'], target_space_idx,
                      target_bos_idx)

    representation = encoder.apply(source_char_seq, source_sample_matrix,
                                   source_char_aux, source_word_mask)
    cost = decoder.cost(representation, source_word_mask, target_char_seq,
                        target_sample_matrix, target_resample_matrix,
                        target_char_aux, target_char_mask, target_word_mask,
                        target_prev_char_seq, target_prev_char_aux)

    logger.info('Creating computational graph')
    cg = ComputationGraph(cost)

    # Initialize model
    logger.info('Initializing model')
    encoder.weights_init = decoder.weights_init = IsotropicGaussian(
        config['weight_scale'])
    encoder.biases_init = decoder.biases_init = Constant(0)
    encoder.push_initialization_config()
    decoder.push_initialization_config()
    for layer_n in range(config['src_dgru_depth']):
        encoder.decimator.dgru.transitions[layer_n].weights_init = Orthogonal()
    for layer_n in range(config['bidir_encoder_depth']):
        encoder.children[
            1 + layer_n].prototype.recurrent.weights_init = Orthogonal()
    if config['trg_igru_depth'] == 1:
        decoder.interpolator.igru.weights_init = Orthogonal()
    else:
        for layer_n in range(config['trg_igru_depth']):
            decoder.interpolator.igru.transitions[
                layer_n].weights_init = Orthogonal()
    for layer_n in range(config['trg_dgru_depth']):
        decoder.interpolator.feedback_brick.dgru.transitions[
            layer_n].weights_init = Orthogonal()
    for layer_n in range(config['transition_depth']):
        decoder.transition.transitions[layer_n].weights_init = Orthogonal()
    encoder.initialize()
    decoder.initialize()

    # Print shapes
    shapes = [param.get_value().shape for param in cg.parameters]
    logger.info("Parameter shapes: ")
    for shape, count in Counter(shapes).most_common():
        logger.info('    {:15}: {}'.format(str(shape), count))
    logger.info("Total number of parameters: {}".format(len(shapes)))

    # Print parameter names
    enc_dec_param_dict = merge(
        Selector(encoder).get_parameters(),
        Selector(decoder).get_parameters())
    logger.info("Parameter names: ")
    for name, value in enc_dec_param_dict.items():
        logger.info('    {:15}: {}'.format(str(value.get_value().shape), name))
    logger.info("Total number of parameters: {}".format(
        len(enc_dec_param_dict)))

    # Set up training model
    logger.info("Building model")
    training_model = Model(cost)
    # Set up training algorithm
    logger.info("Initializing training algorithm")
    algorithm = GradientDescent(cost=cost,
                                parameters=cg.parameters,
                                step_rule=CompositeRule([
                                    StepClipping(config['step_clipping']),
                                    eval(config['step_rule'])()
                                ]))

    # Set extensions
    logger.info("Initializing extensions")
    # Extensions
    gradient_norm = aggregation.mean(algorithm.total_gradient_norm)
    step_norm = aggregation.mean(algorithm.total_step_norm)
    train_monitor = CostCurve([cost, gradient_norm, step_norm],
                              config=config,
                              after_batch=True,
                              before_first_epoch=True,
                              prefix='tra')
    extensions = [
        train_monitor,
        Timing(),
        Printing(every_n_batches=config['print_freq']),
        FinishAfter(after_n_batches=config['finish_after']),
        CheckpointNMT(config['saveto'], every_n_batches=config['save_freq'])
    ]

    # Set up beam search and sampling computation graphs if necessary
    if config['hook_samples'] >= 1 or config['bleu_script'] is not None:
        logger.info("Building sampling model")
        generated = decoder.generate(representation, source_word_mask)
        search_model = Model(generated)
        _, samples = VariableFilter(
            bricks=[decoder.sequence_generator], name="outputs")(
                ComputationGraph(generated[config['transition_depth']])
            )  # generated[transition_depth] is next_outputs

    # Add sampling
    if config['hook_samples'] >= 1:
        logger.info("Building sampler")
        extensions.append(
            Sampler(model=search_model,
                    data_stream=tr_stream,
                    hook_samples=config['hook_samples'],
                    transition_depth=config['transition_depth'],
                    every_n_batches=config['sampling_freq'],
                    src_vocab_size=config['src_vocab_size']))

    # Add early stopping based on bleu
    if config['bleu_script'] is not None:
        logger.info("Building bleu validator")
        extensions.append(
            BleuValidator(source_char_seq,
                          source_sample_matrix,
                          source_char_aux,
                          source_word_mask,
                          samples=samples,
                          config=config,
                          model=search_model,
                          data_stream=dev_stream,
                          normalize=config['normalized_bleu'],
                          every_n_batches=config['bleu_val_freq']))

    # Reload model if necessary
    if config['reload']:
        extensions.append(LoadNMT(config['saveto']))

    # Initialize main loop
    logger.info("Initializing main loop")
    main_loop = MainLoop(model=training_model,
                         algorithm=algorithm,
                         data_stream=tr_stream,
                         extensions=extensions)

    # Train!
    main_loop.run()
Example #25
def main_sample():
    pc = utils.read_ply('E:/Fac/BMC Master/Thesis/Models/bunny/reconstruction/plytest/bun_zipper.ply')
    sample_points = Sampler.sample(pc, -1, 1000, sampling_algorithm=SampleAlgorithm.Uniform)
    mlab.points3d(sample_points[:, 0], sample_points[:, 1], sample_points[:, 2], color=(1, 1, 1), mode='point')
    mlab.show()
    print('done')
print(data)

# ## Step 2 - Sample Points
#
#
# You may want to limit the z-axis values.

# In[70]:

from sampling import Sampler

# TODO: sample points randomly
# then use KDTree to find nearest neighbor polygon
# and test for collision
num_samp = 500
sampler = Sampler(data)
polygons = sampler._polygons
nodes = sampler.sample(num_samp)
print(len(nodes))
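
# As noted above, you may want to limit the z-axis values. A minimal sketch of
# one way to do that, assuming each node is a (north, east, alt) tuple and
# using a hypothetical altitude cap Z_MAX:
Z_MAX = 20
nodes = [n for n in nodes if n[2] <= Z_MAX]
print(len(nodes))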

# ## Step 3 - Connect Nodes
#
# Now we have to connect the nodes. There are many ways this might be done; it's completely up to you. The only restriction is that no edge connecting two nodes may pass through an obstacle.
#
# NOTE: You can use `LineString()` from the `shapely` library to create a line. Additionally, `shapely` geometry objects have a method `.crosses` which returns `True` if the geometries cross paths, for instance your `LineString()` with an obstacle `Polygon()`!

# In[71]:

# TODO: connect nodes
# Suggested method
# 1) cast nodes into a graph called "g" using networkx
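
# A minimal sketch of the suggested method, using the `nodes` and `polygons`
# from the sampling step above and assuming the polygons expose `.crosses()`
# and `.height` as in the other examples here; the helper name
# `can_connect_nodes` and k=10 are illustrative choices:
import numpy as np
import networkx as nx
from shapely.geometry import LineString
from sklearn.neighbors import KDTree


def can_connect_nodes(n1, n2, polygons):
    # Keep an edge only if it crosses no obstacle polygon at or below its altitude.
    line = LineString([n1, n2])
    for p in polygons:
        if p.crosses(line) and p.height >= min(n1[2], n2[2]):
            return False
    return True


g = nx.Graph()
tree = KDTree(np.array(nodes))
for n1 in nodes:
    # query the k nearest candidates (the first hit is typically the node itself)
    idxs = tree.query([n1], k=10, return_distance=False)[0]
    for idx in idxs:
        n2 = nodes[idx]
        if n2 == n1:
            continue
        if can_connect_nodes(n1, n2, polygons):
            g.add_edge(n1, n2, weight=1)
print(len(g.edges), "edges in g")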
Example #27
def create_prm(data):
    sampler = Sampler(data)
    polygons = sampler._polygons
    nodes = sampler.sample(300)
    g = create_graph(nodes, 10,polygons)
    return nodes,g
Example #28
    def plan_path(self):
        self.flight_state = States.PLANNING
        print("Searching for a path ...")
        TARGET_ALTITUDE = 5
        SAFETY_DISTANCE = 5

        self.target_position[2] = TARGET_ALTITUDE

        # TODO: read lat0, lon0 from colliders into floating point values
        filename = 'colliders.csv'
        data = np.loadtxt(filename, delimiter=';,', dtype='str')[0].split(", ")
        lat0, lon0 = [float(d.split(" ")[1]) for d in data]

        # TODO: set home position to (lon0, lat0, 0)
        self.set_home_position(lon0, lat0, 0)
        print("self.local_position ", self.local_position)

        # TODO: retrieve current global position
        current_glbl_pos = [self._longitude, self._latitude, self._altitude]
        print(current_glbl_pos)
        print(self.global_position)

        # TODO: convert to current local position using global_to_local()
        current_lcl_pos = global_to_local(current_glbl_pos, self.global_home)
        print("current_lcl_pos ", len(current_lcl_pos))
        print('global home {0}, position {1}, local position {2}'.format(self.global_home, self.global_position,
                                                                         self.local_position))
        # Read in obstacle map
        data = np.loadtxt('colliders.csv', delimiter=',', dtype='float64', skiprows=2)
        
        # Define a grid for a particular altitude and safety margin around obstacles
        _, north_offset, east_offset = create_grid(data, TARGET_ALTITUDE, SAFETY_DISTANCE)
        print("North offset = {0}, east offset = {1}".format(north_offset, east_offset))
        # Define starting point on the grid (this is just grid center)
        # grid_start = (-north_offset, -east_offset)
        # TODO: convert start position to current position rather than map center
        grid_start = (int(current_lcl_pos[0]), int(current_lcl_pos[1]))
        
        # Set goal as some arbitrary position on the grid
        # grid_goal = (-north_offset + 10, -east_offset + 10)

        # Example: sampling 300 points and removing
        # ones conflicting with obstacles.
        # print("Start of sampling...")
        sampler = Sampler(data)
        polygons = sampler.polygons
        nodes = sampler.sample(300)
        nodes.insert(0, (grid_start[0], grid_start[1], 5.0))

        # Build a graph using the sampled nodes
        # and connect each node to its 10 closest neighbors
        print("Start graph building...")
        t0 = time.time()
        graph = create_graph(polygons, nodes, 11)
        print('graph took {0} seconds to build'.format(time.time() - t0))

        start = list(graph.nodes)[0]

        len_path = 0
        while len_path == 0:
            k = np.random.randint(len(graph.nodes))
            goal = list(graph.nodes)[k]

            path, _ = a_star(graph, heuristic, start, goal)
            len_path = len(path)

        pruned_path = prune_path(path, polygons)
        waypoints = [[int(p[0]), int(p[1]), int(p[2]), 0] for p in pruned_path]
        self.waypoints = waypoints
        # TODO: send waypoints to sim (this is just for visualization of waypoints)
        self.send_waypoints()
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import Polygon, Point, LineString
from queue import PriorityQueue
from sampling import Sampler
import numpy.linalg as LA
from sklearn.neighbors import KDTree

print(nx.__version__)

filename = 'colliders.csv'
data = np.loadtxt(filename, delimiter=',', dtype='float64', skiprows=2)
sampler = Sampler(data)


def can_connect(n1, n2):

    polygons = sampler._polygons
    l = LineString([n1, n2])
    for p in polygons:
        minofn1n2 = min(n1[2], n2[2])

        if p.crosses(l) and p.height >= minofn1n2:
            return False
    return True


def create_graph():

    # print(polygons)
Example #30

import sys
sys.path.append('src/utils')
from config_utils import settings

if __name__ == '__main__':
    sequence_length = settings.model.sequence_length
    vocabulary_size = settings.model.vocabulary_size
    hidden_size = settings.model.hidden_size
    print ('Creating model with configuration: {0}'.format(settings.model))

    model = seq2seq_attention(sequence_length, vocabulary_size, hidden_size)
    print ('Loading model weights from {0}'.format(settings.model.weights_path))
    model.load_weights('models/seq2seq_weights.h5')

    vocabulary_file = settings.data.vocabulary_path
    with open(vocabulary_file, 'r') as handle:
        vocabulary = json.load(handle)
    
    sampler = Sampler(model, vocabulary, sequence_length)
    
    while True:
        question = raw_input('>>')
        response = sampler.respond(question, greedy=True)
        print (response)
        for t in (.7, .8, .9):
            response = sampler.respond(question, temperature=t)
            print (response)

Example #31
def main(config, tr_stream, dev_stream):

    # Create Theano variables
    source_sentence = tensor.lmatrix('source')
    source_sentence_mask = tensor.matrix('source_mask')
    target_sentence = tensor.lmatrix('target')
    target_sentence_mask = tensor.matrix('target_mask')
    sampling_input = tensor.lmatrix('input')

    # Test values
    '''
    theano.config.compute_test_value = 'warn'
    source_sentence.tag.test_value = numpy.random.randint(10, size=(10, 10))
    target_sentence.tag.test_value = numpy.random.randint(10, size=(10, 10))
    source_sentence_mask.tag.test_value = \
        numpy.random.rand(10, 10).astype('float32')
    target_sentence_mask.tag.test_value = \
        numpy.random.rand(10, 10).astype('float32')
    sampling_input.tag.test_value = numpy.random.randint(10, size=(10, 10))
    '''

    # Construct model
    encoder = BidirectionalEncoder(config['src_vocab_size'],
                                   config['enc_embed'], config['enc_nhids'])
    decoder = Decoder(config['trg_vocab_size'], config['dec_embed'],
                      config['dec_nhids'], config['enc_nhids'] * 2)
    cost = decoder.cost(encoder.apply(source_sentence, source_sentence_mask),
                        source_sentence_mask, target_sentence,
                        target_sentence_mask)

    # Initialize model
    encoder.weights_init = decoder.weights_init = IsotropicGaussian(
        config['weight_scale'])
    encoder.biases_init = decoder.biases_init = Constant(0)
    encoder.push_initialization_config()
    decoder.push_initialization_config()
    encoder.bidir.prototype.weights_init = Orthogonal()
    decoder.transition.weights_init = Orthogonal()
    encoder.initialize()
    decoder.initialize()

    cg = ComputationGraph(cost)

    # Print shapes
    shapes = [param.get_value().shape for param in cg.parameters]
    print('Parameter shapes')
    for shape, count in Counter(shapes).most_common():
        print('    {:15}: {}'.format(str(shape), count))

    # Set up training algorithm
    algorithm = GradientDescent(cost=cost,
                                params=cg.parameters,
                                step_rule=CompositeRule([
                                    StepClipping(config['step_clipping']),
                                    eval(config['step_rule'])()
                                ]))

    # Set up beam search and sampling computation graphs
    sampling_representation = encoder.apply(sampling_input,
                                            tensor.ones(sampling_input.shape))
    generated = decoder.generate(sampling_input, sampling_representation)
    search_model = Model(generated)
    samples, = VariableFilter(
        bricks=[decoder.sequence_generator], name="outputs")(ComputationGraph(
            generated[1]))  # generated[1] is the next_outputs

    # Set up training model
    training_model = Model(cost)

    enc_param_dict = Selector(encoder).get_params()
    dec_param_dict = Selector(decoder).get_params()

    gh_model_name = '/data/lisatmp3/firatorh/nmt/wmt15/trainedModels/blocks/sanity/refGHOG_adadelta_40k_best_bleu_model.npz'

    tmp_file = numpy.load(gh_model_name)
    gh_model = dict(tmp_file)
    tmp_file.close()

    for key in enc_param_dict:
        print('{:15}: {}'.format(str(enc_param_dict[key].get_value().shape), key))
    for key in dec_param_dict:
        print('{:15}: {}'.format(str(dec_param_dict[key].get_value().shape), key))

    enc_param_dict['/bidirectionalencoder/embeddings.W'].set_value(
        gh_model['W_0_enc_approx_embdr'])

    enc_param_dict[
        '/bidirectionalencoder/bidirectionalwmt15/forward.state_to_state'].set_value(
            gh_model['W_enc_transition_0'])
    enc_param_dict[
        '/bidirectionalencoder/bidirectionalwmt15/forward.state_to_update'].set_value(
            gh_model['G_enc_transition_0'])
    enc_param_dict[
        '/bidirectionalencoder/bidirectionalwmt15/forward.state_to_reset'].set_value(
            gh_model['R_enc_transition_0'])

    enc_param_dict['/bidirectionalencoder/fwd_fork/fork_inputs.W'].set_value(
        gh_model['W_0_enc_input_embdr_0'])
    enc_param_dict['/bidirectionalencoder/fwd_fork/fork_inputs.b'].set_value(
        gh_model['b_0_enc_input_embdr_0'])
    enc_param_dict[
        '/bidirectionalencoder/fwd_fork/fork_update_inputs.W'].set_value(
            gh_model['W_0_enc_update_embdr_0'])
    enc_param_dict[
        '/bidirectionalencoder/fwd_fork/fork_reset_inputs.W'].set_value(
            gh_model['W_0_enc_reset_embdr_0'])

    enc_param_dict[
        '/bidirectionalencoder/bidirectionalwmt15/backward.state_to_state'].set_value(
            gh_model['W_back_enc_transition_0'])
    enc_param_dict[
        '/bidirectionalencoder/bidirectionalwmt15/backward.state_to_update'].set_value(
            gh_model['G_back_enc_transition_0'])
    enc_param_dict[
        '/bidirectionalencoder/bidirectionalwmt15/backward.state_to_reset'].set_value(
            gh_model['R_back_enc_transition_0'])

    enc_param_dict['/bidirectionalencoder/back_fork/fork_inputs.W'].set_value(
        gh_model['W_0_back_enc_input_embdr_0'])
    enc_param_dict['/bidirectionalencoder/back_fork/fork_inputs.b'].set_value(
        gh_model['b_0_back_enc_input_embdr_0'])
    enc_param_dict[
        '/bidirectionalencoder/back_fork/fork_update_inputs.W'].set_value(
            gh_model['W_0_back_enc_update_embdr_0'])
    enc_param_dict[
        '/bidirectionalencoder/back_fork/fork_reset_inputs.W'].set_value(
            gh_model['W_0_back_enc_reset_embdr_0'])

    dec_param_dict[
        '/decoder/sequencegenerator/readout/lookupfeedbackwmt15/lookuptable.W'].set_value(
            gh_model['W_0_dec_approx_embdr'])
    #dec_param_dict['/decoder/sequencegenerator/readout/lookupfeedback/lookuptable.W'].set_value(gh_model['W_0_dec_approx_embdr'])

    dec_param_dict[
        '/decoder/sequencegenerator/readout/initializablefeedforwardsequence/maxout_bias.b'].set_value(
            gh_model['b_0_dec_hid_readout_0'])
    dec_param_dict[
        '/decoder/sequencegenerator/readout/initializablefeedforwardsequence/softmax0.W'].set_value(
            gh_model['W1_dec_deep_softmax'])  # Missing W1
    dec_param_dict[
        '/decoder/sequencegenerator/readout/initializablefeedforwardsequence/softmax1.W'].set_value(
            gh_model['W2_dec_deep_softmax'])
    dec_param_dict[
        '/decoder/sequencegenerator/readout/initializablefeedforwardsequence/softmax1.b'].set_value(
            gh_model['b_dec_deep_softmax'])

    dec_param_dict[
        '/decoder/sequencegenerator/readout/merge/transform_states.W'].set_value(
            gh_model['W_0_dec_hid_readout_0'])
    dec_param_dict[
        '/decoder/sequencegenerator/readout/merge/transform_feedback.W'].set_value(
            gh_model['W_0_dec_prev_readout_0'])
    dec_param_dict[
        '/decoder/sequencegenerator/readout/merge/transform_weighted_averages.W'].set_value(
            gh_model['W_0_dec_repr_readout'])
    dec_param_dict[
        '/decoder/sequencegenerator/readout/merge/transform_weighted_averages.b'].set_value(
            gh_model['b_0_dec_repr_readout'])

    dec_param_dict['/decoder/sequencegenerator/fork/fork_inputs.b'].set_value(
        gh_model['b_0_dec_input_embdr_0'])
    dec_param_dict['/decoder/sequencegenerator/fork/fork_inputs.W'].set_value(
        gh_model['W_0_dec_input_embdr_0'])
    dec_param_dict[
        '/decoder/sequencegenerator/fork/fork_update_inputs.W'].set_value(
            gh_model['W_0_dec_update_embdr_0'])
    dec_param_dict[
        '/decoder/sequencegenerator/fork/fork_reset_inputs.W'].set_value(
            gh_model['W_0_dec_reset_embdr_0'])

    dec_param_dict[
        '/decoder/sequencegenerator/att_trans/distribute/fork_inputs.W'].set_value(
            gh_model['W_0_dec_dec_inputter_0'])
    dec_param_dict[
        '/decoder/sequencegenerator/att_trans/distribute/fork_inputs.b'].set_value(
            gh_model['b_0_dec_dec_inputter_0'])
    dec_param_dict[
        '/decoder/sequencegenerator/att_trans/distribute/fork_update_inputs.W'].set_value(
            gh_model['W_0_dec_dec_updater_0'])
    dec_param_dict[
        '/decoder/sequencegenerator/att_trans/distribute/fork_update_inputs.b'].set_value(
            gh_model['b_0_dec_dec_updater_0'])
    dec_param_dict[
        '/decoder/sequencegenerator/att_trans/distribute/fork_reset_inputs.W'].set_value(
            gh_model['W_0_dec_dec_reseter_0'])
    dec_param_dict[
        '/decoder/sequencegenerator/att_trans/distribute/fork_reset_inputs.b'].set_value(
            gh_model['b_0_dec_dec_reseter_0'])

    dec_param_dict[
        '/decoder/sequencegenerator/att_trans/decoder.state_to_state'].set_value(
            gh_model['W_dec_transition_0'])
    dec_param_dict[
        '/decoder/sequencegenerator/att_trans/decoder.state_to_update'].set_value(
            gh_model['G_dec_transition_0'])
    dec_param_dict[
        '/decoder/sequencegenerator/att_trans/decoder.state_to_reset'].set_value(
            gh_model['R_dec_transition_0'])

    dec_param_dict[
        '/decoder/sequencegenerator/att_trans/attention/state_trans/transform_states.W'].set_value(
            gh_model['B_dec_transition_0'])
    dec_param_dict[
        '/decoder/sequencegenerator/att_trans/attention/preprocess.W'].set_value(
            gh_model['A_dec_transition_0'])
    dec_param_dict[
        '/decoder/sequencegenerator/att_trans/attention/energy_comp/linear.W'].set_value(
            gh_model['D_dec_transition_0'])

    dec_param_dict[
        '/decoder/sequencegenerator/att_trans/decoder/state_initializer/linear_0.W'].set_value(
            gh_model['W_0_dec_initializer_0'])
    dec_param_dict[
        '/decoder/sequencegenerator/att_trans/decoder/state_initializer/linear_0.b'].set_value(
            gh_model['b_0_dec_initializer_0'])
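
    # The transfer above is written as one explicit .set_value() call per parameter.
    # A more compact, table-driven variant is sketched below for illustration only:
    # name_map is a small hypothetical subset of the full mapping, and the loop
    # reuses the enc_param_dict / gh_model objects already defined above. Shapes
    # are checked before assignment so a wrong mapping fails loudly.
    name_map = {
        '/bidirectionalencoder/embeddings.W': 'W_0_enc_approx_embdr',
        '/bidirectionalencoder/fwd_fork/fork_inputs.W': 'W_0_enc_input_embdr_0',
    }
    for blocks_name, gh_name in name_map.items():
        new_value = gh_model[gh_name]
        old_shape = enc_param_dict[blocks_name].get_value().shape
        assert old_shape == new_value.shape, (blocks_name, old_shape, new_value.shape)
        enc_param_dict[blocks_name].set_value(new_value)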

    config['val_burn_in'] = -1

    # Initialize main loop
    main_loop = MainLoop(
        model=training_model,
        algorithm=algorithm,
        data_stream=tr_stream,
        extensions=[
            FinishAfter(after_n_batches=1),
            Sampler(model=search_model,
                    config=config,
                    data_stream=tr_stream,
                    every_n_batches=config['sampling_freq']),
            BleuValidator(
                sampling_input,
                samples=samples,
                config=config,
                model=search_model,
                data_stream=dev_stream,
                src_eos_idx=config['src_eos_idx'],
                trg_eos_idx=config['trg_eos_idx'],
                before_training=True,
                before_batch=True),  #every_n_batches=config['bleu_val_freq']),
            TrainingDataMonitoring([cost], after_batch=True),
            #Plot('En-Fr', channels=[['decoder_cost_cost']],
            #     after_batch=True),
            Printing(after_batch=True)
        ])

    # Train!
    main_loop.run()
Exemple #32
import os
import numpy as np
# NOTE: Sampler and RandomSampling are assumed to come from the sampling library
# this example targets; their import statement is not shown in the original excerpt.

# Simple synthetic data
points = np.random.random((10000, 2))  # Input data must be a numpy array of shape (n, 2)
categories = np.random.randint(0, 10, 10000)  # Labels; multi-class sampling methods use them when deciding
                                              # which items to keep. Defaults to np.zeros(n) if not provided.

# Datasets used in our study
all_data = np.load(os.path.join('data', 'abalone.npz'))
points, categories = all_data['positions'], all_data['labels']

print(points.shape, categories.shape)

sampler = Sampler()

sampler.set_data(points, categories)  # For single-class methods such as random sampling, categories can be omitted
sampling_method = RandomSampling  # Choose the desired sampling method
rs_args = {
    'sampling_rate': 0.3  # Sampling ratio; other method-specific parameters go here as well
}

sampler.set_sampling_method(sampling_method, **rs_args)  # Configure random sampling with its parameters
sampled_point, sampled_category = sampler.get_samples()  # Run the sampling and collect the result
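
# A small follow-up sketch (not part of the original example): report the rate
# that was actually achieved and plot the sampled subset next to the full data.
# points and categories are numpy arrays; the sampler's outputs are assumed to
# be numpy arrays as well.
import matplotlib.pyplot as plt

print(sampled_point.shape, sampled_category.shape)
print('achieved sampling rate: {:.3f}'.format(len(sampled_point) / len(points)))

fig, (ax_all, ax_sub) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)
ax_all.scatter(points[:, 0], points[:, 1], s=2, c='lightgray')
ax_all.set_title('all points')
ax_sub.scatter(sampled_point[:, 0], sampled_point[:, 1], s=2, c=sampled_category, cmap='tab10')
ax_sub.set_title('sampled subset')
plt.show()
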
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import Polygon, Point, LineString
from queue import PriorityQueue

#Step 1 - Load Data
# This is the same obstacle data from the previous lesson.
filename = '../data/colliders.csv'
data = np.loadtxt(filename, delimiter=',', dtype='float64', skiprows=2)
print(data)


#Step 2 - Sample Points in free space
from sampling import Sampler
sampler = Sampler(data)
polygons = sampler._polygons

# Example: sample 300 points and remove
# the ones that conflict with obstacles.
nodes = sampler.sample(300)
print(len(nodes))


#Step 3 - Connect Nodes

import numpy.linalg as LA
from sklearn.neighbors import KDTree

def can_connect(n1, n2):
    l = LineString([n1, n2])
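    # The original excerpt stops here. The rest of can_connect below is a minimal
    # completion sketch, not the original code: it assumes every entry of the
    # global `polygons` list offers shapely's crosses() plus a `height` attribute;
    # adapt the check to whatever representation the Sampler actually produces.
    for p in polygons:
        if p.crosses(l) and p.height >= min(n1[2], n2[2]):
            return False
    return True


# Sketch of the node-connection step under the same assumptions: query the k
# nearest neighbours of every node with the KDTree and keep only the edges that
# can_connect accepts. The result is a plain (i, j, distance) edge list.
def connect_nodes(nodes, k=10):
    pts = np.array(nodes)
    tree = KDTree(pts)
    edges = []
    for i, n1 in enumerate(nodes):
        # the first neighbour returned is the node itself, so ask for k + 1
        idxs = tree.query(pts[i].reshape(1, -1), k=min(k + 1, len(nodes)),
                          return_distance=False)[0]
        for j in idxs:
            if j == i:
                continue
            if can_connect(n1, nodes[j]):
                edges.append((i, int(j), LA.norm(pts[j] - pts[i])))
    return edges
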
Exemple #34
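# Excerpt of a dataset-loader method. The imports it relies on are assumed and
# not shown in the original excerpt: os, logging, numpy as np, PlyData from the
# plyfile package, spatial from scipy, plus the project-local `utils` helpers
# (add_noise, get_pc_diameter, angle_axis_to_rotation, transform_pc) and the
# project-local `Sampler` used for point-cloud subsampling.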
 def read_ply(self, file_name):
     num_samples = self.num_samples // len(self.files_list)
     if self.file_index == len(self.files_list) - 1:
         num_samples = num_samples + (self.num_samples - (num_samples * len(self.files_list)))
     
     root, ext = os.path.splitext(file_name)
     if not os.path.isfile(root + ".npy"):
         ply = PlyData.read(file_name)
         vertex = ply['vertex']
         (x, y, z) = (vertex[t] for t in ('x', 'y', 'z'))
         points = np.array(list(zip(x.ravel(), y.ravel(), z.ravel())))
         np.save(root + ".npy", points)
     else:
         points = np.load(root + ".npy")
         
     if self.add_noise:
         self.data = utils.add_noise(points, prob=self.noise_prob, factor=self.noise_factor)
     else:
         self.data = np.asarray(points)
     
     #if self.data.shape[0] > 2e5:
     #        self.data, _ = Sampler.sample(self.data, -1, 2e5, sampling_algorithm=self.sampling_algorithm)
         
     pc_diameter = utils.get_pc_diameter(self.data)
     self.l = self.relL*pc_diameter
     
     rot = utils.angle_axis_to_rotation(self.rotation_angle, self.rotation_axis)
     
     
     self.data = utils.transform_pc(self.data, rot)
     #plotutils.show_pc(self.data)
     #mlab.show()
             
     #TODO: better sampling
     print "sampling file: ", file_name
     self.samples, self.sample_indices = Sampler.sample(self.data, -1, num_samples, file_name=file_name, sampling_algorithm=self.sampling_algorithm)
     self.samples = self.samples[0:num_samples]
     self.sample_indices = self.sample_indices[0:num_samples]
     
     self.tree = spatial.KDTree(self.data)
     
     #TODO: Integrate with num_samples for consistency
     if self.filter_bad_samples:
         temp_file_samples = 'temp/' + os.path.basename(file_name) + '_' + str(num_samples) + '_filter' + str(self.filter_threshold) + '.npy'
         print('samples file:', temp_file_samples)
         if os.path.isfile(temp_file_samples):
             self.sample_indices = np.load(temp_file_samples)
             self.samples = self.data[self.sample_indices]
         else:
             self.samples, self.sample_indices = Sampler.sample(self.data, -1, num_samples*2, sampling_algorithm=self.sampling_algorithm)
             self.samples = self.samples[0:num_samples*2]
             self.sample_indices = self.sample_indices[0:num_samples*2]
             sample_indices_temp = []
             for idx in self.sample_indices:
                 if self.is_good_sample(self.data[idx], self.filter_threshold):
                     sample_indices_temp.append(idx)
                     if len(sample_indices_temp) >= num_samples:
                         break   
             assert (len(sample_indices_temp) >= num_samples)
             self.sample_indices = np.asarray(sample_indices_temp[0:num_samples])
             self.samples = self.data[self.sample_indices]
             np.save(temp_file_samples, self.sample_indices)
             #plotutils.show_pc(self.samples)
             #mlab.show()
     
     logging.basicConfig(filename='example.log', level=logging.DEBUG)
     return self.data