Example No. 1
 def __init__(self, scheme=None):
     self.proc = None
     self.daemon = None
     self.stop = Event()
     self.scheme = scheme
Example No. 2
        _fields_ = [
            ('length', ctypes.c_uint),
            ('data', ctypes.c_byte * 64)
        ]

        def from_python(self, data):
            # copy at most the capacity of the fixed 64-byte buffer
            length = min(len(data), ctypes.sizeof(self.data))
            ctypes.memmove(self.data, (ctypes.c_byte * length).from_buffer(bytearray(data)), length)
            self.length = length

        def to_python(self):
            return str(bytearray(self.data[:self.length]))

    size = 100000
    q = Queue(Data, size, True) # True makes the queue synchronized
    run = Event()
    run.set()

    def worker(q, r):
        while r.is_set():
            try:
                v = q.get()
                assert(v.startswith('test'))
                sleep(0.00001)
            except Empty:
                pass

    def main(q):
        v = 0
        t = time()
        while v != (size * 3):
Example No. 3
from multiprocessing import Event

e = Event()
# e.set()
# e.clear()
# e.wait()
# e.is_set()
# An Event's is_set() flag determines whether e.wait() blocks
# While is_set() is False, e.wait() blocks
# Once is_set() is True, e.wait() returns immediately
# set() switches the flag to True
# clear() switches the flag back to False

print(e.is_set())  # False, so wait() would block at this point
e.set()  # flip the flag to True so wait() no longer blocks
e.wait()
print(e.is_set())
print(123)
e.clear()
print(e.is_set())  # False again
e.wait()  # blocks indefinitely because the event was cleared, so the final print never runs
print(123)
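The comments in the example above describe how an Event's flag gates wait() within a single process; the same object is more typically shared across processes. A minimal sketch of that cross-process usage (not taken from any of the examples here; names are illustrative):

from multiprocessing import Event, Process
import time

def child(started):
    started.wait()      # blocks until the parent calls set()
    print('child: event set, continuing')

if __name__ == '__main__':
    started = Event()
    p = Process(target=child, args=(started,))
    p.start()
    time.sleep(0.5)     # parent does some setup work first
    started.set()       # releases the child from wait()
    p.join()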
Example No. 4
def run(args):

    # --------------------> Preparation <-------------------- #
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.set_default_dtype(torch.float64)
    torch.set_num_threads(1)
    device = torch.device("cpu")

    # build a scalarization template
    scalarization_template = WeightedSumScalarization(
        num_objs=args.obj_num, weights=np.ones(args.obj_num) / args.obj_num)

    total_num_updates = int(
        args.num_env_steps) // args.num_steps // args.num_processes

    start_time = time.time()

    # initialize ep and population and opt_graph
    ep = EP()
    if args.obj_num == 2:
        population = Population2d(args)
    elif args.obj_num > 2:
        population = Population3d(args)
    else:
        raise NotImplementedError
    opt_graph = OptGraph()

    # Construct tasks for warm up
    elite_batch, scalarization_batch = initialize_warm_up_batch(args, device)
    rl_num_updates = args.warmup_iter
    for sample, scalarization in zip(elite_batch, scalarization_batch):
        sample.optgraph_id = opt_graph.insert(deepcopy(scalarization.weights),
                                              deepcopy(sample.objs), -1)

    episode = 0
    iteration = 0
    while iteration < total_num_updates:
        if episode == 0:
            print_info(
                '\n------------------------------- Warm-up Stage -------------------------------'
            )
        else:
            print_info(
                '\n-------------------- Evolutionary Stage: Generation {:3} --------------------'
                .format(episode))

        episode += 1

        offspring_batch = np.array([])

        # --------------------> RL Optimization <-------------------- #
        # compose task for each elite
        task_batch = []
        for elite, scalarization in \
                zip(elite_batch, scalarization_batch):
            task_batch.append(Task(
                elite, scalarization))  # each task is a (policy, weight) pair

        # run MOPG for each task in parallel
        processes = []
        results_queue = Queue()
        done_event = Event()

        for task_id, task in enumerate(task_batch):
            p = Process(target = MOPG_worker, \
                args = (args, task_id, task, device, iteration, rl_num_updates, start_time, results_queue, done_event))
            p.start()
            processes.append(p)

        # collect MOPG results for offsprings and insert objs into objs buffer
        all_offspring_batch = [[] for _ in range(len(processes))]
        cnt_done_workers = 0
        while cnt_done_workers < len(processes):
            rl_results = results_queue.get()
            task_id, offsprings = rl_results['task_id'], rl_results[
                'offspring_batch']
            for sample in offsprings:
                all_offspring_batch[task_id].append(Sample.copy_from(sample))
            if rl_results['done']:
                cnt_done_workers += 1

        # put all intermediate policies into all_sample_batch for the EP update
        all_sample_batch = []
        # store the last policy for each optimization weight for RA
        last_offspring_batch = [None] * len(processes)
        # only policies from iterations where iteration % update_iter == 0 are inserted into offspring_batch for the population update
        # after the warm-up stage this is equivalent to last_offspring_batch
        offspring_batch = []
        for task_id in range(len(processes)):
            offsprings = all_offspring_batch[task_id]
            prev_node_id = task_batch[task_id].sample.optgraph_id
            opt_weights = deepcopy(
                task_batch[task_id].scalarization.weights).detach().numpy()
            for i, sample in enumerate(offsprings):
                all_sample_batch.append(sample)
                if (i + 1) % args.update_iter == 0:
                    prev_node_id = opt_graph.insert(opt_weights,
                                                    deepcopy(sample.objs),
                                                    prev_node_id)
                    sample.optgraph_id = prev_node_id
                    offspring_batch.append(sample)
            last_offspring_batch[task_id] = offsprings[-1]

        done_event.set()

        # -----------------------> Update EP <----------------------- #
        # update EP and population
        ep.update(all_sample_batch)
        population.update(offspring_batch)

        # ------------------- > Task Selection <--------------------- #
        if args.selection_method == 'moead':
            elite_batch, scalarization_batch = [], []
            weights_batch = []
            generate_weights_batch_dfs(0, args.obj_num, args.min_weight,
                                       args.max_weight, args.delta_weight, [],
                                       weights_batch)
            for weights in weights_batch:
                scalarization = deepcopy(scalarization_template)
                scalarization.update_weights(weights)
                scalarization_batch.append(scalarization)
                best_sample, best_value = None, -np.inf
                for sample in population.sample_batch:
                    value = scalarization.evaluate(torch.Tensor(sample.objs))
                    if value > best_value:
                        best_sample, best_value = sample, value
                elite_batch.append(best_sample)
        elif args.selection_method == 'prediction-guided':
            elite_batch, scalarization_batch, predicted_offspring_objs = population.prediction_guided_selection(
                args, iteration, ep, opt_graph, scalarization_template)
        elif args.selection_method == 'random':
            elite_batch, scalarization_batch = population.random_selection(
                args, scalarization_template)
        elif args.selection_method == 'ra':
            elite_batch = last_offspring_batch
            scalarization_batch = []
            weights_batch = []
            generate_weights_batch_dfs(0, args.obj_num, args.min_weight,
                                       args.max_weight, args.delta_weight, [],
                                       weights_batch)
            for weights in weights_batch:
                scalarization = deepcopy(scalarization_template)
                scalarization.update_weights(weights)
                scalarization_batch.append(scalarization)
        elif args.selection_method == 'pfa':
            if args.obj_num > 2:
                raise NotImplementedError
            elite_batch = last_offspring_batch
            scalarization_batch = []
            delta_ratio = (iteration + rl_num_updates + args.update_iter -
                           args.warmup_iter) / (total_num_updates -
                                                args.warmup_iter)
            delta_ratio = np.clip(delta_ratio, 0.0, 1.0)
            for i in np.arange(args.min_weight,
                               args.max_weight + 0.5 * args.delta_weight,
                               args.delta_weight):
                w = np.clip(i + delta_ratio * args.delta_weight,
                            args.min_weight, args.max_weight)
                weights = np.array([abs(w), abs(1.0 - w)])
                scalarization = deepcopy(scalarization_template)
                scalarization.update_weights(weights)
                scalarization_batch.append(scalarization)
        else:
            raise NotImplementedError

        print_info('Selected Tasks:')
        for i in range(len(elite_batch)):
            print_info('objs = {}, weight = {}'.format(
                elite_batch[i].objs, scalarization_batch[i].weights))

        iteration = min(iteration + rl_num_updates, total_num_updates)

        rl_num_updates = args.update_iter

        # ----------------------> Save Results <---------------------- #
        # save ep
        ep_dir = os.path.join(args.save_dir, str(iteration), 'ep')
        os.makedirs(ep_dir, exist_ok=True)
        with open(os.path.join(ep_dir, 'objs.txt'), 'w') as fp:
            for obj in ep.obj_batch:
                fp.write(('{:5f}' + (args.obj_num - 1) * ',{:5f}' +
                          '\n').format(*obj))

        # save population
        population_dir = os.path.join(args.save_dir, str(iteration),
                                      'population')
        os.makedirs(population_dir, exist_ok=True)
        with open(os.path.join(population_dir, 'objs.txt'), 'w') as fp:
            for sample in population.sample_batch:
                fp.write(('{:5f}' + (args.obj_num - 1) * ',{:5f}' +
                          '\n').format(*(sample.objs)))
        # save optgraph and node id for each sample in population
        with open(os.path.join(population_dir, 'optgraph.txt'), 'w') as fp:
            fp.write('{}\n'.format(len(opt_graph.objs)))
            for i in range(len(opt_graph.objs)):
                fp.write(('{:5f}' + (args.obj_num - 1) * ',{:5f}' + ';{:5f}' +
                          (args.obj_num - 1) * ',{:5f}' + ';{}\n').format(
                              *(opt_graph.weights[i]), *(opt_graph.objs[i]),
                              opt_graph.prev[i]))
            fp.write('{}\n'.format(len(population.sample_batch)))
            for sample in population.sample_batch:
                fp.write('{}\n'.format(sample.optgraph_id))

        # save elites
        elite_dir = os.path.join(args.save_dir, str(iteration), 'elites')
        os.makedirs(elite_dir, exist_ok=True)
        with open(os.path.join(elite_dir, 'elites.txt'), 'w') as fp:
            for elite in elite_batch:
                fp.write(('{:5f}' + (args.obj_num - 1) * ',{:5f}' +
                          '\n').format(*(elite.objs)))
        with open(os.path.join(elite_dir, 'weights.txt'), 'w') as fp:
            for scalarization in scalarization_batch:
                fp.write(('{:5f}' + (args.obj_num - 1) * ',{:5f}' +
                          '\n').format(*(scalarization.weights)))
        if args.selection_method == 'prediction-guided':
            with open(os.path.join(elite_dir, 'predictions.txt'), 'w') as fp:
                for objs in predicted_offspring_objs:
                    fp.write(('{:5f}' + (args.obj_num - 1) * ',{:5f}' +
                              '\n').format(*(objs)))
        with open(os.path.join(elite_dir, 'offsprings.txt'), 'w') as fp:
            for i in range(len(all_offspring_batch)):
                for j in range(len(all_offspring_batch[i])):
                    fp.write(('{:5f}' + (args.obj_num - 1) * ',{:5f}' +
                              '\n').format(*(all_offspring_batch[i][j].objs)))

    # ----------------------> Save Final Model <----------------------

    os.makedirs(os.path.join(args.save_dir, 'final'), exist_ok=True)

    # save ep policies & env_params
    for i, sample in enumerate(ep.sample_batch):
        torch.save(
            sample.actor_critic.state_dict(),
            os.path.join(args.save_dir, 'final', 'EP_policy_{}.pt'.format(i)))
        with open(
                os.path.join(args.save_dir, 'final',
                             'EP_env_params_{}.pkl'.format(i)), 'wb') as fp:
            pickle.dump(sample.env_params, fp)

    # save all ep objectives
    with open(os.path.join(args.save_dir, 'final', 'objs.txt'), 'w') as fp:
        for i, obj in enumerate(ep.obj_batch):
            fp.write(('{:5f}' + (args.obj_num - 1) * ',{:5f}' +
                      '\n').format(*(obj)))

    # save all ep env_params
    if args.obj_rms:
        with open(os.path.join(args.save_dir, 'final', 'env_params.txt'),
                  'w') as fp:
            for sample in ep.sample_batch:
                fp.write('obj_rms: mean: {} var: {}\n'.format(
                    sample.env_params['obj_rms'].mean,
                    sample.env_params['obj_rms'].var))
Example No. 5
def state_lattice_planner(n: int, m: int, file_name: str = "test", g_weight: float = 0.5, h_weight: float = 0.5,
                          costmap_file: str = "",
                          start_pos: tuple = (20, 10, 0), goal_pos: tuple = (20, 280, 0),
                          initial_heading: float = math.pi / 2, padding: int = 0,
                          turning_radius: int = 8, vel: int = 10, num_headings: int = 8,
                          num_obs: int = 130, min_r: int = 1, max_r: int = 8, upper_offset: int = 20,
                          lower_offset: int = 20, allow_overlap: bool = False,
                          obstacle_density: int = 6, obstacle_penalty: float = 3,
                          Kp: float = 3, Ki: float = 0.08, Kd: float = 0.5, inf_stream: bool = False,
                          save_animation: bool = False, save_costmap: bool = False, smooth_path: bool = False,
                          replan: bool = False, horizon: int = np.inf, y_axis_limit: int = 100, buffer: int = None,
                          move_yaxis_threshold: int = 20, new_obs_dist: int = None):
    # PARAM SETUP
    # --- costmap --- #
    load_costmap_file = costmap_file
    ship_vertices = np.array([[-1, -4],
                              [1, -4],
                              [1, 2],
                              [0, 4],
                              [-1, 2]])

    # load costmap object from file if specified
    if load_costmap_file:
        with open(load_costmap_file, "rb") as fd:
            costmap_obj = pickle.load(fd)
            # recompute costmap costs if obstacle penalty is different than original
            if costmap_obj.obstacle_penalty != obstacle_penalty:
                costmap_obj.update2(obstacle_penalty)
    else:
        # initialize costmap
        costmap_obj = CostMap(
            n, m, obstacle_penalty, min_r, max_r, inf_stream, y_axis_limit, num_obs, new_obs_dist
        )

        # generate obs up until buffer if in inf stream mode
        max_y = y_axis_limit + buffer if inf_stream else goal_pos[1]
        # generate random obstacles
        costmap_obj.generate_obstacles(start_pos[1], max_y, num_obs,
                                       upper_offset, lower_offset, allow_overlap)

    orig_obstacles = costmap_obj.obstacles.copy()

    # initialize ship object
    ship = Ship(ship_vertices, start_pos, initial_heading, turning_radius, padding)

    # get the primitives
    prim = Primitives(turning_radius, initial_heading, num_headings)

    # generate swath dict
    swath_dict = swath.generate_swath(ship, prim)

    print("WEIGHTS", g_weight, h_weight)
    # initialize a star object
    a_star = AStar(g_weight, h_weight, cmap=costmap_obj,
                   primitives=prim, ship=ship, first_initial_heading=initial_heading)

    # compute current goal
    curr_goal = (goal_pos[0], min(goal_pos[1], (start_pos[1] + horizon)), goal_pos[2])

    t0 = time.time()
    worked, smoothed_edge_path, nodes_visited, x1, y1, x2, y2, orig_path = \
        a_star.search(start_pos, curr_goal, swath_dict, smooth_path)

    init_plan_time = time.time() - t0
    print("Time elapsed: ", init_plan_time)
    print("Hz", 1 / init_plan_time)

    if worked:
        plot_obj = Plot(
            costmap_obj, prim, ship, nodes_visited, smoothed_edge_path.copy(),
            path_nodes=(x1, y1), smoothing_nodes=(x2, y2), horizon=horizon,
            inf_stream=inf_stream, y_axis_limit=y_axis_limit
        )
        path = Path(plot_obj.full_path)
    else:
        print("Failed to find path at step 0")
        exit(1)

    # init pymunk sim
    space = pymunk.Space()
    space.add(ship.body, ship.shape)
    space.gravity = (0, 0)
    staticBody = space.static_body  # create a static body for friction constraints

    # create the pymunk objects and the polygon patches for the ice
    polygons = [
        create_polygon(
            space, staticBody, (obs['vertices'] - np.array(obs['centre'])).tolist(),
            *obs['centre'], density=obstacle_density
        )
        for obs in costmap_obj.obstacles
    ]

    # From pure pursuit
    state = State(x=start_pos[0], y=start_pos[1], yaw=0.0, v=0.0)
    target_course = TargetCourse(path.path[0], path.path[1])
    target_ind = target_course.search_target_index(state)

    # init PID controller
    pid = PID(Kp, Ki, Kd, 0)
    pid.output_limits = (-1, 1)  # limit on PID output

    # generator to end matplotlib animation when it reaches the goal
    def gen():
        nonlocal at_goal
        i = 0
        while not at_goal:
            i += 1
            yield i
        return  # ending the generator stops the animation (raising StopIteration here would become a RuntimeError under PEP 479)

    def animate(frame, queue_state, pipe_path):
        nonlocal at_goal

        steps = 10
        # move the simulation forward 20 ms:
        for x in range(steps):
            space.step(0.02 / steps)

        # get current state
        ship_pos = (ship.body.position.x, ship.body.position.y, 0)  # straight ahead of boat is 0

        # check if ship has made it past the goal line
        if ship.body.position.y >= goal_pos[1]:
            at_goal = True
            print("\nAt goal, shutting down...")
            plt.close(plot_obj.map_fig)
            plt.close(plot_obj.sim_fig)
            queue_state.close()
            shutdown_event.set()
            return []

        # Pymunk treats a left turn as a negative ship.body.angle and a right turn as positive.
        # The setpoint, however, is the angle to a point one lookahead distance ahead computed
        # with arctan2, which is positive to the left and negative to the right, so the angle is
        # negated before it is fed to the PID controller. The angular velocity follows the same
        # sign convention as ship.body.angle, so the controller output is negated as well.
        output = -pid(-ship.body.angle)

        # should play around with frequency at which new state data is sent
        if frame % 20 == 0 and frame != 0 and replan:
            # update costmap and polygons
            to_add, to_remove = costmap_obj.update(polygons, ship.body.position.y)
            assert len(costmap_obj.obstacles) <= costmap_obj.total_obs

            # remove polygons if any
            for obs in to_remove:
                polygons.remove(obs)

            # add polygons if any
            polygons.extend([
                create_polygon(
                    space, staticBody, (obs['vertices'] - np.array(obs['centre'])).tolist(),
                    *obs['centre'], density=obstacle_density
                )
                for obs in to_add
            ])
            print("Total polygons", len(polygons))

            try:
                # empty queue to ensure latest state data is pushed
                queue_state.get_nowait()
            except Empty:
                pass

            # send updated state via queue
            queue_state.put({
                'ship_pos': ship_pos,
                'ship_body_angle': ship.body.angle,
                'costmap': costmap_obj.cost_map,
                'obstacles': costmap_obj.obstacles,
            }, block=False)
            print('\nSent new state data!')

        # check if there is a new path
        if pipe_path.poll():
            # get new path
            path_data = pipe_path.recv()
            new_path = path_data['path']  # this is the full path and in the correct order i.e. start -> goal
            print('\nReceived replanned path!')

            # compute swath cost of new path up until the max y distance of old path for a fair comparison
            # note, we do not include the path length in the cost
            full_swath, full_cost, current_cost = swath.compute_swath_cost(
                costmap_obj.cost_map, new_path, ship.vertices, threshold_dist=path.path[1][-1]
            )
            try:
                assert full_cost >= current_cost  # sanity check
            except AssertionError:
                print("Full and partial swath costs", full_cost, current_cost)

            path_expired = False
            prev_cost = None

            # check if old path is 'expired' regardless of costs
            if (path.path[1][-1] - ship_pos[1]) < horizon / 2:
                path_expired = True

            else:
                # clip old path based on ship y position
                old_path = path.clip_path(ship_pos[1])

                # compute cost of clipped old path
                _, prev_cost, _ = swath.compute_swath_cost(costmap_obj.cost_map, old_path, ship.vertices)

                print('\nPrevious Cost: {prev_cost:.3f}'.format(prev_cost=prev_cost))
                print('Current Cost: {current_cost:.3f}\n'.format(current_cost=current_cost))

            if path_expired or current_cost < prev_cost:
                if path_expired:
                    print("Path expired, applying new path regardless of cost!")
                else:
                    print("New path better than old path!")
                    path.new_path_cnt += 1

                plot_obj.update_path(
                    new_path, full_swath, path_data['path_nodes'],
                    path_data['smoothing_nodes'], path_data['nodes_expanded']
                )

                # update to new path
                path.path = new_path
                ship.set_path_pos(0)

                # update pure pursuit objects with new path
                target_course.update(path.path[0], path.path[1])
                state.update(ship.body.position.x, ship.body.position.y, ship.body.angle)

                # update costmap and map fig
                plot_obj.update_map(costmap_obj.cost_map)
                plot_obj.map_fig.canvas.draw()

            else:
                print("Old path better than new path")
                path.old_path_cnt += 1

        if ship.path_pos < np.shape(path.path)[1] - 1:
            # Translate linear velocity into direction of ship
            x_vel = math.sin(ship.body.angle)
            y_vel = math.cos(ship.body.angle)
            mag = math.sqrt(x_vel ** 2 + y_vel ** 2)
            x_vel = x_vel / mag * vel
            y_vel = y_vel / mag * vel
            ship.body.velocity = Vec2d(x_vel, y_vel)

            # Assign output of PID controller to angular velocity
            ship.body.angular_velocity = output

            # Update the pure pursuit state
            state.update(ship.body.position.x, ship.body.position.y, ship.body.angle)

            # Get look ahead index
            ind = target_course.search_target_index(state)

            if ind != ship.path_pos:
                # Find heading from current position to look ahead point
                ship.set_path_pos(ind)
                dy = path.path[1][ind] - ship.body.position.y
                dx = path.path[0][ind] - ship.body.position.x
                angle = np.arctan2(dy, dx) - a_star.first_initial_heading
                # set setpoint for PID controller
                pid.setpoint = angle

        # at each step animate ship and obstacle patches
        plot_obj.animate_ship(ship, horizon, move_yaxis_threshold)
        plot_obj.animate_obstacles(polygons)

        return plot_obj.get_sim_artists()

    # multiprocessing setup
    lifo_queue = Queue(maxsize=1)  # LIFO queue to send state information to A*
    conn_recv, conn_send = Pipe(duplex=False)  # pipe to send new path to controller and for plotting
    shutdown_event = Event()

    # setup a process to run A*
    print('\nStart process...')
    gen_path_process = Process(
        target=gen_path, args=(lifo_queue, conn_send, shutdown_event, ship, prim,
                               costmap_obj, swath_dict, a_star, goal_pos, horizon, smooth_path)
    )
    gen_path_process.start()

    # init vars used in animation methods
    at_goal = False

    # start animation in main process
    anim = animation.FuncAnimation(plot_obj.sim_fig,
                                   animate,
                                   frames=gen,
                                   fargs=(lifo_queue, conn_recv,),
                                   interval=20,
                                   blit=False,
                                   repeat=False,
                                   )

    if save_animation:
        anim.save(os.path.join('gifs', file_name), writer=animation.PillowWriter(fps=30))
    plt.show()

    total_dist_moved = 0
    # Compare cost maps
    for i, j in zip(orig_obstacles, polygons):
        pos = (j.body.position.x, j.body.position.y)
        area = j.area
        if area > 4:
            total_dist_moved = a_star.dist(i['centre'], pos) * (area/2) + total_dist_moved

    print('TOTAL DIST MOVED', total_dist_moved)
    print('Old/new path counts', '\n\told path', path.old_path_cnt, '\n\tnew path', path.new_path_cnt)

    shutdown_event.set()
    print('\n...done with process')
    gen_path_process.join()
    print('Completed multiprocessing')

    # get response from user for saving costmap
    if save_costmap:
        costmap_obj.save_to_disk()
    return total_dist_moved, init_plan_time
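In the animate callback above, the state queue is created with maxsize=1 and drained with get_nowait() before each put so the A* process only ever sees the newest ship state. A stripped-down sketch of that keep-only-the-latest pattern (illustrative names, not part of the planner code):

from multiprocessing import Queue
from queue import Empty, Full
import time

def publish_latest(q, state):
    # drop any stale item first so the consumer only ever reads the newest state
    try:
        q.get_nowait()
    except Empty:
        pass
    try:
        q.put(state, block=False)
    except Full:
        pass  # a fresh item is already queued

latest_state = Queue(maxsize=1)
publish_latest(latest_state, {'ship_pos': (20, 10, 0)})
time.sleep(0.1)  # give the queue's feeder thread time to flush before the next update
publish_latest(latest_state, {'ship_pos': (20, 12, 0)})
print(latest_state.get())  # only the most recent state remains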
Example No. 6
    def test_last_blob_retrieval(self):
        kill_event = Event()
        dead_event_1 = Event()
        blob_hash_queue_1 = Queue()
        blob_hash_queue_2 = Queue()
        fast_uploader = Process(target=start_blob_uploader,
                                args=(blob_hash_queue_1, kill_event,
                                      dead_event_1, False))
        fast_uploader.start()
        self.server_processes.append(fast_uploader)
        dead_event_2 = Event()
        slow_uploader = Process(target=start_blob_uploader,
                                args=(blob_hash_queue_2, kill_event,
                                      dead_event_2, True))
        slow_uploader.start()
        self.server_processes.append(slow_uploader)

        logging.debug("Testing transfer")

        wallet = FakeWallet()
        peer_manager = PeerManager()
        peer_finder = FakePeerFinder(5553, peer_manager, 2)
        hash_announcer = FakeAnnouncer()
        rate_limiter = DummyRateLimiter()

        db_dir, blob_dir = mk_db_and_blob_dir()
        self.session = Session(
            conf.ADJUSTABLE_SETTINGS['data_rate'][1],
            db_dir=db_dir,
            node_id="abcd",
            peer_finder=peer_finder,
            hash_announcer=hash_announcer,
            blob_dir=blob_dir,
            peer_port=5553,
            use_upnp=False,
            rate_limiter=rate_limiter,
            wallet=wallet,
            blob_tracker_class=DummyBlobAvailabilityTracker,
            is_generous=conf.ADJUSTABLE_SETTINGS['is_generous_host'][1],
            external_ip="127.0.0.1")

        d1 = self.wait_for_hash_from_queue(blob_hash_queue_1)
        d2 = self.wait_for_hash_from_queue(blob_hash_queue_2)
        d = defer.DeferredList([d1, d2], fireOnOneErrback=True)

        def get_blob_hash(results):
            self.assertEqual(results[0][1], results[1][1])
            return results[0][1]

        d.addCallback(get_blob_hash)

        def download_blob(blob_hash):
            prm = self.session.payment_rate_manager
            downloader = StandaloneBlobDownloader(blob_hash,
                                                  self.session.blob_manager,
                                                  peer_finder, rate_limiter,
                                                  prm, wallet)
            d = downloader.download()
            return d

        def start_transfer(blob_hash):

            logging.debug("Starting the transfer")

            d = self.session.setup()
            d.addCallback(lambda _: download_blob(blob_hash))

            return d

        d.addCallback(start_transfer)

        def stop(arg):
            if isinstance(arg, Failure):
                logging.debug("Client is stopping due to an error. Error: %s",
                              arg.getTraceback())
            else:
                logging.debug("Client is stopping normally.")
            kill_event.set()
            logging.debug("Set the kill event")
            d1 = self.wait_for_event(dead_event_1, 15)
            d2 = self.wait_for_event(dead_event_2, 15)
            dl = defer.DeferredList([d1, d2])

            def print_shutting_down():
                logging.info("Client is shutting down")

            dl.addCallback(lambda _: print_shutting_down())
            dl.addCallback(lambda _: rm_db_and_blob_dir(db_dir, blob_dir))
            dl.addCallback(lambda _: arg)
            return dl

        d.addBoth(stop)
        return d
Example No. 7
    def test_multiple_uploaders(self):
        sd_hash_queue = Queue()
        num_uploaders = 3
        kill_event = Event()
        dead_events = [Event() for _ in range(num_uploaders)]
        ready_events = [Event() for _ in range(1, num_uploaders)]
        lbry_uploader = LbryUploader(sd_hash_queue, kill_event, dead_events[0],
                                     5209343, 9373419, 2**22)
        uploader = Process(target=lbry_uploader.start)
        uploader.start()
        self.server_processes.append(uploader)

        logging.debug("Testing multiple uploaders")

        wallet = FakeWallet()
        peer_manager = PeerManager()
        peer_finder = FakePeerFinder(5553, peer_manager, num_uploaders)
        hash_announcer = FakeAnnouncer()
        rate_limiter = DummyRateLimiter()
        sd_identifier = StreamDescriptorIdentifier()

        db_dir, blob_dir = mk_db_and_blob_dir()
        self.session = Session(
            conf.ADJUSTABLE_SETTINGS['data_rate'][1],
            db_dir=db_dir,
            node_id="abcd",
            peer_finder=peer_finder,
            hash_announcer=hash_announcer,
            blob_dir=blob_dir,
            peer_port=5553,
            use_upnp=False,
            rate_limiter=rate_limiter,
            wallet=wallet,
            blob_tracker_class=DummyBlobAvailabilityTracker,
            is_generous=conf.ADJUSTABLE_SETTINGS['is_generous_host'][1],
            external_ip="127.0.0.1")

        self.stream_info_manager = TempEncryptedFileMetadataManager()

        self.lbry_file_manager = EncryptedFileManager(self.session,
                                                      self.stream_info_manager,
                                                      sd_identifier)

        def start_additional_uploaders(sd_hash):
            for i in range(1, num_uploaders):
                uploader = Process(target=start_lbry_reuploader,
                                   args=(sd_hash, kill_event, dead_events[i],
                                         ready_events[i - 1], i, 2**10))
                uploader.start()
                self.server_processes.append(uploader)
            return defer.succeed(True)

        def wait_for_ready_events():
            return defer.DeferredList([
                self.wait_for_event(ready_event, 60)
                for ready_event in ready_events
            ])

        def make_downloader(metadata, prm):
            info_validator = metadata.validator
            options = metadata.options
            factories = metadata.factories
            chosen_options = [
                o.default_value
                for o in options.get_downloader_options(info_validator, prm)
            ]
            return factories[0].make_downloader(metadata, chosen_options, prm)

        def download_file(sd_hash):
            prm = self.session.payment_rate_manager
            d = download_sd_blob(self.session, sd_hash, prm)
            d.addCallback(sd_identifier.get_metadata_for_sd_blob)
            d.addCallback(make_downloader, prm)
            d.addCallback(lambda downloader: downloader.start())
            return d

        def check_md5_sum():
            f = open('test_file')
            hashsum = MD5.new()
            hashsum.update(f.read())
            self.assertEqual(hashsum.hexdigest(),
                             "e5941d615f53312fd66638239c1f90d5")

        def start_transfer(sd_hash):

            logging.debug("Starting the transfer")

            d = start_additional_uploaders(sd_hash)
            d.addCallback(lambda _: wait_for_ready_events())
            d.addCallback(lambda _: self.session.setup())
            d.addCallback(
                lambda _: add_lbry_file_to_sd_identifier(sd_identifier))
            d.addCallback(lambda _: self.lbry_file_manager.setup())
            d.addCallback(lambda _: download_file(sd_hash))
            d.addCallback(lambda _: check_md5_sum())

            return d

        def stop(arg):
            if isinstance(arg, Failure):
                logging.debug("Client is stopping due to an error. Error: %s",
                              arg.getTraceback())
            else:
                logging.debug("Client is stopping normally.")
            kill_event.set()
            logging.debug("Set the kill event")
            d = defer.DeferredList([
                self.wait_for_event(dead_event, 15)
                for dead_event in dead_events
            ])

            def print_shutting_down():
                logging.info("Client is shutting down")

            d.addCallback(lambda _: print_shutting_down())
            d.addCallback(lambda _: rm_db_and_blob_dir(db_dir, blob_dir))
            d.addCallback(lambda _: arg)
            return d

        d = self.wait_for_hash_from_queue(sd_hash_queue)
        d.addCallback(start_transfer)
        d.addBoth(stop)

        return d
Example No. 8
    if args.username and args.password:
        es = Elasticsearch(url.netloc,
                           request_timeout=5,
                           timeout=args.timeout,
                           http_auth=(args.username, args.password))
        if url.scheme == 'https':
            es = Elasticsearch(url.netloc,
                               use_ssl=True,
                               verify_certs=False,
                               request_timeout=5,
                               timeout=args.timeout,
                               http_auth=(args.username, args.password))
    else:
        es = Elasticsearch(url.netloc, request_timeout=5, timeout=args.timeout)
        if url.scheme == 'https':
            es = Elasticsearch(url.netloc,
                               use_ssl=True,
                               verify_certs=False,
                               request_timeout=5,
                               timeout=args.timeout)
    outq = Queue(maxsize=50000)
    alldone = Event()
    dumpproc = Process(target=dump, args=(es, outq, alldone))
    dumpproc.daemon = True
    dumpproc.start()
    while not alldone.is_set() or outq.qsize() > 0:
        try:
            print(json.dumps(outq.get(block=False)))
        except Exception:
            time.sleep(0.1)
Example No. 9
 def __init__(self, *args, **kwargs):
     self._connection = None
     self.parent_id = kwargs.pop('parent_id')
     super(BaseWorker, self).__init__(*args, **kwargs)
     self.should_exit = Event()
Example No. 10
    def test_lbry_transfer(self):
        sd_hash_queue = Queue()
        kill_event = Event()
        dead_event = Event()
        lbry_uploader = LbryUploader(sd_hash_queue, kill_event, dead_event,
                                     5209343)
        uploader = Process(target=lbry_uploader.start)
        uploader.start()
        self.server_processes.append(uploader)

        logging.debug("Testing transfer")

        wallet = FakeWallet()
        peer_manager = PeerManager()
        peer_finder = FakePeerFinder(5553, peer_manager, 1)
        hash_announcer = FakeAnnouncer()
        rate_limiter = DummyRateLimiter()
        sd_identifier = StreamDescriptorIdentifier()

        db_dir, blob_dir = mk_db_and_blob_dir()
        self.session = Session(conf.ADJUSTABLE_SETTINGS['data_rate'][1],
                               db_dir=db_dir,
                               node_id="abcd",
                               peer_finder=peer_finder,
                               hash_announcer=hash_announcer,
                               blob_dir=blob_dir,
                               peer_port=5553,
                               dht_node_port=4445,
                               use_upnp=False,
                               rate_limiter=rate_limiter,
                               wallet=wallet,
                               blob_tracker_class=DummyBlobAvailabilityTracker,
                               dht_node_class=FakeNode,
                               is_generous=self.is_generous,
                               external_ip="127.0.0.1")

        self.lbry_file_manager = EncryptedFileManager(self.session,
                                                      sd_identifier)

        def make_downloader(metadata, prm):
            factories = metadata.factories
            return factories[0].make_downloader(metadata,
                                                prm.min_blob_data_payment_rate,
                                                prm, db_dir)

        def download_file(sd_hash):
            prm = self.session.payment_rate_manager
            d = download_sd_blob(self.session, sd_hash, prm)
            d.addCallback(sd_identifier.get_metadata_for_sd_blob)
            d.addCallback(make_downloader, prm)
            d.addCallback(lambda downloader: downloader.start())
            return d

        def check_md5_sum():
            f = open(os.path.join(db_dir, 'test_file'))
            hashsum = MD5.new()
            hashsum.update(f.read())
            self.assertEqual(hashsum.hexdigest(),
                             "4ca2aafb4101c1e42235aad24fbb83be")

        @defer.inlineCallbacks
        def start_transfer(sd_hash):
            logging.debug("Starting the transfer")
            yield self.session.setup()
            yield add_lbry_file_to_sd_identifier(sd_identifier)
            yield self.lbry_file_manager.setup()
            yield download_file(sd_hash)
            yield check_md5_sum()

        def stop(arg):
            if isinstance(arg, Failure):
                logging.debug("Client is stopping due to an error. Error: %s",
                              arg.getTraceback())
            else:
                logging.debug("Client is stopping normally.")
            kill_event.set()
            logging.debug("Set the kill event")
            d = self.wait_for_event(dead_event, 15)

            def print_shutting_down():
                logging.info("Client is shutting down")

            d.addCallback(lambda _: print_shutting_down())
            d.addCallback(lambda _: rm_db_and_blob_dir(db_dir, blob_dir))
            d.addCallback(lambda _: arg)
            return d

        d = self.wait_for_hash_from_queue(sd_hash_queue)
        d.addCallback(start_transfer)
        d.addBoth(stop)

        return d
Example No. 11
 def __init__(self):
     super(RegistryServer, self).__init__()
     self.notify = Event()
Example No. 12
 def __init__(self, args):
     super().__init__()
     self.args = args
     self.is_ready = Event()
Example No. 13
    def _run_tests(self, tests, **kwargs):
        # tests = dict where the key is a test group name and the value is
        # the list of tests to run
        tests_queue = Queue()
        results_queue = Queue()
        stop_event = Event()

        pending_tests = {}
        pending_not_thread_safe_tests = {}
        completed_tests = {}
        failures = 0
        errors = 0

        start_time = time.time()
        # First run the tests which are not thread safe in the main process
        for group in self._not_thread_safe:
            if group not in tests.keys():
                continue

            group_tests = tests[group]
            del tests[group]

            logger.debug('Running tests in a main process: %s' % (group_tests))
            pending_not_thread_safe_tests[group] = group_tests
            result = self._tests_func(tests=group_tests, worker_index=None)
            results_queue.put((group, result), block=False)

        for group, tests in tests.items():
            tests_queue.put((group, tests), block=False)
            pending_tests[group] = tests

        worker_count = self._worker_count
        if worker_count == 'auto':
            worker_count = len(pending_tests)
        elif worker_count == 'cpu':
            worker_count = multiprocessing.cpu_count()

        if worker_count > len(pending_tests):
            # No need to spawn more workers than there are tests.
            worker_count = len(pending_tests)

        worker_max = self._worker_max
        if worker_max == 'auto':
            worker_max = len(pending_tests)
        elif worker_max == 'cpu':
            worker_max = multiprocessing.cpu_count()

        if worker_count > worker_max:
            # Don't spawn more workers than the configured maximum.
            worker_count = worker_max

        worker_args = (tests_queue, results_queue, stop_event)
        logger.debug("Number of workers %s " % worker_count)
        workers = self._create_worker_pool(pool_size=worker_count,
                                           target_func=self._run_tests_worker,
                                           worker_args=worker_args)

        for index, worker in enumerate(workers):
            logger.debug('Starting worker %s' % (index))
            worker.start()

        if workers:
            while pending_tests:
                try:
                    try:
                        group, result = results_queue.get(timeout=self._parent_timeout,
                                                          block=True)
                    except Exception:
                        raise Empty

                    try:
                        if group not in pending_not_thread_safe_tests:
                            pending_tests.pop(group)
                        else:
                            pending_not_thread_safe_tests.pop(group)
                    except KeyError:
                        logger.debug('Got a result for unknown group: %s' % (group))
                    else:
                        completed_tests[group] = result
                        self._print_result(result)

                        if result.failures or result.errors:
                            failures += len(result.failures)
                            errors += len(result.errors)

                            if self.failfast:
                                # failfast is enabled, kill all the active workers
                                # and stop
                                for worker in workers:
                                    if worker.is_alive():
                                        worker.terminate()
                                break
                except Empty:
                    worker_left = False

                    for worker in workers:
                        if worker.is_alive():
                            worker_left = True
                            break

                    if not worker_left:
                        break

        # We are done, signal all the workers to stop
        stop_event.set()

        end_time = time.time()
        self._exit(start_time, end_time, failures, errors)
Example No. 14
 def __init__(self):
     self.proc = None
     self.daemon = None
     self.stop = Event()
Example No. 15
# Generate hash functions (as block masks)
params['masks'] = gen_masks(params['K'], params['T'], params['L'])

# Construct an array of the first T powers of 2 (in reverse order) for converting between bit arrays and decimal values
params['bit_converter'] = 2**np.arange(params['T'])[::-1]

# Get the number of samples
pmbr = ParrallelMacsBufferReader(params['data_file'], params['K'])
params['M'] = pmbr.get_data_shape()[0]

# Multiprocessing synchronization objects
producer_lock = Lock()
analyzer_lock = Lock()

# Event indicating that all hash producers are done processing the current segment
all_producers_done_event = Event()

# Event indicating that all sketch analyzers are done
all_analyzers_done_event = Event()

# List of events indicating for every hash producer if it is idle (set) or working (clear).
free_producers_events = [Event() for i in range(params['num_producers'])]
for e in free_producers_events:
    e.set()

# Manager for constructing shared-memory objects
manager = Manager()

# Dictionary holding shared-memory objects produced by server process manager
mpc_data = dict()
Example No. 16
def test_enqueue(broker, admin_user):
    broker.list_key = 'cluster_test:q'
    broker.delete_queue()
    a = async_task('django_q.tests.tasks.count_letters',
                   DEFAULT_WORDLIST,
                   hook='django_q.tests.test_cluster.assert_result',
                   broker=broker)
    b = async_task('django_q.tests.tasks.count_letters2',
                   WordClass(),
                   hook='django_q.tests.test_cluster.assert_result',
                   broker=broker)
    # unknown argument
    c = async_task('django_q.tests.tasks.count_letters',
                   DEFAULT_WORDLIST,
                   'oneargumentoomany',
                   hook='django_q.tests.test_cluster.assert_bad_result',
                   broker=broker)
    # unknown function
    d = async_task('django_q.tests.tasks.does_not_exist',
                   WordClass(),
                   hook='django_q.tests.test_cluster.assert_bad_result',
                   broker=broker)
    # function without result
    e = async_task('django_q.tests.tasks.countdown', 100000, broker=broker)
    # function as instance
    f = async_task(multiply, 753, 2, hook=assert_result, broker=broker)
    # model as argument
    g = async_task('django_q.tests.tasks.get_task_name',
                   Task(name='John'),
                   broker=broker)
    # args,kwargs, group and broken hook
    h = async_task('django_q.tests.tasks.word_multiply',
                   2,
                   word='django',
                   hook='fail.me',
                   broker=broker)
    # args unpickle test
    j = async_task('django_q.tests.tasks.get_user_id',
                   admin_user,
                   broker=broker,
                   group='test_j')
    # q_options and save opt_out test
    k = async_task('django_q.tests.tasks.get_user_id',
                   admin_user,
                   q_options={
                       'broker': broker,
                       'group': 'test_k',
                       'save': False,
                       'timeout': 90
                   })
    # test unicode
    assert Task(name='Amalia').__unicode__() == 'Amalia'
    # check if everything has a task id
    assert isinstance(a, str)
    assert isinstance(b, str)
    assert isinstance(c, str)
    assert isinstance(d, str)
    assert isinstance(e, str)
    assert isinstance(f, str)
    assert isinstance(g, str)
    assert isinstance(h, str)
    assert isinstance(j, str)
    assert isinstance(k, str)
    # run the cluster to execute the tasks
    task_count = 10
    assert broker.queue_size() == task_count
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    # push the tasks
    for i in range(task_count):
        pusher(task_queue, stop_event, broker=broker)
    assert broker.queue_size() == 0
    assert task_queue.qsize() == task_count
    task_queue.put('STOP')
    # test wait timeout
    assert result(j, wait=10) is None
    assert fetch(j, wait=10) is None
    assert result_group('test_j', wait=10) is None
    assert result_group('test_j', count=2, wait=10) is None
    assert fetch_group('test_j', wait=10) is None
    assert fetch_group('test_j', count=2, wait=10) is None
    # let a worker handle them
    result_queue = Queue()
    worker(task_queue, result_queue, Value('f', -1))
    assert result_queue.qsize() == task_count
    result_queue.put('STOP')
    # store the results
    monitor(result_queue)
    assert result_queue.qsize() == 0
    # Check the results
    # task a
    result_a = fetch(a)
    assert result_a is not None
    assert result_a.success is True
    assert result(a) == 1506
    # task b
    result_b = fetch(b)
    assert result_b is not None
    assert result_b.success is True
    assert result(b) == 1506
    # task c
    result_c = fetch(c)
    assert result_c is not None
    assert result_c.success is False
    # task d
    result_d = fetch(d)
    assert result_d is not None
    assert result_d.success is False
    # task e
    result_e = fetch(e)
    assert result_e is not None
    assert result_e.success is True
    assert result(e) is None
    # task f
    result_f = fetch(f)
    assert result_f is not None
    assert result_f.success is True
    assert result(f) == 1506
    # task g
    result_g = fetch(g)
    assert result_g is not None
    assert result_g.success is True
    assert result(g) == 'John'
    # task h
    result_h = fetch(h)
    assert result_h is not None
    assert result_h.success is True
    assert result(h) == 12
    # task j
    result_j = fetch(j)
    assert result_j is not None
    assert result_j.success is True
    assert result_j.result == result_j.args[0].id
    # check fetch, result by name
    assert fetch(result_j.name) == result_j
    assert result(result_j.name) == result_j.result
    # groups
    assert result_group('test_j')[0] == result_j.result
    assert result_j.group_result()[0] == result_j.result
    assert result_group('test_j', failures=True)[0] == result_j.result
    assert result_j.group_result(failures=True)[0] == result_j.result
    assert fetch_group('test_j')[0].id == [result_j][0].id
    assert fetch_group('test_j', failures=False)[0].id == [result_j][0].id
    assert count_group('test_j') == 1
    assert result_j.group_count() == 1
    assert count_group('test_j', failures=True) == 0
    assert result_j.group_count(failures=True) == 0
    assert delete_group('test_j') == 1
    assert result_j.group_delete() == 0
    deleted_group = delete_group('test_j', tasks=True)
    assert deleted_group is None or deleted_group[0] == 0  # Django 1.9
    deleted_group = result_j.group_delete(tasks=True)
    assert deleted_group is None or deleted_group[0] == 0  # Django 1.9
    # task k should not have been saved
    assert fetch(k) is None
    assert fetch(k, 100) is None
    assert result(k, 100) is None
    broker.delete_queue()
Example No. 17
 def __init__(self, workload_settings, target_settings, timer=None, *args):
     self.ws = workload_settings
     self.ts = target_settings
     self.timer = timer and Timer(timer, self.abort) or None
     self.shutdown_event = timer and Event() or None
     self.worker_processes = []
Example No. 18
    startTime = time()
    showInfo = False
    noOutput = False
    headless = False
    vnc = False
    for arg in argv:
        if arg == '--info':
            showInfo = True
        if arg == '--nooutput':
            noOutput = True
        if arg == '--headless':
            headless = True
        if arg == '--vnc':
            vnc = True

    shutdownEvent = Event()
    config = Config()
    processes = []
    console = Console(ouput=not noOutput)
    driverManager = DriverManager(console, shutdownEvent)
    driverVersion = driverManager.getDriverVersion('chrome')
    browserVersion = driverManager.getBrowserVersion('chrome')
    client = boto3.client('sqs')

    totalMessageReceived = 0
    stats = Stats()
    messages = []
    lastProcessStart = 0
    lockThreadsCount = Lock()

    def _showStats():
Example No. 19
    def test_double_download(self):
        sd_hash_queue = Queue()
        kill_event = Event()
        dead_event = Event()
        lbry_uploader = LbryUploader(sd_hash_queue, kill_event, dead_event,
                                     5209343)
        uploader = Process(target=lbry_uploader.start)
        uploader.start()
        self.server_processes.append(uploader)

        logging.debug("Testing double download")

        wallet = FakeWallet()
        peer_manager = PeerManager()
        peer_finder = FakePeerFinder(5553, peer_manager, 1)
        hash_announcer = FakeAnnouncer()
        rate_limiter = DummyRateLimiter()
        sd_identifier = StreamDescriptorIdentifier()

        downloaders = []

        db_dir, blob_dir = mk_db_and_blob_dir()
        self.session = Session(
            conf.ADJUSTABLE_SETTINGS['data_rate'][1],
            db_dir=db_dir,
            node_id="abcd",
            peer_finder=peer_finder,
            hash_announcer=hash_announcer,
            blob_dir=blob_dir,
            peer_port=5553,
            use_upnp=False,
            rate_limiter=rate_limiter,
            wallet=wallet,
            blob_tracker_class=DummyBlobAvailabilityTracker,
            is_generous=conf.ADJUSTABLE_SETTINGS['is_generous_host'][1],
            external_ip="127.0.0.1")

        self.stream_info_manager = DBEncryptedFileMetadataManager(
            self.session.db_dir)
        self.lbry_file_manager = EncryptedFileManager(self.session,
                                                      self.stream_info_manager,
                                                      sd_identifier)

        @defer.inlineCallbacks
        def make_downloader(metadata, prm):
            info_validator = metadata.validator
            options = metadata.options
            factories = metadata.factories
            chosen_options = [
                o.default_value
                for o in options.get_downloader_options(info_validator, prm)
            ]
            downloader = yield factories[0].make_downloader(
                metadata, chosen_options, prm)
            defer.returnValue(downloader)

        def append_downloader(downloader):
            downloaders.append(downloader)
            return downloader

        @defer.inlineCallbacks
        def download_file(sd_hash):
            prm = self.session.payment_rate_manager
            sd_blob = yield download_sd_blob(self.session, sd_hash, prm)
            metadata = yield sd_identifier.get_metadata_for_sd_blob(sd_blob)
            downloader = yield make_downloader(metadata, prm)
            downloaders.append(downloader)
            finished_value = yield downloader.start()
            defer.returnValue(finished_value)

        def check_md5_sum():
            f = open('test_file')
            hashsum = MD5.new()
            hashsum.update(f.read())
            self.assertEqual(hashsum.hexdigest(),
                             "4ca2aafb4101c1e42235aad24fbb83be")

        def delete_lbry_file():
            logging.debug("deleting the file")
            d = self.lbry_file_manager.delete_lbry_file(downloaders[0])
            d.addCallback(
                lambda _: self.lbry_file_manager.get_count_for_stream_hash(
                    downloaders[0].stream_hash))
            d.addCallback(lambda c: self.stream_info_manager.delete_stream(
                downloaders[1].stream_hash) if c == 0 else True)
            return d

        def check_lbry_file():
            d = downloaders[1].status()
            d.addCallback(lambda _: downloaders[1].status())

            def check_status_report(status_report):
                self.assertEqual(status_report.num_known,
                                 status_report.num_completed)
                self.assertEqual(status_report.num_known, 3)

            d.addCallback(check_status_report)
            return d

        @defer.inlineCallbacks
        def start_transfer(sd_hash):
            logging.debug("Starting the transfer")
            yield self.session.setup()
            yield self.stream_info_manager.setup()
            yield add_lbry_file_to_sd_identifier(sd_identifier)
            yield self.lbry_file_manager.setup()
            yield download_file(sd_hash)
            yield check_md5_sum()
            yield download_file(sd_hash)

            yield check_lbry_file()
            yield delete_lbry_file()

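        # kill_event and dead_event are presumably created earlier in this test
        # (not shown in this excerpt): setting kill_event asks the companion
        # server process to shut down, and that process sets dead_event once it
        # has actually exited.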
        def stop(arg):
            if isinstance(arg, Failure):
                logging.debug("Client is stopping due to an error. Error: %s",
                              arg.getTraceback())
            else:
                logging.debug("Client is stopping normally.")
            kill_event.set()
            logging.debug("Set the kill event")
            d = self.wait_for_event(dead_event, 15)

            def print_shutting_down():
                logging.info("Client is shutting down")

            d.addCallback(lambda _: print_shutting_down())
            d.addCallback(lambda _: rm_db_and_blob_dir(db_dir, blob_dir))
            d.addCallback(lambda _: arg)
            return d

        d = self.wait_for_hash_from_queue(sd_hash_queue)
        d.addCallback(start_transfer)
        d.addBoth(stop)
        return d
Exemplo n.º 20
0
            for q in queues:
                q.put(w)


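# The explorer and learner used in the __main__ block below are only partially
# shown in this excerpt.  A minimal explorer loop consistent with the
# Event/Queue wiring might look like this sketch (an assumed shape, not the
# original implementation).
def explorer(global_rb, env_dict, is_training_done, q):
    while not is_training_done.is_set():
        # Drain anything the learner broadcast (e.g. updated network weights).
        while not q.empty():
            _update = q.get()
        # Push a dummy transition into the shared prioritized buffer.
        global_rb.add(obs=[0.0] * 4, act=0.0, rew=0.0,
                      next_obs=[0.0] * 4, done=0.0)
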
if __name__ == "__main__":
    buffer_size = int(1e+6)
    env_dict = {"obs": {"shape": 4},
                "act": {},
                "rew": {},
                "next_obs": {"shape": 4},
                "done": {}}
    n_explorer = 4

    # Shared, process-safe prioritized replay buffer.
    global_rb = MPPrioritizedReplayBuffer(buffer_size, env_dict)

    # Event used to tell the explorer processes that training has finished.
    is_training_done = Event()
    is_training_done.clear()

    # One queue per explorer; the learner uses them to push updates to the explorers.
    qs = [SimpleQueue() for _ in range(n_explorer)]
    ps = [Process(target=explorer,
                  args=[global_rb, env_dict, is_training_done, q])
          for q in qs]

    for p in ps:
        p.start()

    learner(global_rb, qs)
    is_training_done.set()

    for p in ps:
        p.join()
Exemplo n.º 21
0
    process_read = []

    # CL-> How the different lists work together:
    '''
    Because multiprocessing creates separate processes, each process gets its own
    address space, so data cannot be shared between them directly.
    A Manager provides proxy objects backed by a shared server process, giving
    every worker a reference to the same underlying lists.
    (A minimal, self-contained sketch of this pattern appears at the end of this snippet.)
    '''
    manager = Manager()
    initial_id = manager.list()  # Stores the initial camID before replacement
    r_id = manager.list()  # Stores the camID to replace it with (relative to camera 1)
    cam_id = manager.list()  # Stores the camera camID of detected re-identifications
    unique_id = manager.list()  # Stores the list of unique IDs
    # Flag indicating that all main detection processes have terminated, telling the
    # re-id process to end; otherwise it would keep running as a zombie in the background.
    flag = Event()

    rp = Process(target=extract_query, args=(initial_id, r_id, cam_id, flag, unique_id))

    # Start the re-id process only when more than one camera source is provided
    if len(data) > 1:  # Only run re-identification if more than one camera source is declared (Obj 3)
        rp.start()
    
    # Declare Queue objects and Processes (Obj 1)
    # For each camera, two separate processes are forked to handle the queue and detection:
    # "Process capture": handles ingestion of the camera frames into the buffer
    # "Process read": retrieves frames from the buffer and then performs detection
    for i in range(len(data)):
        queue_name = manager.Queue()
        # Queue first, then detection (Obj 2)
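
# --- A minimal, self-contained sketch of the pattern described above (assumed,
# simplified): a Manager-backed list shared by every process, one Queue feeding
# a capture process into a read process, and an Event used to stop the helpers.
# All names below are illustrative only. ---
from multiprocessing import Manager, Process, Event, Queue
from queue import Empty

def _capture(frame_q, stop_evt):
    i = 0
    while not stop_evt.is_set():
        frame_q.put(i)            # stand-in for a camera frame
        i += 1
        stop_evt.wait(0.03)       # pace the producer; returns early if stopped

def _read(frame_q, stop_evt, shared_ids):
    while not stop_evt.is_set():
        try:
            frame = frame_q.get(timeout=1)
        except Empty:
            continue
        shared_ids.append(frame)  # visible to every process via the Manager

if __name__ == '__main__':
    mgr = Manager()
    detected_ids = mgr.list()
    stop = Event()
    frames = Queue()
    procs = [Process(target=_capture, args=(frames, stop)),
             Process(target=_read, args=(frames, stop, detected_ids))]
    for p in procs:
        p.start()
    stop.wait(1)                  # stand-in for the detection work
    stop.set()                    # tell both helpers to finish
    for p in procs:
        p.join()
    print(len(detected_ids), "frames were shared through the Manager list")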
Exemplo n.º 22
0
 def setup(self):
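     # The two Events act as cooperative stop/pause flags that child tasks can
     # check while running.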
     self.root = RootTask(should_stop=Event(), should_pause=Event())
     self.task = ConditionalTask(name='Test')
     self.root.add_child_task(0, self.task)
     self.check = CheckTask(name='check')
     self.task.add_child_task(0, self.check)
Exemplo n.º 23
0
contentWeight = 6
relationWeight = 6
##########################

#
# # In[4]:
#
# print len(datasets)
# extracted_graph = extract(datasets)
# # with open("/Users/jimmccusker/Dropbox/Public/ecstra-unstemmed.ttl",'wb') as out:
# with open("/home/jason/Documents/TWC/linkipedia/output_file/ecstra-unstemmed-dataone-0318.ttl",'wb') as out:
#     out.write(extracted_graph.serialize(format='turtle'))

NUMBER_OF_PROCESSES = 6

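# Set by any worker whose jobs.get() times out (i.e. the job queue has drained),
# so the parent can tell that processing is finished; the waiting side is not
# shown in this excerpt.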
processed = Event()


def work(id, jobs, result):
    while True:
        try:
            dataset = jobs.get(timeout=10)
        except Empty:
            processed.set()
            break
        try:
            matched_classes = collections.defaultdict(set)
            r = set()
            print(dataset)
            #abstract = get_dataset_abstract(dataset)
            attributes = get_attributes(dataset)
Exemplo n.º 24
0
def unit_test():
    global TEST
    TEST=True
    VULBOX_IP = '127.0.0.1'
    TEST_PORT = 5342
    s = Scheduler(TEAM_LIST_PATH)
    s.update_status()
#    time.sleep(1)
    s.update_state(TEST_GETSTATE)

    print('teams')
    print(s.teams)

    print('services')
    print(s.services)

    print('scripts')
    print(s.scripts)

    print('run_list')
    print(s.run_list)

    s.update_status()
    #time.sleep(1)

    tid = 20
    script_id = 2
    service_id = 1


    s.teams = {1: {"team_id": 1, "ip": '127.0.12.2'},
               tid: {"team_id": tid, "ip": '127.0.13.2'}}

    gs = copy.deepcopy(TEST_GETSTATE)
    gs['state_expire'] = 60
    gs['run_scripts'].append({"team_id": tid, "run_list": [1, script_id, 3]})
    s.update_state(gs)
    print('')
    print(s.get_rand_delay(s.run_list[tid]))

    tid = 1
    sid = 1
    srvid = 1

    # Test a plain script run
    print('trying to run script')
    p = os.path.join(os.getcwd(), 'test', 'scripts', '20', '2', '2.py')
    try:
        # The Event() passed here presumably acts as a per-script stop flag.
        s.runscript(Event(), tid, sid, srvid, 300, 'setflag',
                    p, VULBOX_IP, TEST_PORT, 0.01)
    except Exception as e:
        print('Exception ' + str(e))


    s.update_status()
    #time.sleep(1)

    # Test a bundled script run
    print('testing bundle script run')
    is_bundle = 1
    fn = s.get_script_repo_path(sid)
    print('Repo path ' + fn)
    if os.path.exists(fn):
        print(fn + ' exists')
        os.remove(fn)
    s.update_script_repo(sid)
    assert os.path.exists(fn)
    print('repo updated')

    fn_tgz = s.get_script_path(tid, sid, is_bundle)
    fn_f = os.path.join(os.path.dirname(fn_tgz), 'exploit.py')

    print('Script path ' + str((fn_f, fn_tgz)))
    if os.path.exists(fn_f):
        print(fn_f + ' exists')
        os.remove(fn_f)

    if os.path.exists(fn_tgz):
        print(fn_tgz + ' exists')
        os.remove(fn_tgz)

    print('updating script')
    fn = s.update_script(tid,sid,is_bundle)
    assert fn == fn_f
    assert os.path.exists(fn_f)
    assert os.path.exists(fn_tgz)

    print('Bundled script extraction OK')
    print('Trying to run')
    try:
        s.runscript(Event(), tid, sid, srvid, 300, 'exploit',
                    fn, '127.0.2.1', TEST_PORT, 0.1)
    except Exception as e:
        print('Exception ' + str(e))
    
    exit(0)

#    s.schedule_scripts()
    c = 0
    while True:
        s.clean_process()
        s.update_status()
        c += 1
        if c == 5:
            s.kill_process()
        time.sleep(1)

    return 0
Exemplo n.º 25
0
contracts = contracts[args.skip:]

log("Setting up workers.")
# Set up multiprocessing result list and queue.
manager = Manager()

# This list contains analysis results as
# (filename, category, meta, analytics) quadruples.
res_list = manager.list()

# Holds results transiently before flushing to res_list
res_queue = SimpleQueue()

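# A minimal sketch of the flush_queue helper used below (an assumed shape; the
# real helper is defined earlier in the full script and may differ): while the
# Event is set it periodically drains the transient queue into the shared result
# list, then drains once more on shutdown.
import time

def flush_queue(run_sig, queue, result_list):
    while run_sig.is_set():
        time.sleep(0.1)
        while not queue.empty():
            result_list.append(queue.get())
    while not queue.empty():
        result_list.append(queue.get())
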
# Start the periodic flush process, only run while run_signal is set.
run_signal = Event()
run_signal.set()
flush_proc = Process(target=flush_queue,
                     args=(run_signal, res_queue, res_list))
flush_proc.start()

workers = []
avail_jobs = list(range(args.jobs))
contract_iter = enumerate(contracts)
contracts_exhausted = False

log("Analysing...\n")
try:
    while not contracts_exhausted:
        # If there are both workers and contracts available, use the former to work on the latter.
        while not contracts_exhausted and len(avail_jobs) > 0:
Exemplo n.º 26
0
    def __init__(
        self,
        exp_dir,
        policy_pickle,
        env_pickle,
        baseline_pickle,
        dynamics_model_pickle,
        feed_dicts,
        n_itr,
        num_inner_grad_steps,
        flags_need_query,
        num_rollouts_per_iter,
        config,
        simulation_sleep,
        initial_random_samples=True,
        start_itr=0,
        sampler_str='mbmpo',
    ):

        self.initial_random_samples = initial_random_samples

        worker_instances = [
            WorkerData(
                num_rollouts_per_iter=num_rollouts_per_iter,
                simulation_sleep=simulation_sleep,
            ),
            WorkerModel(),
            WorkerPolicy(num_inner_grad_steps=num_inner_grad_steps,
                         sampler_str=sampler_str),
        ]
        names = ["Data", "Model", "Policy"]
        # one queue for each worker, tasks assigned by scheduler and previous worker
        queues = [Queue(-1) for _ in range(3)]
        # worker sends task-completed notification and time info to scheduler
        worker_remotes, remotes = zip(*[Pipe() for _ in range(3)])
        # stop condition
        stop_cond = Event()
        # If the current worker needs to query, the previous worker does not auto-push
        # (consistency between the two flag lists is not checked here).
        flags_auto_push = [
            not flags_need_query[1], not flags_need_query[2],
            not flags_need_query[0]
        ]

        self.ps = [
            Process(
                target=worker_instance,
                name=name,
                args=(
                    exp_dir,
                    policy_pickle,
                    env_pickle,
                    baseline_pickle,
                    dynamics_model_pickle,
                    feed_dict,
                    queue_prev,
                    queue,
                    queue_next,
                    worker_remote,
                    start_itr,
                    n_itr,
                    stop_cond,
                    need_query,
                    auto_push,
                    config,
                ),
            ) for (worker_instance, name, feed_dict, queue_prev, queue,
                   queue_next, worker_remote, need_query, auto_push) in zip(
                       worker_instances,
                       names,
                       feed_dicts,
                       queues[2:] + queues[:2],
                       queues,
                       queues[1:] + queues[:1],
                       worker_remotes,
                       flags_need_query,
                       flags_auto_push,
                   )
        ]

        # the central scheduler sends commands and receives receipts
        self.names = names
        self.queues = queues
        self.remotes = remotes
Exemplo n.º 27
0
            'cap' + str(work) + '.pcap'
        ],
                             stdout=subprocess.PIPE)

    print("Stopping tcpcapture ...")
    p.send_signal(subprocess.signal.SIGTERM)
    print("tcpdump completed")


# Save a reference to the original signal handler for SIGINT.
default_handler = signal.getsignal(signal.SIGINT)

# Set signal handling of SIGINT to ignore mode.
signal.signal(signal.SIGINT, signal.SIG_IGN)

exit_event = Event()
work_queue = Queue()

print("exit_event  - " + str(exit_event.is_set()))

# Spawn the worker process.
cp = Process(
    target=tcpdump_capture,
    args=(exit_event, work_queue),
)
cp.start()

# Since we spawned all the necessary processes already,
# restore default signal handling for the parent process.
signal.signal(signal.SIGINT, default_handler)
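
# A sketch of how this pattern usually continues (assumed; the remainder of the
# original script is not shown here): the parent now handles Ctrl-C itself,
# signals the worker through the Event, and waits for it to finish.
try:
    work_queue.put(0)      # hypothetical work item; the real protocol belongs to tcpdump_capture
    exit_event.wait()      # block until something asks us to exit
except KeyboardInterrupt:
    print("SIGINT received, signalling the capture process to stop ...")
    exit_event.set()
cp.join()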
Exemplo n.º 28
0
 def __init__(self, pid=None):
     pid = pid or os.getpid()
     self.start_mem = psutil.Process(pid).memory_info().rss
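     # The Event is handed to the tracking process below, presumably so the tracker
     # can be told when to stop sampling memory usage (the class is not shown here).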
     self.event = Event()
     self.p = MemoryTrackingProcess(pid, self.event)
Exemplo n.º 29
0
 def setup(self):
     self.root = RootTask(should_stop=Event(), should_pause=Event())
     self.task = WhileTask(task_name='Test')
     self.root.children_task.append(self.task)
     self.check = CheckTask(task_name='check')
     self.task.children_task.append(self.check)
 def _setup_comunication(self):
     self._server_closed = Event()
     process = Process(target=self._gather_subscriptions,
                       args=(self._server_closed, ),
                       daemon=True)
     process.start()