Example #1
    def compute_async_process(self, input_queue):
        # NOTE: deprecated -- everything below this raise is unreachable.
        raise DeprecationWarning()

        assert isinstance(input_queue, type(mQueue()))
        output_queue = mQueue(5)
        p = Process(target=self.compute_async_impl,
                    args=(input_queue, output_queue))
        p.start()
        return output_queue
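
compute_async_impl is not shown by this example; as a rough sketch (the loop body and sentinel are assumptions, not the original implementation), the spawned process would drain input_queue and feed output_queue, whose capacity of 5 throttles the producer:

    def compute_async_impl(self, input_queue, output_queue):
        # Hypothetical body: consume items until a None sentinel arrives.
        while True:
            item = input_queue.get()     # blocks until an item is available
            if item is None:             # assumed shutdown sentinel
                break
            result = self.compute(item)  # assumed per-item computation
            output_queue.put(result)     # blocks once 5 results are pending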
Example #2
 def __init__(self, hostname):
     self.cmds = {}
     self.executed_cmds = mQueue()
     self.default = None
     self.output = ""
     self.hostname = hostname
     self.username = ""
     self.password = ""
     self.prompt = ""
     self.protocol = "ssh"
     self.log = None
Example #3
 def __init__(self):
     super(UDPGeolocate, self).__init__()
     # Constants
     self.GEOLOCATION_API_URL = 'http://ip-api.com/json/'
     self.WINDUMP_URL = 'https://www.winpcap.org/windump/install/bin/windump_3_9_5/WinDump.exe'
     self.PORT_PROBING_ATTEMPTS = 5
     self.CUR_DIR = os.path.dirname(os.path.realpath(__file__))
     self.HOST_IP = self.get_host_IP()
     # Initialise config attributes
     self.conf = {'min_pack_len': 200, 'port': None, 'timeout': 1}
     # Housekeeping and internal stuff
     self.running = True
     self.procs = []
     self.q = mQueue()
     # Ensure graceful exit
     signal.signal(signal.SIGINT, signal.default_int_handler)
     atexit.register(self.stop_app)
Example #4
 def _build_workers(self,
                    func,
                    n=1,
                    on_kill=None,
                    on_start=None,
                    timeout=1,
                    max_queue_size=-1):
     self.kill_workers(self.workers)
     self.workers = []
     self.queue = mQueue(max_queue_size)
     for _ in range(n):
         self.workers.append(
             ProcessWorker(
                 self.queue,
                 func,
                 on_kill=on_kill,
                 on_start=on_start,
                 timeout=timeout,
             ))
         self.workers[-1].start()
Example #5
 def __init__(self,
              func,
              n=1,
              on_kill=None,
              on_start=None,
              timeout=1,
              max_queue_size=-1):
     self.queue = mQueue(max_queue_size)
     self._timeout = timeout
     self.workers = []
     for _ in range(n):
         self.workers.append(
             ProcessWorker(
                 self.queue,
                 func,
                 on_kill=on_kill,
                 on_start=on_start,
                 timeout=timeout,
             ))
         self.workers[-1].start()
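
ProcessWorker is defined elsewhere in this project; as a hedged Python 3 sketch (the class body below is an assumption, not the original), it is a multiprocessing.Process that polls the shared queue with the given timeout and applies func to each item:

from multiprocessing import Process
from queue import Empty

class ProcessWorker(Process):
    # Hypothetical sketch of the worker contract assumed by Examples #4/#5.
    def __init__(self, queue, func, on_kill=None, on_start=None, timeout=1):
        super().__init__()
        self.queue = queue
        self.func = func
        self.on_kill = on_kill
        self.on_start = on_start
        self.timeout = timeout

    def run(self):
        if self.on_start:
            self.on_start()              # runs in the child process
        while True:
            try:
                item = self.queue.get(timeout=self.timeout)
            except Empty:
                continue                 # nothing queued yet; poll again
            if item is None:             # assumed shutdown sentinel
                break
            self.func(item)
        if self.on_kill:
            self.on_kill()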
Example #6
# Imports implied by this snippet; process1_UDPsrv and friends, addECS,
# ECSMASK, CDNSADDR, DNSTIMEOUT and DNSTRIES are defined elsewhere in the module.
import sys
from multiprocessing import Process, Queue as mQueue


def main():
    # TODO: Argparse input

    # p1 -> p2
    q1 = mQueue()
    # p2 -> p1
    q2 = mQueue()
    # p2 -> p3
    q3 = mQueue()
    # p3 -> p2
    q4 = mQueue()
    # p2 -> p4
    q5 = mQueue()
    # p4 -> p2
    q6 = mQueue()

    p1 = Process(target=process1_UDPsrv, args=(
        q1,
        q2,
    ))
    p2 = Process(target=process2_CP,
                 args=(q1, q2, q3, q4, q5, q6, addECS, ECSMASK))
    p3 = Process(target=process3_UDPsend,
                 args=(q3, q4, CDNSADDR, DNSTIMEOUT, DNSTRIES))
    p4 = Process(target=process4_TCPsend,
                 args=(q5, q6, CDNSADDR, DNSTIMEOUT, DNSTRIES))
    p1.start()
    p2.start()
    p3.start()
    p4.start()
    try:
        p1.join()
        p2.join()
        p3.join()
        p4.join()

    except KeyboardInterrupt:
        p1.terminate()
        p2.terminate()
        p3.terminate()
        p4.terminate()
        # TODO: Remember to flush IP tables
        print("--Exiting public DNS server program (Ctrl-C)--\n")
        sys.exit()

    print("Exiting public DNS server program...\n")
Example #7
# Imports implied by this snippet; the *Action argparse classes, rgwlist,
# ECSMASK and the process* targets are defined elsewhere in the module.
import argparse
import sys
from multiprocessing import Process, Queue as mQueue


def main():
    """Run the main program."""
    parser = argparse.ArgumentParser(description="Custom DNS relay server")
    parser.add_argument("-saddr",
                        "--dns_relay_server_address",
                        action=saddrAction,
                        help="Valid IPv4 address for the DNS relay server",
                        default="127.0.0.1")
    parser.add_argument("-sport",
                        "--dns_relay_server_port",
                        help="Valid port for the DNS relay server",
                        action=sportAction,
                        type=int,
                        default=53)
    parser.add_argument("-ecs",
                        "--forward_ecs",
                        action=ecsAction,
                        help="Forward ECS with DNS - yes/no",
                        default="yes")
    parser.add_argument("-tcp",
                        "--use_tcp_security_step",
                        action=tcpAction,
                        help="Use TCP DNS security step - yes/no",
                        default="yes")
    parser.add_argument("-cname",
                        "--use_cname_security_step",
                        action=cnameAction,
                        help="Use CNAME DNS security step - yes/no",
                        default="yes")
    parser.add_argument("-randrgw",
                        "--randomize_destination_rgw",
                        action=randrgwAction,
                        help="Randomize destination RGW - yes/no",
                        default="yes")
    parser.add_argument("-cnamestr",
                        "--rgw_cname_string_component",
                        help="Leftmost part of dest. RGW dns-cname-soa config",
                        default="cname")
    parser.add_argument("-dnsto",
                        "--dns_timeout",
                        help="DNS request timeout towards RGW in seconds",
                        action=dnstoAction,
                        type=int,
                        default=3)
    parser.add_argument("-dnstry",
                        "--dns_request_attempts",
                        help="Max. DNS request attempts towards RGW",
                        action=dnstryAction,
                        type=int,
                        default=3)
    parser.add_argument('-rgws',
                        '--rgws_list',
                        nargs='+',
                        action=rgwsAction,
                        help='List of RGW address (str) and port (int) pairs')

    args = parser.parse_args()
    print("Starting the custom DNS relay server...\n")
    print("Server IP address and port: {}, {}\n".format(
        args.dns_relay_server_address, str(args.dns_relay_server_port)))

    servaddr = (args.dns_relay_server_address, args.dns_relay_server_port)

    if args.forward_ecs == "yes":
        print("Client subnet forwarding with DNS ECS is ON.\n")
        forwardECS = True
    else:
        print("Client subnet forwarding with DNS ECS is OFF.\n")
        forwardECS = False

    if args.use_tcp_security_step == "yes":
        print("DNS TCP security step is ON.\n")
        TCPstep = True
    else:
        print("DNS TCP security step is OFF.\n")
        TCPstep = False

    if args.use_cname_security_step == "yes":
        print("DNS CNAME security step is ON.\n")
        CNAMEstep = True
    else:
        print("DNS CNAME security step is OFF.\n")
        CNAMEstep = False

    if args.randomize_destination_rgw == "yes":
        print("Destination RGW randomization is ON.\n")
        randomizeRGW = True
    else:
        print("Destination RGW randomization is OFF.\n")
        randomizeRGW = False

    print("CNAME string component in use: ")
    servicecname = args.rgw_cname_string_component
    print(servicecname)
    print("\n")

    dnstimeout = args.dns_timeout
    dnstries = args.dns_request_attempts
    print("DNS request timeout in seconds: ")
    print(str(dnstimeout))
    print("\n")
    print("Maximum additional DNS request attempts: ")
    print(str(dnstries))
    print("\n")

    # Populating the destination RGW list
    if args.rgws_list:
        tempaddr = 0
        tempport = 0
        ipvalue = True
        # rgws_list alternates: address, port, address, port, ...
        for x in args.rgws_list:
            if ipvalue is True:
                ipvalue = False
                tempaddr = x
            else:
                ipvalue = True
                tempport = x
                rgwlist.append((tempaddr, int(tempport)))
        print("Following destination RGWs were given:\n")
        for x in rgwlist:
            print(x)
        print("\n")
    else:
        print("No destination RGWs given, using the default: \n")
        print("addr 127.0.0.1 port 54\n ")
        rgwlist.append(("127.0.0.1", 54))

    print("Server serves forever; exit by pressing CTRL-C")

    # Creating queues for communication between processes
    # p1 -> p3 (From UDP _server_ to Data handler)
    q1 = mQueue()

    # p3 -> p1 (From Data handler to clientside UDP _sender_)
    q2 = mQueue()

    # p2 -> p3 (From TCP server to data handler)
    q3 = mQueue()

    # p3 -> p2 (From data handler to TCP server)
    q4 = mQueue()

    # p3 -> p4 (From data handler to rgwside UDP/TCP sender)
    q5 = mQueue()

    # p4 -> p3 (From rgwside UDP/TCP sender to data handler)
    q6 = mQueue()

    p1 = Process(target=process1_UDPsrv, args=(q1, q2, servaddr))
    p2 = Process(target=process2_TCPsrv, args=(q3, q4, servaddr))
    p3 = Process(target=process3_CP,
                 args=(q1, q2, q3, q4, q5, q6, servicecname, ECSMASK,
                       forwardECS))
    p4 = Process(target=process4_fwdUDP,
                 args=(q5, q6, dnstimeout, dnstries, rgwlist, randomizeRGW))
    p1.start()
    p2.start()
    p3.start()
    p4.start()
    try:
        p1.join()
        p2.join()
        p3.join()
        p4.join()

    except KeyboardInterrupt:
        p1.terminate()
        p2.terminate()
        p3.terminate()
        p4.terminate()
        # TODO: Remember to flush IP tables
        print("--Exiting Custom DNS server program (Ctrl-C)--\n")
        sys.exit()

    print("Exiting Custom DNS server program...\n")
    def start_workers(self):
        """ start worker processes """

        # setter config
        setter_conf = {}
        for attr in [
                "analyze_move", "analyze_stale", "analyze_offsubnet",
                "auto_clear_stale", "auto_clear_offsubnet",
                "notify_stale_syslog", "notify_stale_email",
                "notify_offsubnet_syslog", "notify_offsubnet_email",
                "notify_move_syslog", "notify_move_email", "notify_email",
                "notify_syslog", "notify_syslog_port", "max_ep_events",
                "worker_hello", "worker_hello_multiplier",
                "trust_subscription", "queue_interval",
                "transitory_delete_time", "transitory_stale_time",
                "transitory_xr_stale_time", "transitory_offsubnet_time",
                "monitor_disable"
        ]:
            if hasattr(self, attr): setter_conf[attr] = getattr(self, attr)

        # manually stop any/all workers before starting workers
        self.stop_workers()
        self.all_workers = []
        self.workers = {}
        for wid in range(self.max_workers):
            logger.debug("starting worker id(%s)" % wid)
            w = {
                "wid": wid,
                "txQ": mQueue(),
                "rxQ": mQueue(),
                "prQ": mQueue(),
                "last_hello": 0
            }
            p = mProcess(
                target=start_ep_worker,
                kwargs={
                    "wid": w["wid"],
                    "txQ":
                    w["rxQ"],  # swap tx/rx queue from worker perspective
                    "rxQ": w["txQ"],
                    "prQ": w["prQ"],
                    "fabric": self.fabric,
                    "overlay_vnid": self.overlay_vnid
                })
            w["process"] = p
            # enqueue any specific variables for this worker via job
            w["txQ"].put(EPJob("setter", {}, data=setter_conf))
            p.start()
            self.workers[wid] = w
            self.all_workers.append(w)

        # setup/start watch worker
        if not self.wworker_disable:
            logger.debug("starting watcher worker")
            self.wworker = {
                "wid": "watcher",
                "txQ": mQueue(),
                "rxQ": mQueue(),
                "prQ": mQueue(),
                "last_hello": 0
            }
            p = mProcess(
                target=start_ep_worker,
                kwargs={
                    "wid": self.wworker["wid"],
                    "txQ": self.wworker["rxQ"],  # swap tx/rx queue
                    "rxQ": self.wworker["txQ"],
                    "prQ": self.wworker["prQ"],
                    "fabric": self.fabric,
                    "overlay_vnid": self.overlay_vnid
                })
            self.wworker["process"] = p
            self.wworker["txQ"].put(EPJob("setter", {}, data=setter_conf))
            p.start()
            self.all_workers.append(self.wworker)
        else:
            logger.debug("skipping watch worker")

        if self.pworker_disable:
            logger.debug("skipping priority worker")
            return

        # setup/start priority queue worker
        logger.debug("starting priority worker")
        bcastQ = []
        bcastPrQ = []
        for wid in self.workers:
            bcastQ.append(self.workers[wid]["txQ"])
            bcastPrQ.append(self.workers[wid]["prQ"])
        if not self.wworker_disable:
            bcastQ.append(self.wworker["txQ"])
            bcastPrQ.append(self.wworker["prQ"])
        self.pworker = {
            "wid": "pri",
            "txQ": mQueue(),
            "rxQ": mQueue(),
            "prQ": mQueue(),
            "last_hello": 0
        }
        p = mProcess(
            target=start_ep_priority_worker,
            kwargs={
                "wid": self.pworker["wid"],
                "txQ": self.pworker["rxQ"],  # swap tx/rq queue 
                "rxQ": self.pworker["txQ"],
                "prQ": self.pworker["prQ"],
                "bcastQ": bcastQ,
                "bcastPrQ": bcastPrQ,
                "fabric": self.fabric,
            })
        self.pworker["txQ"].put(EPJob("setter", {}, data=setter_conf))
        self.pworker["txQ"].put(EPJob("init", {}))
        self.pworker["process"] = p
        p.start()
        self.all_workers.append(self.pworker)

        # wait for priority worker setter and init job to successfully complete
        job = None
        try:
            # the worker acknowledges the setter job first, then the init job
            job_setter = self.pworker["rxQ"].get(True, 3.0)
            job = self.pworker["rxQ"].get(True, 3.0)
        except Empty:
            pass
        if job is None or "success" not in job.key or not job.key["success"]:
            err = "failed to initialize priority worker"
            if job is not None and "error" in job.key: err = job.key["error"]
            logger.warning(err)
            self.stop_workers()
            raise Exception(err)
        else:
            logger.debug("successfully initialized priority worker")
Example #9
from Queue import Queue  # Python 2; on Python 3 use "from queue import Queue"
from multiprocessing import Queue as mQueue
from messages import QUIT

# note: we always expect queue items to be tuples: 
# [0] is a message type, [1] is an optional dict of arguments

gui_queue = Queue()
controller_queue = Queue()
upload_queue = Queue()
api_queue = Queue()
photo_queue = mQueue()

all_queues = ( gui_queue, photo_queue, controller_queue, upload_queue, api_queue )

def send_quit_all( except_for=None ):
    if except_for:
        for q in all_queues:
            if q not in except_for:
                q.put( QUIT )
    else:
        for q in all_queues:
            q.put( QUIT )
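
Given the tuple convention noted in the comment above, a typical exchange over photo_queue might look like this sketch (the PHOTO_TAKEN type and its argument are hypothetical):

photo_queue.put(("PHOTO_TAKEN", {"path": "/tmp/shot.jpg"}))

msg = photo_queue.get()
if msg == QUIT:
    pass  # this consumer should shut down
else:
    msg_type, msg_args = msg[0], (msg[1] if len(msg) > 1 else {})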

Example #10
def generate_masterless_pillars(ids_=None,
                                skip=None,
                                processes=None,
                                executable=None,
                                threads=None,
                                debug=False,
                                local=None,
                                timeout=None,
                                loglevel=None,
                                config_dir=None,
                                env=None,
                                *args,
                                **kwargs):
    _s = __salt__
    _o = __opts__
    locs = _s['mc_locations.settings']()
    if processes is None:
        try:
            grains = salt.loader.grains(_o)
            processes = int(grains['num_cpus'])
        except (KeyError, ValueError):
            processes = 0
        if processes < 2:
            processes = 2
    if not executable:
        executable = os.path.join(locs['msr'], 'bin/salt-call')
    if not config_dir:
        config_dir = _o['config_dir']
    if not loglevel:
        loglevel = _o['log_level']
    if local is None:
        local = _o['file_client'] == 'local'
    ids_ = get_hosts(ids_)
    if isinstance(ids_, six.string_types):
        ids_ = ids_.split(',')
    if not threads:
        threads = 0
    if not skip:
        skip = []
    if not env:
        env = {}
    env = _s['mc_utils.dictupdate'](copy.deepcopy(dict(os.environ)), env)
    input_queue = Queue.Queue()
    if processes:
        output_queue = mQueue()
    else:
        output_queue = Queue.Queue()
    threads = int(threads)
    for ix, id_ in enumerate(ids_):
        if id_ in skip:
            log.info('Skipping pillar generation for {0}'.format(id_))
            continue
        input_queue.put(id_)
        # for debug
        # if ix >= 2: break
    workers = {}
    results = {}
    try:
        size = input_queue.qsize()
        i = 0
        while not input_queue.empty():
            i += 1
            id_ = input_queue.get()
            fargs = [id_, 'set_retcode=True']
            pargs = {
                'executable': executable,
                'func': 'mc_remote_pillar.generate_masterless_pillar',
                'args': fargs,
                'out': 'json',
                'timeout': timeout,
                'no_display_ret': True,
                'local': local,
                'config_dir': config_dir,
                'loglevel': loglevel
            }
            log.info('Getting pillar through saltcaller.call'
                     ' for {0}'.format(id_))
            log.debug('Arguments: {0}'.format(pargs))
            pargs.update({'env': env, 'output_queue': output_queue})
            log.info('ETA: {0}/{1}'.format(i, size))
            if threads:
                if len(workers) >= threads:
                    wait_pool(workers, output_queue, results)
                workers[id_] = (threading.Thread(target=saltcaller.call,
                                                 kwargs=pargs))
                workers[id_].start()
            elif processes:
                if len(workers) >= processes:
                    wait_processes_pool(workers, output_queue, results)
                workers[id_] = (multiprocessing.Process(target=saltcaller.call,
                                                        kwargs=pargs))
                workers[id_].start()
            else:
                saltcaller.call(**pargs)
                while not output_queue.empty():
                    item = output_queue.get()
                    handle_result(results, item)
        if threads:
            wait_pool(workers, output_queue, results)
        elif processes:
            wait_processes_pool(workers, output_queue, results)
    except (KeyboardInterrupt, Exception):
        if threads:
            for id_ in [a for a in workers]:
                th = workers.pop(id_, None)
                if th.is_alive() and th.ident:
                    th.join(0.01)
        elif processes:
            for id_ in [a for a in workers]:
                th = workers.pop(id_, None)
                if th.is_alive() and th.ident:
                    th.terminate()
        raise
    return results
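
wait_pool and wait_processes_pool come from elsewhere in this module; their role here is to reap finished workers and drain output_queue into results. A hedged sketch of the process variant (an assumption, not the original):

def wait_processes_pool(workers, output_queue, results):
    # Hypothetical sketch: join the running salt-call children, then drain
    # everything they pushed onto the shared output queue.
    for id_ in list(workers):
        workers.pop(id_).join()
    while not output_queue.empty():
        handle_result(results, output_queue.get())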
Example #11
    baseq.put('brag')
    baseq.put('facemask')
    baseq.put('getoutofhere')
    print('Queue-based queue: ', baseq)
    print('Queue-based queue: ', baseq.get())
    print('Queue-based queue: ', baseq.get())
    print('Queue-based queue: ', baseq.get())

    # multiprocessing queue: shared job queues allow queued items to be
    # processed in parallel by multiple concurrent workers. Process-based
    # parallelization is popular in CPython because of the global interpreter
    # lock (GIL), which prevents parallel execution on a single interpreter
    # process. multiprocessing.Queue works around the GIL limitation.
    # This type of queue can store and transfer any pickle-able object across
    # process boundaries.
    from multiprocessing import Queue as mQueue
    print('=' * 50)
    mqueue = mQueue()
    mqueue.put('moxiao')
    mqueue.put('grab')
    mqueue.put('bangkok')
    print('multiprocessing Queue-based: ', mqueue)
    mqueue.get()
    print('multiprocessing Queue-based: ', mqueue)
    print('multiprocessing Queue-based: ', mqueue.get())
    print('multiprocessing Queue-based: ', mqueue.get())
    # if mqueue is empty, get() blocks the current process and waits
    #print('multiprocessing Queue-based: ', mqueue.get())
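
If blocking is unacceptable, get_nowait() (or get with a timeout) raises queue.Empty instead of waiting; a short sketch:

    from queue import Empty  # on Python 2: from Queue import Empty
    try:
        print('multiprocessing Queue-based: ', mqueue.get_nowait())
    except Empty:
        print('multiprocessing Queue-based: queue is empty')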
    
Example #12
    def __init__(self, splited_keys, images, datasets, config_input, augmenter,
                 perception_interface):
        # sample inputs
        # splited_keys: _splited_keys_train[i_labels_per_division][i_steering_bins_perc][a list of keys]
        # images: [i_sensor][i_file_number] = (lastidx, lastidx + x.shape[0], x)
        # datasets: [i_target_name] = dim*batch matrix, where batch=#all_samples
        # config_input: configInputs
        # augmenter: config_input.augment

        # save the inputs
        self._splited_keys = splited_keys
        self._images = images
        self._targets = np.concatenate(
            tuple(datasets),
            axis=1)  # concatenate the datasets; shape is totalnum * totaldim
        self._config = config_input
        self._augmenter = augmenter

        self._batch_size = config_input.batch_size

        # prepare all the placeholders: 3 sources: _queue_image_input, _queue_targets, _queue_inputs
        self._queue_image_input = tf.placeholder(
            tf.float32,
            shape=[
                config_input.batch_size, config_input.feature_input_size[0],
                config_input.feature_input_size[1],
                config_input.feature_input_size[2]
            ])

        self._queue_shapes = [self._queue_image_input.shape]

        # config.targets_names: ['wp1_angle', 'wp2_angle', 'Steer', 'Gas', 'Brake', 'Speed']
        self._queue_targets = []
        for i in range(len(self._config.targets_names)):
            self._queue_targets.append(
                tf.placeholder(tf.float32,
                               shape=[
                                   config_input.batch_size,
                                   self._config.targets_sizes[i]
                               ]))
            self._queue_shapes.append(self._queue_targets[-1].shape)

        # self.inputs_names = ['Control', 'Speed']
        self._queue_inputs = []
        for i in range(len(self._config.inputs_names)):
            self._queue_inputs.append(
                tf.placeholder(tf.float32,
                               shape=[
                                   config_input.batch_size,
                                   self._config.inputs_sizes[i]
                               ]))
            self._queue_shapes.append(self._queue_inputs[-1].shape)

        self._queue = tf.FIFOQueue(
            capacity=config_input.queue_capacity,
            dtypes=[tf.float32] + [tf.float32] *
            (len(self._config.targets_names) + len(self._config.inputs_names)),
            shapes=self._queue_shapes)
        self._enqueue_op = self._queue.enqueue([self._queue_image_input] +
                                               self._queue_targets +
                                               self._queue_inputs)
        self._dequeue_op = self._queue.dequeue()

        #self.parallel_workers = Parallel(n_jobs=8, backend="threading")
        self.input_queue = mQueue(5)
        self.output_queue = mQueue(5)

        self.perception_interface = perception_interface

        if "mapping" in self._config.inputs_names:
            version = "v1"
            if hasattr(self._config, "mapping_version"):
                version = self._config.mapping_version
            self.mapping_helper = mapping_helper.mapping_helper(
                output_height_pix=self._config.map_height, version=version
            )  # using the default values, 30 meters of width view, 50*75*1 output size
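
Both queues above are bounded at 5 entries, so put() applies backpressure once consumers fall five batches behind. A standalone illustration of that behavior (names assumed):

from multiprocessing import Queue as mQueue
from queue import Full

q = mQueue(5)              # capacity 5, like input_queue/output_queue above
for i in range(5):
    q.put(i)               # fills the queue to capacity
try:
    q.put(5, timeout=0.1)  # bounded put() blocks; here it times out instead
except Full:
    print('producer throttled: queue is full')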