Example #1
File: api_base.py Project: wrtcoder/wiwo
 def __init__(self, number_of_values):
     self.dict = multiprocessing.Manager().dict()
     self.number_of_values = number_of_values
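The snippet above only creates the shared dict proxy. A minimal, self-contained sketch (hypothetical worker and names, not taken from the wiwo project) of how such a Manager dict can be written to from child processes and read back in the parent:

import multiprocessing


def fill_slot(shared, index):
    # Each worker writes its own key; the Manager proxy forwards the update
    # to the manager process, so every process sees the same mapping.
    shared[index] = index * index


if __name__ == '__main__':
    shared = multiprocessing.Manager().dict()
    workers = [multiprocessing.Process(target=fill_slot, args=(shared, i))
               for i in range(4)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print(dict(shared))  # e.g. {0: 0, 1: 1, 2: 4, 3: 9}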
Example #2
def main(pop_name: str,
         version: int,
         iterations: int = 100,
         unused_cpu: int = 2,
         use_backup: bool = False):
    # Check if valid population name
    if pop_name not in SUPPORTED:
        raise Exception(f"Population '{pop_name}' not supported!")
    # Create the population
    cfg = get_config()
    folder = get_folder(experiment_id=8)
    pop = Population(
        name=f'{pop_name}/v{version}',
        config=cfg,
        folder_name=folder,
        use_backup=use_backup,
    )

    # Replace the population's initial genomes with the genomes of the requested topology
    if pop.generation == 0:
        for g_id in pop.population.keys():
            pop.population[g_id] = get_topology(pop_name, gid=g_id, cfg=cfg)
        pop.species.speciate(config=pop.config,
                             population=pop.population,
                             generation=pop.generation,
                             logger=pop.log)

    pop.log(f"\n\n\n===> RUNNING EXPERIMENT 8 FOR POPULATION '{pop}' <===\n")
    # Set games and environment used for training and evaluation
    games_train, games_eval = get_game_ids(experiment_id=8)
    train_env = get_multi_env(config=cfg)
    eval_env = get_multi_env(config=cfg)
    eval_env.set_games(games_eval, noise=False)

    for iteration in range(iterations):
        # Train the population for a single iteration
        train_env.set_games(games_train, noise=True)

        # Prepare the reporters for the new generation
        pop.reporters.start_generation(gen=pop.generation, logger=pop.log)

        # Fetch the dictionary of genomes
        genomes = list(iteritems(pop.population))

        # Initialize the evaluation-pool
        pool = mp.Pool(mp.cpu_count() - unused_cpu)
        manager = mp.Manager()
        return_dict = manager.dict()

        for genome in genomes:
            pool.apply_async(func=train_env.eval_genome,
                             args=(genome, return_dict))
        pool.close()  # Close the pool
        pool.join()  # Postpone continuation until everything is finished

        # Calculate the fitness from the given return_dict
        fitness = calc_pop_fitness(
            fitness_cfg=pop.config.evaluation,
            game_cfg=cfg.game,
            game_obs=return_dict,
            gen=pop.generation,
        )
        for i, genome in genomes:
            genome.fitness = fitness[i]

        # Update the population's best_genome
        best = None
        for g in itervalues(pop.population):
            if best is None or g.fitness > best.fitness: best = g
        pop.reporters.post_evaluate(population=pop.population,
                                    species=pop.species,
                                    best_genome=best,
                                    logger=pop.log)

        # Store this generation's best genome and fitness
        genomes = sorted(pop.population.items(),
                         key=lambda x: x[1].fitness,
                         reverse=True)
        pop.best_fitness[pop.generation] = genomes[0][1].fitness
        pop.best_genome_hist[pop.generation] = genomes[0]
        pop.best_genome = best

        # Let population evolve
        evolve(pop, pop_name)

        # End generation
        pop.reporters.end_generation(population=pop.population,
                                     name=str(pop),
                                     species_set=pop.species,
                                     logger=pop.log)

        # Save the population every ten generations
        if pop.generation % 10 == 0: pop.save()
Example #3
def main(filepath):
    # Designed for fps=60 GoPro7 mp4 video
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    logging.info("{0} System Start at {1}".format(
        str(os.getpid()),
        datetime.now().strftime('%Y%m%d_%H%M%S')))
    imgs = mp.Manager().Queue()
    imgds = mp.Manager().Queue()
    detector_ready = mp.Manager().Value('i', False)
    dn_width = mp.Manager().Value('i', 416)
    dn_height = mp.Manager().Value('i', 416)
    mpdarknet = mp.Process(target=detector,
                           args=(
                               imgs,
                               imgds,
                               detector_ready,
                               dn_width,
                               dn_height,
                           ))
    mpdarknet.start()

    while not detector_ready.value:
        logging.info("Waiting for detector ready...re-check in 10s")
        time.sleep(10)
    logging.debug("now checking the module")
    kmpoints = getkmpoints()
    logging.info("loaded " + str(len(kmpoints)) + " of HMP(百公尺樁)")
    if (len(kmpoints) < 2):
        logging.warning("not enough HMP for count\r\nExit.....")
        sys.exit(1)
    points = getpoints(filepath, skip=False)
    logging.info("find " + str(len(points)) + " GPS points in file")
    logging.info("Record time taken " + str(gettimediff(points)))
    if (len(points) < 2):
        logging.warning("not enough GPS point\r\nExit.....")
        sys.exit(1)

    mpsavedat = mp.Process(target=savedata, args=(
        imgds,
        kmpoints,
        filepath,
    ))
    mpsavedat.start()

    cap = cv2.VideoCapture(filepath)
    if (not cap.isOpened()):
        logging.warning("could not open: %s", filepath)
        sys.exit(1)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    video_fps = round(cap.get(cv2.CAP_PROP_FPS), 2)
    logging.info("File: {0} FPS: {1} Total Frames: {2}".format(
        filepath, str(video_fps), str(total_frames)))
    frame_sec = round(1 / video_fps, 6)
    count = 0
    cur_frame = 0
    video_time = 0

    for p in points:
        while (imgs.qsize() > 700):
            logging.warning("Pause for waiting detector processing 60s")
            time.sleep(60)
        if (count % 10 == 0):
            logging.info("{0} imgs in the Queue".format(str(imgs.qsize())))
        count = count + 1
        logging.info("Start to processing point " + str(count))
        cur_point = SimpleNamespace(lat=p.latitude,
                                    lon=p.longitude,
                                    time=p.time)
        #TRA meter
        cur_point.hmd = kmplush(kmpoints, cur_point)
        #
        logging.debug(cur_point.__dict__)

        one_sec = video_time
        while ((video_time - one_sec) < 1):
            cur_frame = cap.get(cv2.CAP_PROP_POS_FRAMES)
            video_time = cur_frame * frame_sec
            while (cur_frame % 4 != 0 and cur_frame > 0):
                # logging.debug("skip frame " + str(cur_frame) + " Video_Time:" + str(video_time) + " last_time:" + str(one_sec))
                cap.grab()
                cur_frame = cap.get(cv2.CAP_PROP_POS_FRAMES)
                video_time = cur_frame * frame_sec
            if ((count == 1 and (video_time + frame_sec - one_sec) > 0.5)
                    or (video_time + frame_sec - one_sec) > 1):
                break
            logging.debug("processing frame " + str(cur_frame) +
                          " Video_Time:" + str(video_time) + " one_sec:" +
                          str(video_time - one_sec))
            success, frame = cap.read()
            frame = cv2.flip(frame, flipCode=-1)
            if (not success):
                logging.warning("frame {0} read fail skip.....".format(
                    str(cur_frame)))
            else:
                job = cur_point
                job.frame = cv2.resize(frame,
                                       (dn_width.value, dn_height.value),
                                       interpolation=cv2.INTER_AREA)[..., ::-1]
                job.frame_count = cur_frame
                imgs.put(job)

        # Frame base point switch for 60 FPS
        # for frame_in_second in range(15):
        #     cur_frame = cap.get(cv2.CAP_PROP_POS_FRAMES)
        #     if(count == 1 and frame_in_second == 8):
        #         break
        #     while(cur_frame % 4 != 0 and cur_frame > 0):
        #         cap.grab()
        #         cur_frame = cap.get(cv2.CAP_PROP_POS_FRAMES)
        #     logging.debug("processing frame " + str(cur_frame))
        #     success, frame = cap.read()
        #     frame = cv2.flip(frame, flipCode=-1)
        #     if(not success):
        #         logging.warning("frame {0} read fail skip.....".format(str(cur_frame)))
        #     else:
        #         job = cur_point
        #         job.frame = cv2.resize(frame, (dn_width.value,dn_height.value), interpolation=cv2.INTER_AREA)[...,::-1]
        #         job.frame_count = cur_frame
        #         imgs.put(job)
        if (cur_frame >= total_frames or count == len(points)):
            break
    while (not (imgs.empty() and imgds.empty())):
        logging.debug("Waiting for all jobs done.....")
        time.sleep(10)
    logging.info("Process done")
    mpdarknet.terminate()
    mpsavedat.terminate()
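A minimal sketch (hypothetical worker names, reduced to the readiness-flag handshake used above) of a child process signalling readiness through a Manager Value while receiving jobs on a Manager Queue:

import multiprocessing as mp
import time


def detector(imgs, detector_ready):
    detector_ready.value = True        # tell the parent we are ready
    while True:
        job = imgs.get()
        if job is None:                # sentinel: no more work
            break
        print("detected something in", job)


if __name__ == "__main__":
    imgs = mp.Manager().Queue()
    detector_ready = mp.Manager().Value('i', False)
    worker = mp.Process(target=detector, args=(imgs, detector_ready))
    worker.start()
    while not detector_ready.value:    # same polling loop as in the example
        time.sleep(0.1)
    imgs.put("frame-0001")
    imgs.put(None)
    worker.join()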
Example #4
                lock.release()
                print("\nREACHED GOAL of 50K Steps after",
                      episod_counter.value, "episodes, in",
                      time.time() - time_start, "seconds \n")

            #time.sleep(1)

        print("Ending Worker:", worker)


if __name__ == "__main__":

    # Create Shared Variables
    lock = mp.Lock(
    )  # LOCK object to ensure only one Processes can access the locked object
    manager = mp.Manager()  # MANAGER object to create the shared variables
    collect_obs = manager.Queue(2)  # Collector of episodes from executors
    #memory_buffer = manager.Queue() # Central memory buffer -- implemented as numpy array within memorizer
    collect_examples = manager.Queue(2)  # Collector of Training Examples
    episod_counter = manager.Value('i', 0)  # Number of Finalized Episodes
    step_counter = manager.Value('i', 0)  # Number of Finalized Steps
    internal_step_counter_best = manager.Value(
        'i', 0)  # Length of the longest episode
    executor_model = manager.list()
    goal_reached = manager.Value('i', 0)  # Flag set once the goal is reached

    # Set Parallel Processes
    env_processes = []

    cores = mp.cpu_count()
    #cores = 3 # -1 as presumably 1 mp.manager takes over one process/core
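The worker bodies that consume these shared objects are not shown above; a minimal sketch (hypothetical worker, assuming the counters are updated in the usual way) of the Lock-plus-Value pattern the snippet sets up:

import multiprocessing as mp


def worker(lock, step_counter, steps):
    for _ in range(steps):
        with lock:  # only one process may touch the shared counter at a time
            step_counter.value += 1


if __name__ == "__main__":
    lock = mp.Lock()
    manager = mp.Manager()
    step_counter = manager.Value('i', 0)
    procs = [mp.Process(target=worker, args=(lock, step_counter, 1000))
             for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(step_counter.value)  # 4000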
Example #5
    def __init__(self,
                 iperf_config,
                 identifier,
                 wlan_device=None,
                 access_point=None,
                 port_range_start=5201):

        self.identifier = identifier
        self.log = tracelogger.TraceLogger(
            WmmTransceiverLoggerAdapter(logging.getLogger(),
                                        {'identifier': self.identifier}))
        # WlanDevice or AccessPoint, that is used as the transceiver. Only one
        # will be set. This helps consolidate association, setup, teardown, etc.
        self.wlan_device = wlan_device
        self.access_point = access_point

        # Parameters used to create IPerfClient and IPerfServer objects on
        # device
        self._iperf_config = iperf_config
        self._test_interface = self._iperf_config.get('test_interface')
        self._port_range_start = port_range_start
        self._next_server_port = port_range_start

        # Maps IPerfClients, used for streams from this device, to True if
        # available, False if reserved
        self._iperf_clients = {}

        # Maps IPerfServers, used to receive streams from other devices, to True
        # if available, False if reserved
        self._iperf_servers = {}

        # Maps ports of servers, which are provided to other transceivers, to
        # the actual IPerfServer objects
        self._iperf_server_ports = {}

        # Maps stream UUIDs to IPerfClients reserved for that stream's use
        self._reserved_clients = {}

        # Maps stream UUIDs to (WmmTransceiver, IPerfServer) tuples, where the
        # server is reserved on the transceiver for that stream's use
        self._reserved_servers = {}

        # Maps with shared memory functionality to be used across the parallel
        # streams. active_streams holds UUIDs of streams that are currently
        # running on this device (mapped to True, since there is no
        # multiprocessing set). stream_results maps UUIDs of streams completed
        # on this device to IPerfResult results for that stream.
        self._manager = multiprocessing.Manager()
        self._active_streams = self._manager.dict()
        self._stream_results = self._manager.dict()

        # Holds parameters for streams that are prepared to run asynchronously
        # (i.e. resources have been allocated). Maps UUIDs of the future streams
        # to a dict, containing the stream parameters.
        self._pending_async_streams = {}

        # Set of UUIDs of asynchronous streams that have at least started, but
        # have not had their resources reclaimed yet
        self._ran_async_streams = set()

        # Set of stream parallel processes, which can be joined if completed
        # successfully, or terminated and joined in the event of an error
        self._running_processes = set()
Example #6
def main(domain, threads, savefile, ports, silent, verbose, enable_bruteforce,
         engines):
    bruteforce_list = set()
    search_list = set()

    if is_windows:
        subdomains_queue = list()
    else:
        subdomains_queue = multiprocessing.Manager().list()

    # Check Bruteforce Status
    if enable_bruteforce or enable_bruteforce is None:
        enable_bruteforce = True

    # Validate domain
    domain_check = re.compile(
        r"^(http|https)?[a-zA-Z0-9]+([\-\.]{1}[a-zA-Z0-9]+)*\.[a-zA-Z]{2,}$")
    if not domain_check.match(domain):
        if not silent:
            print(R + "Error: Please enter a valid domain" + W)
        return []

    if not domain.startswith('http://') and not domain.startswith('https://'):
        domain = 'http://' + domain

    parsed_domain = urlparse.urlparse(domain)

    if not silent:
        print(B +
              "[-] Enumerating subdomains now for %s" % parsed_domain.netloc +
              W)

    if verbose and not silent:
        print(
            Y +
            "[-] verbosity is enabled, will show the subdomains results in realtime"
            + W)

    supported_engines = {
        'baidu': BaiduEnum,
        'yahoo': YahooEnum,
        'google': GoogleEnum,
        'bing': BingEnum,
        'ask': AskEnum,
        'netcraft': NetcraftEnum,
        'dnsdumpster': DNSdumpster,
        'virustotal': Virustotal,
        'threatcrowd': ThreatCrowd,
        'ssl': CrtSearch,
        'passivedns': PassiveDNS
    }

    chosenEnums = []

    if engines is None:
        chosenEnums = [
            BaiduEnum, YahooEnum, GoogleEnum, BingEnum, AskEnum, NetcraftEnum,
            DNSdumpster, Virustotal, ThreatCrowd, CrtSearch, PassiveDNS
        ]
    else:
        engines = engines.split(',')
        for engine in engines:
            if engine.lower() in supported_engines:
                chosenEnums.append(supported_engines[engine.lower()])

    # Start the engines enumeration
    enums = [
        enum(domain, [], q=subdomains_queue, silent=silent, verbose=verbose)
        for enum in chosenEnums
    ]
    for enum in enums:
        enum.start()
    for enum in enums:
        enum.join()

    subdomains = set(subdomains_queue)
    for subdomain in subdomains:
        search_list.add(subdomain)

    if enable_bruteforce:
        if not silent:
            print(G + "[-] Starting bruteforce module now using subbrute.." +
                  W)
        record_type = False
        path_to_file = os.path.dirname(os.path.realpath(__file__))
        subs = os.path.join(path_to_file, 'subbrute', 'names.txt')
        resolvers = os.path.join(path_to_file, 'subbrute', 'resolvers.txt')
        process_count = threads
        output = False
        json_output = False
        bruteforce_list = subbrute.print_target(parsed_domain.netloc,
                                                record_type, subs, resolvers,
                                                process_count, output,
                                                json_output, search_list,
                                                verbose)

    subdomains = search_list.union(bruteforce_list)

    if subdomains:
        subdomains = sorted(subdomains, key=subdomain_sorting_key)

        if savefile:
            write_file(savefile, subdomains)

        if not silent:
            print(Y +
                  "[-] Total Unique Subdomains Found: %s" % len(subdomains) +
                  W)

        if ports:
            if not silent:
                print(G +
                      "[-] Start port scan now for the following ports: %s%s" %
                      (Y, ports) + W)
            ports = ports.split(',')
            pscan = portscan(subdomains, ports)
            pscan.run()

        elif not silent:
            for subdomain in subdomains:
                print(G + subdomain + W)
    return subdomains
Example #7
 SWISS_PROT_MAP  = sys.argv[4]
 gene_map_file   = sys.argv[5]
 freq_file       = sys.argv[6]
 hq_output       = sys.argv[7]
 lq_output       = sys.argv[8]
 
 
 
 gene_length_dict = import_gene_map(gene_map_file)
 freq_dict = import_freq_file(freq_file)
 print(dt.today(), "size of freq dict:", len(freq_dict))
 
 ec_process_list = []
 #-----------------------------------------
 # import the data
 manager = mp.Manager()
 diamond_ec_lq_mgr_dict = manager.dict()
 diamond_ec_hq_mgr_dict = manager.dict()
 swissprot_map_dict = create_swissprot_map(SWISS_PROT_MAP)
     
 diamond_ec_process = mp.Process(
     target = import_diamond_ec_v2, 
     args = (diamond_file, swissprot_map_dict, gene_length_dict, diamond_ec_lq_mgr_dict, diamond_ec_hq_mgr_dict)
 )
 diamond_ec_process.start()
 ec_process_list.append(diamond_ec_process)
 
 priam_ec_hq_mgr_dict = manager.dict()
 priam_prob_hq_mgr_dict = manager.dict()
 priam_ec_hq_process = mp.Process(
     target = import_priam_ec, 
Example #8
def main(args):
    clear()
    CONFIG = ConfigManager()
    PARSER = Parser()
    # change to custom download dir if user has specified
    if args.output:
        CONFIG.download_dir = args.output

    QUALITIES = (CONFIG.quality
                 if not args.resolution else args.resolution).split(",")
    # validating qualities object to fit format:
    if not verify_quality(QUALITIES):
        print("Bad resolution specified, aborting...")
        exit(1)

    downloads = []
    if args.download:  # we want to set the correct title
        title = Parser().get_episodes(args.download)[0]["title"]

    downloads = multiprocessing.Manager().dict()
    procs = []
    l = multiprocessing.Lock()

    # all the variables set, lets start with the iterations:
    for show, last_watched in [(title,
                                0)] if args.download else CONFIG.subscriptions:
        proc = multiprocessing.Process(target=fetch_episodes,
                                       args=(PARSER, show, last_watched,
                                             downloads, args, l))

        proc.start()
        procs.append(proc)

    for proc in procs:
        proc.join()

    downloads_flat = flatten_dict(downloads)

    # after we iterated over all of the shows we have a list of stuff to download,
    # but first we must check whether the list contains data:
    if not downloads_flat:
        if args.download:  # we want to display a different message in each case.
            print(fg(1) + "Couldn't find specified anime. Exiting" + fg.rs)
        else:
            print(fg(1) + 'No new episodes were found. Exiting ' + fg.rs)
        exit(1)  # arguably should be exit code 0

    # summarizing info about the download
    reprint_results(downloads, QUALITIES)
    inp = input(
        f'{fg(3)}\nwould you like to re-arrange the downloads? (Return for default) {fg.rs}'
    )
    if inp == "":  # continue as usual.
        pass

    elif inp in ("Y", "y", "yes", "Yes"):  # do the re-arrangement
        print(
            "press SPACE to toggle select a show, use UP/DOWN to arrange, when done - press RETURN"
        )

        # set some helpful variables
        shows_download_keys = downloads.keys()
        current_index = 0
        selected = False

        while True:
            # printing all of the info from before, to reset the new data
            reprint_results(downloads, QUALITIES)
            print(
                f'{fg(3)}\nwould you like to re-arrange the downloads? (Return for default) {fg.rs}',
                inp)
            print(
                "press SPACE to toggle select a show, use UP/DOWN to arrange, when done - press RETURN"
            )
            for i, show in enumerate(
                    shows_download_keys
            ):  # here we set the colors of the new data
                if i == current_index:
                    if selected:
                        print(f"{fg(12)}{i+1}. {show}{fg.rs}")
                    else:
                        print(f"{fg(3)}{i+1}. {fg.rs}{show}")
                else:
                    print(f"{i+1}. {show}")

            keypress = getKey()
            if keypress == " ":  # SPACE
                selected = not selected

            elif keypress in ("up", "down"):  # ARROWS (ANY)
                # this has to be done no matter which arrow key is pressed:
                if selected:
                    removed = shows_download_keys.pop(current_index)

                # this is how it works to detect the individual arrows.
                # https://www.daniweb.com/posts/jump/1087957
                if keypress == "up":  # UP
                    if current_index > 0:
                        current_index -= 1

                elif keypress == "down":  # DOWN
                    if current_index < len(shows_download_keys) - (
                            0 if selected else 1):
                        current_index += 1

                # once we know which key was pressed, we need to put back the value we removed
                if selected:
                    shows_download_keys.insert(current_index, removed)

            elif keypress == "\r":  # RETURN
                for show in shows_download_keys:
                    downloads[show] = downloads.pop(show)

                downloads_flat = flatten_dict(downloads)
                print(
                    f"{fg(3)}The download order will be as follows:{fg.rs}\n")
                for episode in downloads_flat:
                    for quality in QUALITIES:
                        print(
                            f'{episode["title"]} - {episode["episode"]} [{quality}p].mkv'
                        )
                break

    else:
        print(fg(1) + 'aborting download\n' + fg.rs)
        exit(1)

    # let the downloads begin!
    abs_path = os.path.expanduser(CONFIG.download_dir)
    for episode_obj in downloads_flat:
        download(episode_obj, QUALITIES, abs_path)

        if not args.download:
            CONFIG.conf["subscriptions"][episode_obj["title"].lower(
            )] = episode_obj["episode"].lstrip("0")
            with open(os.path.join(CONFIG.dir, CONFIG.file), "w") as f:
                CONFIG.conf.write(f)
Example #9
# -*- coding:utf8 -*-
import stockanalysis as sa
import os
import datetime
from stockanalysis import glbdata
import tushare as ts
import pandas as pd
import multiprocessing
import string

if __name__ == '__main__':
    validation = multiprocessing.Manager().list([('000000', False)])
    # Read in stock code
    stock_code_list = pd.read_csv(
        os.getenv('TRADERHOME') + 'container/puretxt/stock_code.csv')
    i = 1

    #start_date = datetime.date(2014, 1, 5)
    #today = datetime.date.today()
    #diff = today - start_date
    #total_days = diff.days
    start_date = '2014-06-01'
    nr_of_try = 0
    while (nr_of_try < 5):
        data = ts.get_k_data('000001', start=start_date, retry_count=3)
        total_days = data['date'].size
        if (nr_of_try < 1):
            pre_total_days = total_days
            nr_of_try += 1
        else:
            if (pre_total_days == total_days):
Example #10
File: subfi.py Project: R3D-user/subfi
def main(domain, silent):
    bruteforce_list = set()
    search_list = set()

    subdomains_queue = multiprocessing.Manager().list()

    # Validate domain
    domain_check = re.compile(
        r"^(http|https)?[a-zA-Z0-9]+([\-\.]{1}[a-zA-Z0-9]+)*\.[a-zA-Z]{2,}$")
    if not domain_check.match(domain):
        if not silent:
            print(R + "Error: გთხოვ მიუთითე სწორი ვებ-გვერდის მისამართი" + W)
        return []

    if not domain.startswith('http://') and not domain.startswith('https://'):
        domain = 'http://' + domain

    parsed_domain = urlparse.urlparse(domain)

    print(Y + "[+] მიმდინარეობს ქვედომენების ძებნა მისამართზე %s" %
          parsed_domain.netloc + W)

    supported_engines = {
        'baidu': BaiduEnum,
        'yahoo': YahooEnum,
        'google': GoogleEnum,
        'bing': BingEnum,
        'ask': AskEnum,
        'netcraft': NetcraftEnum,
        'dnsdumpster': DNSdumpster,
        'virustotal': Virustotal,
        'threatcrowd': ThreatCrowd,
        'ssl': CrtSearch,
        'passivedns': PassiveDNS
    }

    chosenEnums = [
        BaiduEnum, YahooEnum, GoogleEnum, BingEnum, AskEnum, NetcraftEnum,
        DNSdumpster, Virustotal, ThreatCrowd, CrtSearch, PassiveDNS
    ]
    # Start the engines enumeration
    enums = [
        enum(domain, [], q=subdomains_queue, silent=silent)
        for enum in chosenEnums
    ]
    for enum in enums:
        enum.start()
    for enum in enums:
        enum.join()

    subdomains = set(subdomains_queue)
    for subdomain in subdomains:
        search_list.add(subdomain)

    subdomains = search_list.union(bruteforce_list)

    if subdomains:
        subdomains = sorted(subdomains, key=subdomain_sorting_key)
        print(Y + "[+] სულ მოიძებნა: %s ქვედომენი" % len(subdomains) + W)

        for subdomain in subdomains:
            print(G + subdomain + W)
    return subdomains
Example #11
def test_proxy(proxy):
    try:
        print('Proxy:', proxy)
        httpproxy_handler = urllib.request.ProxyHandler(proxy)
        opener = urllib.request.build_opener(httpproxy_handler)
        req = urllib.request.Request(url, headers=random.choice(headers))
        for _ in range(5):
            html = opener.open(req, timeout=5).read().decode()
    except Exception:
        print('Fail.')
    else:
        print('Success.')
        proxy_list.append(proxy)


proxy_list = mp.Manager().list()
url = 'https://book.douban.com/'
with open(path + '/Data/proxies.txt', 'r') as f:
    proxy_txt = f.read().split('\n')
proxies = [{"https": proxy} for proxy in proxy_txt]
# with open('proxies.json', 'r') as f:
#     proxies = json.loads(f.read())

pool = mp.Pool(200)
pool.map(test_proxy, proxies)
pool.close()
pool.join()

proxy_list = [proxy for proxy in proxy_list]
with open(path + '/Data/proxies.json', 'w') as f:
    json.dump(proxy_list, f, indent=4)
Example #12
 def __init__(self):
     # Using a Manager here to create the Queue resolves timeout
     # issue on Windows.
     self.result_queue = mproc.Manager().Queue(1)
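A minimal sketch (hypothetical producer, assuming the same pattern) of why a Manager-backed Queue of size 1 is a convenient way to hand a single result back to the parent, including on Windows:

import multiprocessing as mproc


def produce(result_queue):
    # The Queue proxy can be passed to a child process like a regular queue.
    result_queue.put("done")


if __name__ == "__main__":
    result_queue = mproc.Manager().Queue(1)
    p = mproc.Process(target=produce, args=(result_queue,))
    p.start()
    print(result_queue.get(timeout=5))  # -> done
    p.join()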
Example #13
 async def CAPTCHA_verify_api_check(self,
                                    CAPTCHA,
                                    jsfuncstr,
                                    use_real=True):
     if CAPTCHA in self._CAPTCHA_reused and self._CAPTCHA_reused[CAPTCHA][
             "expire"] > time.time():
         self._CAPTCHA_reused = {
             k: v
             for (k, v) in self._CAPTCHA_reused.items()
             if v["expire"] >= time.time()
         }
         del self._CAPTCHA_reused[CAPTCHA]
         with open(self.config_path, "w") as config:
             config.write(
                 json.dumps(self.__dict__,
                            ensure_ascii=False,
                            indent=2,
                            default=lambda o: None))
         return True
     try:
         check_func = js2py.eval_js(jsfuncstr)
     except Exception as e:
         raise self.generateError(
             400,
             "JsException",
             e,
             error_url="https://github.com/PiotrDabkowski/Js2Py")
     timeout = 0.1
     if self._CAPTCHA_api_response_example is None or use_real:
         timeout = 0.2
         response = await self.CAPTCHA_check(CAPTCHA,
                                             self.CAPTCHA_verify_api)
     else:
         response = self._CAPTCHA_api_response_example
     try:
         func_ret = multiprocessing.Manager().dict()
         func_ret.update({"val": None, "err": None})
         prcs = multiprocessing.Process(target=self.check_func_wrapper(
             check_func, response, func_ret),
                                        args=[response, func_ret])
         prcs.start()
         prcs.join(timeout=timeout)
         if prcs.is_alive():
             prcs.terminate()
             raise TimeoutError(
                 "TimeoutError: Maximum execution time exceeded in your response_check_function."
             )
     except Exception as e:
         raise self.generateError(
             400, "JsException", e,
             "https://github.com/PiotrDabkowski/Js2Py")
     if func_ret["err"] is not None:
         raise self.generateError(
             400, "JsRuntimeError", func_ret["err"],
             "https://github.com/PiotrDabkowski/Js2Py")
     if func_ret["val"] == True:
         return True
     elif type(func_ret["val"]) == str:
         return func_ret["val"]
     else:
         raise self.generateError(
             400, "Incompatible function",
             "Your function must return true(success) or string(error_msg).",
             "https://github.com/HuJK/O365-UC")
Example #14
config = yaml.load(open(config_file, 'r'))
pprint(config)
root = "/mnt/cephfs_new_wj/lab_ad_idea/maoyiming/data/CCComparis_vis"
imgs = os.listdir(root)
img_width = config['data_format']['img_width']
img_height = config['data_format']['img_height']
data_keys = config['data_format']['keys']
data_begin = config['data_format']['data_begin_index']
metrics = config[sys.argv[1]]['metrics']
# print(metrics)
metric_config = yaml.load(open("configs/metrics.yaml"))['metrics']
# pprint(metric_config)
print("==> Do", len(metrics), "metrics in", len(data_keys) - data_begin, "methods")

# init avgmeter
meter_dict = mp.Manager().dict()
for data_method in data_keys[data_begin:]:
    for mes in metrics:
        meter_dict["{}_{}".format(data_method, mes)] = AverageMeter()

gen_status = mp.Manager().dict()
n_procs = 1
task_num = 8570
queue = mp.Queue()


# print(meter_dict.items())


def mp_run(batch_size):
    for i in range(task_num // batch_size):
Example #15
def grid_search(create_model,
                tr,
                k_fold,
                epochs,
                batch_size,
                param_grid,
                monitor_value='val_loss',
                vl=None,
                ts=None,
                path_results=None,
                n_threads=None,
                tol=None,
                verbose=False,
                shuffle=False):
    """

    @param create_model: function used to create the neural network
    @param tr: pair (X_train, Y_train)
    @param k_fold: number of folds used for K-fold cross validation technique
    @param epochs: maximum number of epochs
    @param batch_size: size of batch
    @param param_grid: dictionary used to set the hyperparameters
    @param monitor_value: criterion used to select the best model
    @param vl: pair (X_val, Y_val)
    @param ts: pair (X_test, Y_test) It is used only for plots
    @param path_results: path used to write the grid search results
    @param n_threads: number of threads
    @param tol: tolerance
    @param verbose: used for debug
    @param shuffle: True if you want shuffle training data (at each epoch),
                    False otherwise
    @return: best model under the monitor_value criterion
    """
    print('[+] Grid search is started')
    X_train, Y_train = tr

    if n_threads is None:
        n_threads = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(processes=n_threads)
    results = multiprocessing.Manager().list()

    num_task = len(param_grid)
    current_task = 1

    for row in param_grid:
        hyperaparams = {
            list(item.keys())[0]: list(item.values())[0]
            for item in row
        }
        pool.apply_async(func=run,
                         args=(create_model, tr, vl, ts, results, verbose, tol,
                               epochs, hyperaparams['batch_size'],
                               hyperaparams, monitor_value, shuffle, k_fold,
                               (current_task, num_task)))
        current_task += 1

    pool.close()
    pool.join()

    l_results = list(results)
    l_results.sort(key=lambda x: x[0])

    if verbose:
        for val, hyperaparams, nn in l_results:
            print("{}: {}".format(monitor_value, val))

    if path_results is not None:
        write_csv(l_results, path_results, param_grid.keys(), monitor_value)

    _, best_hyps, best_model = l_results[0]

    if verbose:
        print("Best result with: ")
        print_hyperparams(best_hyps)

    print('[+] Grid search is finished')

    _, _, best_model = l_results[0]

    return best_model
Example #16
    def train(ctx):
        if isinstance(ctx, mx.Context):
            ctx = [ctx]
        if opt.resume_params == '':
            if 'ShuffleNas' in model_name:
                net._initialize(ctx=ctx)
            else:
                net.initialize(mx.init.MSRAPrelu(), ctx=ctx)

        if opt.no_wd:
            for k, v in net.collect_params('.*beta|.*gamma|.*bias').items():
                v.wd_mult = 0.0

        trainer = gluon.Trainer(net.collect_params(), optimizer,
                                optimizer_params)
        if opt.resume_states != '':
            trainer.load_states(opt.resume_states)

        if opt.label_smoothing or opt.mixup:
            sparse_label_loss = False
        else:
            sparse_label_loss = True
        if distillation:
            L = gcv.loss.DistillationSoftmaxCrossEntropyLoss(
                temperature=opt.temperature,
                hard_weight=opt.hard_weight,
                sparse_label=sparse_label_loss)
        else:
            L = gluon.loss.SoftmaxCrossEntropyLoss(
                sparse_label=sparse_label_loss)

        best_val_score = 1

        def train_epoch(pool=None,
                        pool_lock=None,
                        shared_finished_flag=None,
                        use_pool=False):
            btic = time.time()
            for i, batch in enumerate(train_data):
                if i == num_batches:
                    if use_pool:
                        shared_finished_flag.value = True
                    return
                data, label = batch_fn(batch, ctx)

                if opt.mixup:
                    lam = np.random.beta(opt.mixup_alpha, opt.mixup_alpha)
                    if epoch >= opt.num_epochs - opt.mixup_off_epoch:
                        lam = 1
                    data = [lam * X + (1 - lam) * X[::-1] for X in data]

                    if opt.label_smoothing:
                        eta = 0.1
                    else:
                        eta = 0.0
                    label = mixup_transform(label, classes, lam, eta)

                elif opt.label_smoothing:
                    hard_label = label
                    label = smooth(label, classes)

                if distillation:
                    teacher_prob = [nd.softmax(teacher(X.astype(opt.dtype, copy=False)) / opt.temperature) \
                                    for X in data]

                with ag.record():
                    if model_name == 'ShuffleNas' and use_pool:
                        cand = None
                        while cand is None:
                            if len(pool) > 0:
                                with pool_lock:
                                    cand = pool.pop()
                                    logger.debug('[Trainer]' + '-' * 40)
                                    logger.debug("Time: {}".format(
                                        time.time()))
                                    logger.debug("Block choice: {}".format(
                                        cand['block_list']))
                                    logger.debug("Channel choice: {}".format(
                                        cand['channel_list']))
                                    logger.debug(
                                        "Flop: {}M, param: {}M".format(
                                            cand['flops'], cand['model_size']))
                            else:
                                time.sleep(1)

                        full_channel_masks = [
                            cand['channel'].as_in_context(ctx_i)
                            for ctx_i in ctx
                        ]
                        outputs = [
                            net(X.astype(opt.dtype, copy=False), cand['block'],
                                channel_mask) for X, channel_mask in zip(
                                    data, full_channel_masks)
                        ]
                    elif model_name == 'ShuffleNas':
                        block_choices = net.random_block_choices(
                            select_predefined_block=False, dtype=opt.dtype)
                        if opt.cs_warm_up:
                            full_channel_mask, channel_choices = net.random_channel_mask(
                                select_all_channels=opt.use_all_channels,
                                epoch_after_cs=epoch - opt.epoch_start_cs,
                                dtype=opt.dtype,
                                ignore_first_two_cs=opt.ignore_first_two_cs)
                        else:
                            full_channel_mask, channel_choices = net.random_channel_mask(
                                select_all_channels=opt.use_all_channels,
                                dtype=opt.dtype,
                                ignore_first_two_cs=opt.ignore_first_two_cs)

                        full_channel_masks = [
                            full_channel_mask.as_in_context(ctx_i)
                            for ctx_i in ctx
                        ]
                        outputs = [
                            net(X.astype(opt.dtype, copy=False), block_choices,
                                channel_mask) for X, channel_mask in zip(
                                    data, full_channel_masks)
                        ]
                    else:
                        outputs = [
                            net(X.astype(opt.dtype, copy=False)) for X in data
                        ]

                    if distillation:
                        loss = [
                            L(yhat.astype('float32', copy=False),
                              y.astype('float32', copy=False),
                              p.astype('float32', copy=False))
                            for yhat, y, p in zip(outputs, label, teacher_prob)
                        ]
                    else:
                        loss = [
                            L(yhat, y.astype(opt.dtype, copy=False))
                            for yhat, y in zip(outputs, label)
                        ]
                for l in loss:
                    l.backward()
                trainer.step(batch_size, ignore_stale_grad=True)

                if opt.mixup:
                    output_softmax = [nd.SoftmaxActivation(out.astype('float32', copy=False)) \
                                    for out in outputs]
                    train_metric.update(label, output_softmax)
                else:
                    if opt.label_smoothing:
                        train_metric.update(hard_label, outputs)
                    else:
                        train_metric.update(label, outputs)

                if opt.log_interval and not (i + 1) % opt.log_interval:
                    train_metric_name, train_metric_score = train_metric.get()
                    logger.info(
                        'Epoch[%d] Batch [%d]\tSpeed: %f samples/sec\t%s=%f\tlr=%f'
                        % (epoch, i, batch_size * opt.log_interval /
                           (time.time() - btic), train_metric_name,
                           train_metric_score, trainer.learning_rate))
                    btic = time.time()
            return

        def pool_maintainer(pool,
                            pool_lock,
                            shared_finished_flag,
                            upper_flops=sys.maxsize,
                            upper_params=sys.maxsize):
            lookup_table = None
            if opt.flop_param_method == 'lookup_table':
                lookup_table = load_lookup_table(opt.use_se,
                                                 opt.last_conv_after_pooling,
                                                 opt.channels_layout,
                                                 nas_root='./')
            while True:
                if shared_finished_flag.value:
                    break
                if len(pool) < 5:
                    candidate = dict()
                    block_choices, block_choices_list = net.random_block_choices(
                        select_predefined_block=False,
                        dtype=opt.dtype,
                        return_choice_list=True)
                    if opt.cs_warm_up:
                        full_channel_mask, channel_choices_list = net.random_channel_mask(
                            select_all_channels=opt.use_all_channels,
                            epoch_after_cs=epoch - opt.epoch_start_cs,
                            dtype=opt.dtype,
                            ignore_first_two_cs=opt.ignore_first_two_cs,
                        )
                    else:
                        full_channel_mask, channel_choices_list = net.random_channel_mask(
                            select_all_channels=opt.use_all_channels,
                            dtype=opt.dtype,
                            ignore_first_two_cs=opt.ignore_first_two_cs)

                    if opt.flop_param_method == 'symbol':
                        flops, model_size, _, _ = \
                            get_flop_param_forward(block_choices_list, channel_choices_list,
                                                 use_se=opt.use_se, last_conv_after_pooling=opt.last_conv_after_pooling,
                                                 channels_layout=opt.channels_layout)
                    elif opt.flop_param_method == 'lookup_table':
                        flops, model_size = get_flop_param_lookup(
                            block_choices_list, channel_choices_list,
                            lookup_table)
                    else:
                        raise ValueError(
                            'Unrecognized flop param calculation method: {}'.
                            format(opt.flop_param_method))

                    candidate['block'] = block_choices
                    candidate['channel'] = full_channel_mask
                    candidate['block_list'] = block_choices_list
                    candidate['channel_list'] = channel_choices_list
                    candidate['flops'] = flops
                    candidate['model_size'] = model_size

                    if flops > upper_flops or model_size > upper_params:
                        continue

                    with pool_lock:
                        pool.append(candidate)
                        logger.debug(
                            "[Maintainer] Add one good candidate. currently pool size: {}"
                            .format(len(pool)))

        manager = multiprocessing.Manager()
        cand_pool = manager.list()
        p_lock = manager.Lock()
        for epoch in range(opt.resume_epoch, opt.num_epochs):
            if epoch >= opt.epoch_start_cs:
                opt.use_all_channels = False
            tic = time.time()
            if opt.use_rec:
                train_data.reset()
            train_metric.reset()

            if model_name == 'ShuffleNas' and opt.train_upper_constraints:
                constraints = opt.train_upper_constraints.split('-')
                # opt.train_upper_constraints = 'flops-300-params-4.5'
                assert len(constraints) == 4 and constraints[
                    0] == 'flops' and constraints[2] == 'params'
                upper_flops = float(constraints[1]) if float(
                    constraints[1]) != 0 else sys.maxsize
                upper_params = float(constraints[3]) if float(
                    constraints[3]) != 0 else sys.maxsize
                finished = Value(c_bool, False)
                logger.debug(
                    "===== DEBUG ======\n"
                    "Train SuperNet with Flops less than {}, params less than {}"
                    .format(upper_flops, upper_params))
                pool_process = multiprocessing.Process(target=pool_maintainer,
                                                       args=[
                                                           cand_pool, p_lock,
                                                           finished,
                                                           upper_flops,
                                                           upper_params
                                                       ])
                pool_process.start()
                train_epoch(pool=cand_pool,
                            pool_lock=p_lock,
                            shared_finished_flag=finished,
                            use_pool=True)
                pool_process.join()
            else:
                logger.debug("===== DEBUG ======\n"
                             "Train SuperNet with no constraint")
                train_epoch()

            train_metric_name, train_metric_score = train_metric.get()
            throughput = int(batch_size * num_batches / (time.time() - tic))

            err_top1_val, err_top5_val = test(ctx, val_data, epoch)

            logger.info('[Epoch %d] training: %s=%f' %
                        (epoch, train_metric_name, train_metric_score))
            logger.info('[Epoch %d] speed: %d samples/sec\ttime cost: %f' %
                        (epoch, throughput, time.time() - tic))
            logger.info('[Epoch %d] validation: err-top1=%f err-top5=%f' %
                        (epoch, err_top1_val, err_top5_val))

            if err_top1_val < best_val_score:
                best_val_score = err_top1_val
                net.save_parameters(
                    '%s/%.4f-imagenet-%s-%d-best.params' %
                    (save_dir, best_val_score, model_name, epoch))
                trainer.save_states(
                    '%s/%.4f-imagenet-%s-%d-best.states' %
                    (save_dir, best_val_score, model_name, epoch))

            if save_frequency and save_dir and (epoch +
                                                1) % save_frequency == 0:
                net.save_parameters('%s/imagenet-%s-%d.params' %
                                    (save_dir, model_name, epoch))
                trainer.save_states('%s/imagenet-%s-%d.states' %
                                    (save_dir, model_name, epoch))

        if save_frequency and save_dir:
            net.save_parameters('%s/imagenet-%s-%d.params' %
                                (save_dir, model_name, opt.num_epochs - 1))
            trainer.save_states('%s/imagenet-%s-%d.states' %
                                (save_dir, model_name, opt.num_epochs - 1))
Example #17
def sync_sale_orders():
    manager = mp.Manager()
    data_pool = manager.JoinableQueue()

    orders = {}
    with open('files/omlordr1.csv', newline='') as f:
        csv_reader = csv.DictReader(f)
        for vals in csv_reader:
            order_no = vals['ORDER-NO']
            orders.setdefault(order_no, [])
            orders[order_no].append(vals)

    for ref in orders:
        data_pool.put({'ref': ref, 'orders': orders[ref]})

    sock = xmlrpclib.ServerProxy(URL, allow_none=True)
    res = sock.execute(DB, UID, PSW, 'res.partner', 'search_read', ['|', ('active', '=', False), ('active', '=', True)],
                       ['customer_code'])
    customers = {rec['customer_code']: rec['id'] for rec in res}
    partner_ids = manager.dict(customers)

    sale_rep = sock.execute(DB, UID, PSW, 'res.partner', 'search_read',
                            [('is_sales_person', '=', True), '|', ('active', '=', False), ('active', '=', True)],
                            ['id', 'sales_person_code'])
    sale_rep_ids = {rec['sales_person_code']: rec['id'] for rec in sale_rep}

    users = sock.execute(DB, UID, PSW, 'res.users', 'search_read',
                            [],
                            ['id', 'partner_id'])
    user_ids = {rec['partner_id'][0]: rec['id'] for rec in users}

    carriers = sock.execute(DB, UID, PSW, 'delivery.carrier', 'search_read', [], ['id', 'name'])
    carrier_ids = {rec['name']: rec['id'] for rec in carriers}
    carrier_ids = manager.dict(carrier_ids)

    misc_product_id = sock.execute(DB, UID, PSW, 'product.product', 'search_read', [('default_code', '=', 'misc' )], ['id'])
    if not misc_product_id:
        pro_vals = {'name':'Misc Charge', 'default_code':'misc','type': 'service',}
        misc_product_id = sock.execute(DB, UID, PSW, 'product.product', 'create', pro_vals)
    else:
        misc_product_id = misc_product_id[0]['id']
    delivery_product_id = sock.execute(DB, UID, PSW, 'product.product', 'search_read', [('default_code', '=', 'delivery_008' )], ['id'])
    if not delivery_product_id:
        del_pro_vals = {'name':'Delivery Charge', 'default_code':'delivery_008','type': 'service'}
        delivery_product_id = sock.execute(DB, UID, PSW, 'product.product', 'create', del_pro_vals)
    else:
        delivery_product_id = delivery_product_id[0]['id']
    res = sock.execute(DB, UID, PSW, 'product.product', 'search_read', ['|', ('active', '=', False), ('active', '=', True)], ['default_code'])

    payment_terms = sock.execute(DB, UID, PSW, 'account.payment.term', 'search_read', [('order_type', '=', 'sale')],
                                 ['id', 'code'])
    term_ids = {term['code']: term['id'] for term in payment_terms}
    workers = []
    for i in range(WORKERS):
        pid = "Worker-%d" % (i + 1)
        worker = mp.Process(name=pid, target=update_sale_order,
                            args=(pid, data_pool, partner_ids, term_ids, user_ids, sale_rep_ids, misc_product_id, delivery_product_id, carrier_ids))
        worker.start()
        workers.append(worker)

    data_pool.join()
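data_pool.join() above only returns once every queued order has been marked as done. The update_sale_order workers are not shown here, but a minimal consumer sketch (hypothetical body, assuming the usual JoinableQueue pattern) would call task_done() for every item it takes:

import queue


def update_sale_order(pid, data_pool, partner_ids, term_ids, user_ids,
                      sale_rep_ids, misc_product_id, delivery_product_id,
                      carrier_ids):
    while True:
        try:
            job = data_pool.get(timeout=5)  # give up once the queue stays empty
        except queue.Empty:
            break
        try:
            print(pid, "processing order", job['ref'])  # placeholder for the real sync logic
        finally:
            data_pool.task_done()  # lets data_pool.join() in the parent return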
Example #18
def vcf2json_multi2(filepath_vcf, filepath_json, md5, mode):
    # statistics
    time_start = time.time()
    manager = multiprocessing.Manager()
    statisticArr = manager.Array("i", [0, 0, 0])

    fields, samples, headers, chunks = allel.iter_vcf_chunks(
        filepath_vcf, fields=['variants/*', 'calldata/*'], chunk_length=500)
    print(filepath_vcf)
    if os.path.exists(filepath_json):
        os.remove(filepath_json)
    # Add the original vcf file's header info, for reverse conversion
    #addhead(headers[0], filepath_json)

    tmpfile = "value_" + md5 + ".dat"
    with open(tmpfile, "wb") as f:
        pickle.dump(fields, f)
        pickle.dump(samples, f)
        pickle.dump(headers, f)
        pickle.dump(filepath_json, f)

    cores = multiprocessing.cpu_count()
    processnum = max(int(cores / 2), 2)
    #processnum = min(cores, 20)
    #processnum = int(cores / 2)

    # Drive the iterator ourselves to avoid memory overflow
    pool = multiprocessing.Pool(processes=processnum)
    index = 0
    tmpchunks = []
    i = 0
    # for chunker in chunks:
    #     index+=1
    #     tmpchunks.append(chunker)
    #     if index % (processnum*10) == 0:
    #         # i += 1
    #         # print(("{0} - 1").format(i))
    #         pool.map(partial(IoOperat_multi, tmpfile, mode, statisticArr), tmpchunks)
    #         #print(("{0} - 2").format(i))
    #         #pool.map(partial(IoOperat_multi, tmpfile, mode, statisticArr), tmpchunks)
    #         # time.sleep(10)
    #         tmpchunks.clear()
    first = True
    realchunks = []

    for chunker in chunks:
        index += 1
        tmpchunks.append(chunker)
        if index % (processnum * 10) == 0:
            if not first:
                AppResult.get()
                realchunks.clear()
            realchunks = copy.deepcopy(tmpchunks)
            tmpchunks.clear()
            first = False
            AppResult = pool.map_async(
                partial(IoOperat_multi, tmpfile, mode, statisticArr),
                realchunks)

    if "AppResult" in locals().keys():
        AppResult.get()
    #print("last section")
    pool.map(partial(IoOperat_multi, tmpfile, mode, statisticArr), tmpchunks)
    tmpchunks.clear()
    if realchunks:
        realchunks.clear()
    pool.close()
    pool.join()  # The main process blocks until the child processes exit
    os.remove(tmpfile)  # Delete the temporary file to save space

    # Save the statistics
    filesize = os.path.getsize(filepath_json)
    time_end = time.time()
    time_cost = time_end - time_start
    dir = os.path.splitext(filepath_vcf)[0]
    #statisticFile = dir + '.txt'
    statisticFile = "vcf2json_results.txt"
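statisticArr above is a Manager Array handed to every pool worker through functools.partial. A minimal sketch (hypothetical counting function, not the original IoOperat_multi) of that pattern, with a manager Lock guarding the non-atomic read-modify-write:

import multiprocessing
from functools import partial


def count_records(stats, lock, chunk):
    with lock:  # the proxy update is a read-then-write, so guard it
        stats[0] += len(chunk)


if __name__ == "__main__":
    manager = multiprocessing.Manager()
    stats = manager.Array("i", [0, 0, 0])
    lock = manager.Lock()
    chunks = [[1, 2], [3], [4, 5, 6]]
    with multiprocessing.Pool(processes=2) as pool:
        pool.map(partial(count_records, stats, lock), chunks)
    print(stats[0])  # 6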
Example #19
    def start(audio_input, hearing_perception_stimulated):

        if audio_input == "0":
            pass
        else:
            wf = wave.open(
                audio_input, 'rb'
            )  # Open .wav file from given path as audio_input in arguments

        p = pyaudio.PyAudio()  # Create a PyAudio session

        # Create a stream
        if audio_input == "0":
            stream = p.open(format=FORMAT,
                            channels=CHANNELS,
                            rate=RATE,
                            input=True,
                            frames_per_buffer=CHUNK)
        else:
            stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                            channels=wf.getnchannels(),
                            rate=wf.getframerate(),
                            output=True)

        hearing_manager = multiprocessing.Manager(
        )  # Shared memory space manager
        memory_data = []  # Define memory data array
        all_frames = hearing_manager.list(
        )  # Define all_frames array in shared memory
        thresh_frames = hearing_manager.list(
        )  # Define thresh_frames array in shared memory

        if audio_input == "0":
            data = stream.read(CHUNK)  # Get first data frame from .wav file
        else:
            data = wf.readframes(CHUNK)  # Get first data frame from .wav file

        all_frames.append(data)  # Append to all frames
        thresh_frames.append(
            EMPTY_CHUNK)  # Append an EMPTY CHUNK to thresh frames

        process1 = multiprocessing.Process(
            target=HearingPerception.draw_waveform,
            args=(all_frames, thresh_frames))  # Define draw waveform process
        process1.start()  # Start draw waveform process

        process2 = multiprocessing.Process(
            target=HearingPerception.draw_spectrum_analyzer,
            args=(all_frames,
                  thresh_frames))  # Define draw spectrum analyzer process
        process2.start()  # Start draw spectrum analyzer process

        # Loop over the frames of the audio / data chunks
        while data != b'':
            previous_data = data  # Keep the previous chunk from the end of the loop

            if audio_input == "0":
                data = stream.read(CHUNK)  # Read a new chunk from the stream
            else:
                stream.write(data)  # Monitor current chunk
                data = wf.readframes(CHUNK)  # Read a new chunk from the stream

            all_frames.append(data)  # Append this chunk to all frames
            thresh_frames.append(
                EMPTY_CHUNK)  # Append an EMPTY CHUNK to thresh frames

            rms = audioop.rms(data,
                              2)  # Calculate Root Mean Square of current chunk
            if rms >= THRESHOLD:  # If Root Mean Square value is greater than THRESHOLD constant
                starting_time = datetime.datetime.now(
                )  # Starting time of the memory
                hearing_perception_stimulated.value = 1  # Hearing perception stimulated
                thresh_frames.pop()  # Pop out the last frame of thresh frames
                thresh_frames.pop()  # Pop out the frame before it as well
                memory_data.append(
                    previous_data)  # Append previous chunk to memory data
                thresh_frames.append(
                    previous_data)  # Append previous chunk to thresh frames
                memory_data.append(data)  # Append current chunk to memory data
                thresh_frames.append(
                    data)  # Append current chunk to thresh frames
                silence_counter = 0  # Define silence counter
                while silence_counter < SILENCE_DETECTION:  # While silence counter value less than SILENCE_DETECTION constant

                    if audio_input == "0":
                        data = stream.read(
                            CHUNK)  # Read a new chunk from the stream
                    else:
                        stream.write(data)  # Monitor current chunk
                        data = wf.readframes(
                            CHUNK)  # Read a new chunk from the stream

                    all_frames.append(data)  # Append this chunk to all frames
                    memory_data.append(
                        data)  # Append this chunk to memory data
                    thresh_frames.append(
                        data)  # Append this chunk to thresh frames
                    rms = audioop.rms(
                        data,
                        2)  # Calculate Root Mean Square of current chunk again

                    if rms < THRESHOLD:  # If Root Mean Square value is less than THRESHOLD constant
                        silence_counter += 1  # Then increase silence counter
                    else:  # Else
                        silence_counter = 0  # Assign zero value to silence counter

                del memory_data[-(
                    SILENCE_DETECTION - 2
                ):]  # Delete the trailing (SILENCE_DETECTION - 2) silent frames from memory data
                del thresh_frames[-(
                    SILENCE_DETECTION - 2
                ):]  # Delete the trailing (SILENCE_DETECTION - 2) silent frames from thresh frames
                for i in range(SILENCE_DETECTION -
                               2):  # (SILENCE_DETECTION - 2) times
                    thresh_frames.append(EMPTY_CHUNK)  # Append an EMPTY_CHUNK
                ending_time = datetime.datetime.now(
                )  # Ending time of the memory
                hearing_perception_stimulated.value = 0  # Hearing perception NOT stimulated

                memory_data = b''.join(memory_data)  # Join the byte chunks of this memory
                #HearingMemoryUtil.add_memory(memory_data, starting_time, ending_time)
                process3 = multiprocessing.Process(
                    target=HearingMemoryUtil.add_memory,
                    args=(memory_data, starting_time,
                          ending_time))  # Define write memory process
                process3.start()  # Start write memory process
                memory_data = []  # Empty memory data

        process1.terminate()  # Terminate draw waveform process
        process2.terminate()  # Terminate draw spectrum analyzer process
        stream.stop_stream()  # Stop the stream
        stream.close()  # Close the stream
        p.terminate()  # Terminate the session
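A minimal, self-contained sketch of the sharing pattern used above: a Manager().list() proxy is appended to by the capture loop while a separate process polls it, which is roughly what the waveform and spectrum drawing processes do. The consumer function and the byte payloads are hypothetical stand-ins.

import multiprocessing
import time

def consumer(frames):
    # Poll the shared list proxy until five chunks have been seen;
    # in the example above this role is played by the drawing processes.
    seen = 0
    while seen < 5:
        if len(frames) > seen:
            print("consumer got chunk", seen, frames[seen])
            seen += 1
        time.sleep(0.05)

if __name__ == '__main__':
    manager = multiprocessing.Manager()   # shared memory space manager
    frames = manager.list()               # list proxy visible to both processes
    p = multiprocessing.Process(target=consumer, args=(frames,))
    p.start()
    for i in range(5):                    # stands in for the stream.read() loop
        frames.append(b'chunk-%d' % i)
        time.sleep(0.05)
    p.join()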
Example #20
import multiprocessing
import os
import sys

from . import js_ts_linter
from . import pre_commit_linter
from .. import common

CURR_DIR = os.path.abspath(os.getcwd())
OPPIA_TOOLS_DIR = os.path.join(CURR_DIR, os.pardir, 'oppia_tools')

ESPRIMA_PATH = os.path.join(
    OPPIA_TOOLS_DIR, 'esprima-%s' % common.ESPRIMA_VERSION)

sys.path.insert(1, ESPRIMA_PATH)

import esprima  # isort:skip  pylint: disable=wrong-import-order, wrong-import-position

NAME_SPACE = multiprocessing.Manager().Namespace()
PROCESSES = multiprocessing.Manager().dict()
NAME_SPACE.files = pre_commit_linter.FileCache()
FILE_CACHE = NAME_SPACE.files

LINTER_TESTS_DIR = os.path.join(os.getcwd(), 'scripts', 'linters', 'test_files')
VALID_JS_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'valid.js')
VALID_TS_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'valid.ts')
VALID_BACKEND_API_SERVICE_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'valid-backend-api.service.ts')
INVALID_SORTED_DEPENDENCIES_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_sorted_dependencies.ts')
INVALID_CONSTANT_IN_TS_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_constant_in_ts_file.ts')
INVALID_CONSTANT_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid.constants.ts')
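For reference, a minimal sketch of what the Manager().Namespace() used for NAME_SPACE above provides: attribute reads and writes on the proxy are forwarded to the manager process, so they are visible across processes. The counter attribute is a hypothetical example.

import multiprocessing

def worker(ns):
    # Attribute access on a Namespace proxy goes through the manager,
    # so this increment is visible to the parent process.
    ns.counter += 1

if __name__ == '__main__':
    ns = multiprocessing.Manager().Namespace()
    ns.counter = 0                      # plays the role of NAME_SPACE.files above
    p = multiprocessing.Process(target=worker, args=(ns,))
    p.start()
    p.join()
    print(ns.counter)                   # prints 1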
Example #21
File: b.py  Project: arcturus9/examples
    def __init__(self,
                 rounds,
                 name='abce',
                 random_seed=None,
                 trade_logging='off',
                 processes=None):
        """
        """
        self.family_list = {}
        self.num_of_agents_in_group = {}
        self._messages = {}
        self._resource_command_group = {}
        self._db_commands = {}
        self.num_agents = 0
        self._build_first_run = True
        self.resource_endowment = defaultdict(list)
        self.perishable = []
        self.expiring = []
        self.variables_to_track_panel = defaultdict(list)
        self.variables_to_track_aggregate = defaultdict(list)
        self.possessions_to_track_panel = defaultdict(list)
        self.possessions_to_track_aggregate = defaultdict(list)
        self._start_round = 0
        self.round = int(self._start_round)
        self._calendar = False
        # this is default value as declared in self.network() method
        self._network_drawing_frequency = 1

        self.rounds = rounds

        try:
            os.makedirs(os.path.abspath('.') + '/result/')
        except OSError:
            pass

        self.path = (os.path.abspath('.') + '/result/' + name + '_' +
                     datetime.datetime.now().strftime("%Y-%m-%d_%H-%M"))
        """ the path variable contains the path to the simulation outcomes it can be used
        to generate your own graphs as all resulting csv files are there.
        """
        while True:
            try:
                os.makedirs(self.path)
                break
            except OSError:
                self.path += 'I'

        self.trade_logging_mode = trade_logging
        if self.trade_logging_mode not in ['individual', 'group', 'off']:
            SystemExit("trade_logging can be "
                       "'group' (fast) or 'individual' (slow) or 'off'"
                       ">" + self.trade_logging_mode + "< not accepted")

        manager = mp.Manager()
        self.database_queue = manager.Queue()
        self._db = abce.db.Database(self.path,
                                    self.database_queue,
                                    trade_log=self.trade_logging_mode != 'off')
        self.logger_queue = manager.Queue()

        self.processes = mp.cpu_count() * 2 if processes is None else processes

        MyManager.register('Family', Family)
        self.managers = []
        for i in range(self.processes):
            manager = MyManager()
            manager.start()

            self.managers.append(manager)

        if random_seed is None or random_seed == 0:
            random_seed = time.time()
        random.seed(random_seed)

        self.sim_parameters = OrderedDict({
            'name': name,
            'rounds': rounds,
            'random_seed': random_seed
        })

        if self.processes > 1:
            self.pool = mp.Pool(self.processes)
        self.execute_internal = self.execute_internal_parallel if self.processes > 1 else self.execute_internal_seriel
        self._agents_to_add = []  # container used in self.run
        self._agents_to_delete = []  # container used in self.run
        self.messagess = defaultdict(list)
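A minimal sketch of the queue hand-off set up above: a Manager().Queue() is created in the parent and drained by a dedicated writer process, roughly the role abce.db.Database plays for database_queue here. The db_writer function and the row dicts are hypothetical stand-ins.

import multiprocessing as mp

def db_writer(queue):
    # Drain the shared queue until a sentinel arrives.
    while True:
        row = queue.get()
        if row is None:
            break
        print("writing", row)

if __name__ == '__main__':
    manager = mp.Manager()
    database_queue = manager.Queue()     # picklable proxy, safe to hand to workers
    writer = mp.Process(target=db_writer, args=(database_queue,))
    writer.start()
    for i in range(3):
        database_queue.put({'round': i, 'price': 1.0 + i})
    database_queue.put(None)             # sentinel: no more rows
    writer.join()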
"""MLT - Grading components (based on pytest fixtures).  		   	  			    		  		  		    	 		 		   		 		  
  		   	  			    		  		  		    	 		 		   		 		  
Note: Writes results to "comments.txt" in current working directory.  		   	  			    		  		  		    	 		 		   		 		  
"""

import pytest
import signal
from collections import namedtuple
from contextlib import contextmanager
import multiprocessing
import sys, traceback

timeout_manager = multiprocessing.Manager()

GradeResult = namedtuple('GradeResult', ['outcome', 'points', 'msg'])


class IncorrectOutput(Exception):
    pass


class TimeoutException(Exception):
    pass


class Grader(object):
    """Main grader class; an instance of this is passed in through a pytest fixture."""
    def __init__(self, max_points=None, html_pre_block=False):
        self.max_points = max_points
        self.html_pre_block = html_pre_block
        self.total_points = 0.0
Example #23
def step2_calc(df_input,
               dict_pref,
               path_input_file,
               progress_print_interval=5):
    # LOCAL SETTINGS
    # ==============

    # To limit memory usage when multiprocessing is employed, a maximum number of tasks is defined for a single process.
    # A worker process is therefore recycled once it reaches this limit and cannot accumulate data beyond it.

    mp_maxtasksperchild = 1000

    # Load kwargs

    dict_input_kwargs = df_input.to_dict(orient="index")
    list_kwargs = []
    for key, val in dict_input_kwargs.items():
        val["index"] = key
        list_kwargs.append(val)

    # Load settings

    dict_settings = dict_pref
    n_proc = dict_settings["n_proc"]

    # Determine the number of processes to be used

    n_proc = os.cpu_count() if int(n_proc) < 1 else int(n_proc)

    # SIMULATION START

    print(
        __strformat_1_1.format("Input file:",
                               os.path.basename(path_input_file)))
    print(__strformat_1_1.format("Total simulations:", len(list_kwargs)))
    print(__strformat_1_1.format("Number of threads:", n_proc))

    time_simulation_start = time.perf_counter()
    m = mp.Manager()
    q = m.Queue()
    p = mp.Pool(n_proc, maxtasksperchild=mp_maxtasksperchild)
    jobs = p.map_async(calc_time_equiv_worker,
                       [(kwargs, q) for kwargs in list_kwargs])
    count_total_simulations = len(list_kwargs)
    n_steps = 24  # length of the progress bar
    while progress_print_interval:
        if jobs.ready():
            time_simulation_consumed = time.perf_counter() - time_simulation_start
            print("{} {:.1f}s".format('█' * n_steps, time_simulation_consumed))
            break
        else:
            p_ = q.qsize() / count_total_simulations * n_steps
            print("{}{} {:03.1f}%".format('█' * int(round(p_)),
                                          '-' * int(n_steps - round(p_)),
                                          p_ / n_steps * 100),
                  end='\r')
            time.sleep(1)
    p.close()
    p.join()
    results = jobs.get()

    # format outputs

    results = np.array(results)

    df_output = pd.DataFrame({
        'TIME STEP [s]': results[:, 0],
        'TIME START [s]': results[:, 1],
        'TIME LIMITING []': results[:, 2],
        'WINDOW HEIGHT [m]': results[:, 3],
        'WINDOW WIDTH [m]': results[:, 4],
        'WINDOW OPEN FRACTION []': results[:, 5],
        'ROOM BREADTH [m]': results[:, 6],
        'ROOM DEPTH [m]': results[:, 7],
        'ROOM HEIGHT [m]': results[:, 8],
        'ROOM WALL THERMAL INERTIA [J/m2s1/2K]': results[:, 9],
        'FIRE LOAD DENSITY [MJ/m2]': results[:, 10],
        'FIRE HRR DENSITY [MW/m2]': results[:, 11],
        'FIRE SPREAD SPEED [m/s]': results[:, 12],
        'FIRE DURATION [s]': results[:, 13],
        'BEAM POSITION [m]': results[:, 14],
        'BEAM RHO [kg/m3]': results[:, 15],
        'BEAM C [-]': results[:, 16],
        'BEAM CROSS-SECTION AREA [m2]': results[:, 17],
        'BEAM FAILURE TEMPERATURE [C]': results[:, 18],
        'PROTECTION K [W/m/K]': results[:, 19],
        'PROTECTION RHO [kg/m3]': results[:, 20],
        'PROTECTION C OBJECT []': results[:, 21],
        'PROTECTION THICKNESS [m]': results[:, 22],
        'PROTECTION PERIMETER [m]': results[:, 23],
        'ISO834 TIME ARRAY [s]': results[:, 24],
        'ISO834 TEMPERATURE ARRAY [K]': results[:, 25],
        'MAX. NEAR FIELD TEMPERATURE [C]': results[:, 26],
        'SEEK ITERATION LIMIT []': results[:, 27],
        'SEEK PROTECTION THICKNESS UPPER BOUND [m]': results[:, 28],
        'SEEK PROTECTION THICKNESS LOWER BOUND [m]': results[:, 29],
        'SEEK BEAM FAILURE TEMPERATURE TOLERANCE [K]': results[:, 30],
        'INDEX': results[:, 31],
        'TIME EQUIVALENCE [s]': results[:, 32],
        'SEEK STATUS [0:Fail, 1:Success]': results[:, 33],
        'FIRE TYPE [0:P, 1:T]': results[:, 34],
        'SOUGHT BEAM TEMPERATURE [K]': results[:, 35],
        'SOUGHT BEAM PROTECTION THICKNESS [m]': results[:, 36],
        'SOUGHT ITERATIONS []': results[:, 37],
        'BEAM TEMPERATURE TO FIXED PROTECTION THICKNESS [K]': results[:, 38],
        'FIRE TIME ARRAY [s]': results[:, 39],
        'FIRE TEMPERATURE ARRAY [K]': results[:, 40],
        'OPENING FACTOR [m0.5]': results[:, 41]
    })

    df_output.set_index(
        "INDEX", inplace=True)  # assign 'INDEX' column as DataFrame index

    df_output.sort_values('TIME EQUIVALENCE [s]',
                          inplace=True)  # sort based on time equivalence

    path_results_file = os.path.join(
        os.path.dirname(path_input_file), "{} - {}".format(
            os.path.basename(path_input_file).split('.')[0], __fn_output))
    with open(path_results_file, "wb") as f:
        pdump(df_output, f)  # ensure the file handle is closed after writing

    return df_output
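The progress reporting above works because pool workers push a token onto a Manager().Queue() while map_async runs, and the parent polls q.qsize() until jobs.ready(). A minimal sketch of that pattern; the worker body and the task dicts are hypothetical.

import multiprocessing as mp
import time

def worker(args):
    kwargs, q = args
    time.sleep(0.1)          # stands in for the time-equivalence calculation
    q.put(kwargs['index'])   # report completion for the progress bar
    return kwargs['index'] * 2

if __name__ == '__main__':
    m = mp.Manager()
    q = m.Queue()            # manager queues support qsize() on all platforms
    tasks = [{'index': i} for i in range(8)]
    with mp.Pool(2, maxtasksperchild=1000) as p:
        jobs = p.map_async(worker, [(t, q) for t in tasks])
        while not jobs.ready():
            print("progress: {}/{}".format(q.qsize(), len(tasks)), end='\r')
            time.sleep(0.2)
        results = jobs.get()
    print("\ndone:", results)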
Example #24
def main(cli_params=None): # params optional in order to enable test script to run
	# let's set up the argument parser and execute it on the input
	parser = argparse.ArgumentParser(description='CircAidMe v' + __version__ + ' -- Tool for the analysis of CircAID-p-seq data -- Designed and implemented by Genexa AG, Switzerland (genexa.ch) & Immagina BioTechnology S.R.L., Italy (immaginabiotech.com)', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
	parser._action_groups.pop()
	required = parser.add_argument_group('required arguments')
	optional = parser.add_argument_group('optional arguments')
	required.add_argument('--input-file', dest="input_file", required=True,
			type=str,
			help='FASTA/FASTQ file with CircAID-p-seq data')
	required.add_argument('--out-path', dest="out_path", required=True,
			type=str,
			help='path to store results (also used for temp files)')
	required.add_argument('--adapter-name', dest='adapter_name', required=True,
			type=str,
			help='define which adapter to be used OR "ALL" for all the available adapters OR "LIST" if you want to provide the list of adapters to be used with argument "--adapter-list". Predefined adapters are: \"Luc20_DNA, ADR7391_RNA, ADR1_RNA, ADR2_RNA, ADR3_RNA,ADR4_RNA, ADR1572_RNA, ADR1859_RNA, ADR2520_RNA, ADR2858_RNA, ADR323_RNA, ADR4314_RNA, ADR4557_RNA, ADR4885_RNA, ADR5555_RNA\"')
	optional.add_argument('--adapter-list', dest='adapter_list',
			type=str,
			help='for user-defined adapter list (comma separated list)')
	optional.add_argument('--force-overwrite', action='store_true',
			help='set flag if you want to overwrite result files')
	optional.add_argument('--tag', dest='tag',
			type=str, default= "none",
			help='tag to be added to the output FASTA file')
	optional.add_argument('--refine-adapter-alignment', dest='refine_adapter',
			type=str, default="True",
			choices={"False", "True"},
			help='choose if adapter alignment has to be refined')
	optional.add_argument('--min-inserts', dest='min_inserts',
			type=int, default=parameter.min_inserts,
			help='number of inserts which have to be present in order to calculate a consensus sequence')
	optional.add_argument('--cons-min-len', dest='cons_min_len',
			type=int, default=parameter.consensus_min_len,
			help='minimal length of the consensus sequence')
	optional.add_argument('--cons-max-len', dest='cons_max_len',
			type=int, default=parameter.consensus_max_len,
			help='maximal length of the consensus sequence')
	optional.add_argument('--keep-forward', action='store_true',
			help='set flag if reads with only "forward" inserts must be kept')
	optional.add_argument('--no-store-removed-reads', action='store_true',
			help='set flag if removed reads do NOT have to be written to a separate FASTA file')
	optional.add_argument('--iter-first-muscle', dest='iter_first_muscle',
			type=int, default=2,
			choices={1,2,3},
			help='number of iterations MUSCLE has to perform for first MSA calculation')
	optional.add_argument('--iter-second-muscle', dest='iter_second_muscle',
			type=int, default=3,
			choices={1,2,3,4},
			help='number of iterations MUSCLE has to perform for second MSA calculation')
	optional.add_argument('--threads', dest='threads',
			type=int, default=1,
			help='number of threads to be used')
	optional.add_argument('--version', action='version', version='%(prog)s version ' + __version__)


	if cli_params is None: args = parser.parse_args()  # parse input from user
	else: args = parser.parse_args(cli_params)          # parse input provided by function call

	

	refine_adapter = args.refine_adapter == "True"
	exclude_forward = not args.keep_forward

	# handle adapter input. We have either 1) one adapter 2) all adapters or 3) a selection of adapters defined by user
	if(args.adapter_name == "ALL"):
		adapter_names = parameter.adapter.keys()
		adapter_names = list(adapter_names)
	elif(args.adapter_name == "LIST"):
		adapter_names = args.adapter_list.replace(" ", "").split(',')
		if(not set(adapter_names).issubset(parameter.adapter)):
			print("")
			print("ERROR: User provided an unknown adapter name.")
			quit()
	else:
		adapter_names = [args.adapter_name]
		if(not set(adapter_names).issubset(parameter.adapter)):
			print("")
			print("ERROR: User provided an unknown adapter name.")
			quit()

	adapter_names.sort() # for reproducibility

	# check if output path exists
	if(not os.path.exists(args.out_path)):
		print("")
		print("ERROR: Provided output path does not exist. Please provide a valid output path. Will exit now.")
		quit()

	# little sanity check to see if we have FASTA/FASTQ. We just check the file extension and do not parse the file:
	file_ending = pathlib.Path(args.input_file).suffix[1:]
	if file_ending not in ["fasta", "fastq"]:
		print("")
		print("ERROR: Wrong file extension. Stopping analysis.")
		quit()

	# handle the tag provided by the user (if there is one)
	if(args.tag != "none"):
		tag = "_" + args.tag
	else:
		tag = ""
	file_id = pathlib.Path(args.input_file).with_suffix('').name + tag

	# clean up the output folder from temp. files before we start:
	# -> later we could define a temp folder to separate temp files from output...
	classes.Alignments.cleanup_all(args.out_path)

	# clean up existing output files (if they already exist only overwrite if "force-overwrite"-flag is set):
	if os.path.exists(args.out_path + "/" + file_id + ".fasta"): # contains the results
		if(args.force_overwrite == False):
			print("")
			print("ERROR: Output file already exists. If you want to overwrite please set \"force-overwrite\" flag to \"True\".")
			quit()
		os.remove(args.out_path + "/" + file_id + ".fasta")
	if os.path.exists(args.out_path + "/" + file_id + "_removed_reads.fasta"): # contains ont reads which did not result in a consensus
		os.remove(args.out_path + "/" + file_id + "_removed_reads.fasta")
	if os.path.exists(args.out_path + "/" + file_id + ".log"): # log file for an individual run
		os.remove(args.out_path + "/" + file_id + ".log")
	if os.path.exists(args.out_path + "/" + file_id + ".csv"): # statistics per read
		os.remove(args.out_path + "/" + file_id + ".csv")

	# clean up the temp folder if it exists and then generate it:
	if(os.path.exists(args.out_path + "/tmp_work_dir")):
		shutil.rmtree(args.out_path + "/tmp_work_dir")
	os.mkdir(args.out_path + "/tmp_work_dir")

	# init the logger
	logger = log.Log(args.out_path + "/" + file_id + ".log")

	# list to keep track of processes (workers) doing the calculations:
	procs = []
	# this we need for a graceful exit:
	flag = consensus.GracefulExiter()

	# create lock file in order to lock access to output files:
	lock = multiprocessing.Lock()

	manager = multiprocessing.Manager()
	stats = manager.dict()
	stats_per_read = manager.dict()

	stats["cnt_in_reads"] = 0
	stats["cnt_split_in_reads"] = 0
	stats["cnt_non_split_in_reads"] = 0
	stats["cnt_split_generated_reads"] = 0
	stats["cnt_proper_consensus"] = 0
	stats["fewer_two_adapters_found"] = 0
	stats["fewer_min_inserts_found"] = 0
	stats["only_forward_inserts"] = 0
	stats["problematic_insert_orientation"] = 0
	stats["bad_MSA"] = 0
	stats["adapter_as_insert"] = 0
	stats["no_minlen_subread_after_split"] = 0
	stats["consensus_size_out_of_range"] = 0
	stats["cnt_short_in_reads"] = 0

	logger.note_start()
	logger.note_command(args)

	# fetch all the ONT reads using BioPython, one by one:
	for record in SeqIO.parse(args.input_file, file_ending):

		# count the imported reads:
		with lock:
			classes.Stat.inc_key("cnt_in_reads", stats)

		waiting = True

		# now we start "args.threads" number of jobs. whenever one is done we join the process and start a new one:
		while(waiting):
			if( len(procs) < int(args.threads) ): # if we have open slots, start new processes
				# This starts the actual calculation -- one process per read:
				p = multiprocessing.Process(target=consensus.analyzeRead, args=(args.out_path, file_id, record, adapter_names, refine_adapter, exclude_forward, args.min_inserts, args.cons_min_len, args.cons_max_len, args.iter_first_muscle, args.iter_second_muscle, stats, stats_per_read, args.no_store_removed_reads ,lock))
				procs.append(p) # append this process to the list holding all currently running processes
				p.start()
				waiting = False
			time.sleep(0.0001)

			for num,proc in enumerate(procs):
				if(not proc.is_alive()): # if a process is done and waiting, let's join it and remove it from the "procs" list
					proc.join()
					procs.pop(num)
			if flag.exit(): # if the user wants to exit let's do a graceful exit. This allows us to clean up everything.
				break
		if flag.exit():
			break

	# wait until all remaining processes are done -- or kill them if needed (graceful exit):

	# tell processes to terminate -- if we stopped the analysis
	if flag.exit():
		logger.note_stop() # add a note to the logfile that the user interrupted the analysis run
		for proc in procs:
			if(proc.is_alive()):
				proc.terminate()

	time.sleep(0.3) # wait a bit so that the child processes of "proc" can stop... Not ideal, might be handled with "kill()" later

	# wait until they are done
	while(len(procs) > 0):
		time.sleep(0.0001)
		for num,proc in enumerate(procs):
			if(not proc.is_alive()):
				proc.join()
				procs.pop(num)

	if(not flag.exit()):
		# calculate statistics based on "per read" information
		stats_overall_per_read = classes.Stat.print_read_stats(stats_per_read, args.out_path + "/" + file_id + ".csv")

		# write the statistics to the log file:
		classes.Stat.write_stats(stats, stats_overall_per_read, logger)

		logger.note_done()

	# do a final clean-up in the output folder:
	classes.Alignments.cleanup_all(args.out_path)
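A minimal sketch of the worker-slot pattern used above: at most a fixed number of processes run at once, and each updates counters in a Manager().dict() under a multiprocessing.Lock(). The analyze function and the integer "records" are hypothetical stand-ins for consensus.analyzeRead and the SeqIO records.

import multiprocessing
import time

def analyze(record, stats, lock):
    time.sleep(0.05)                             # stands in for the per-read work
    with lock:                                   # updates on the proxy are not atomic
        stats["cnt_in_reads"] += 1

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    stats = manager.dict({"cnt_in_reads": 0})
    lock = multiprocessing.Lock()
    max_workers, procs = 3, []
    for record in range(10):                     # stands in for SeqIO.parse(...)
        while len(procs) >= max_workers:         # wait for a free worker slot
            for p in procs:
                if not p.is_alive():
                    p.join()
            procs = [p for p in procs if p.is_alive()]
            time.sleep(0.01)
        p = multiprocessing.Process(target=analyze, args=(record, stats, lock))
        procs.append(p)
        p.start()
    for p in procs:
        p.join()
    print(dict(stats))                           # {'cnt_in_reads': 10}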
Example #25
    def discover_all_switches(self, npv_discovery=True):
        """
        Discovers all the switches in the fabric

        :param npv_discovery: Set to true if npv switches needs to be discovered
        :type npv_discovery: bool (default True)
        :return: Returns all switches in the fabric
        :rtype: dict

        :example:
            >>> from mdssdk.fabric import Fabric
            >>> f = Fabric(ip, user, pw, verify_ssl=False)
            >>> out= f.discover_all_switches(npv_discovery=True)
            >>> print(out)
            Discovering all switches in the fabric(seed ip: 10.127.190.34). Please wait...
            10.127.190.55: is not an MDS switch, hence skipping it.
            {'10.127.190.34': <mdssdk.switch.Switch object at 0x10f14e978>, '10.127.190.50': <mdssdk.switch.Switch object at 0x10f14e908>}
            >>>
            >>>
        """
        print("Discovering all switches in the fabric(seed ip: " +
              self.__ip_address + "). Please wait...")
        npv = self.seed_switch.npv
        if npv:
            raise UnsupportedSeedSwitch(
                "Cannot discover the fabric using an NPV switch, Please use an NPIV switch for discovery"
            )
        while self._ips_to_be_discovered:
            m = multiprocessing.Manager()
            lock = m.Lock()
            allfutures = []
            executor = ThreadPoolExecutor(len(self._ips_to_be_discovered))
            for ip in self._ips_to_be_discovered:
                fut = executor.submit(
                    self.__connect_to_switch,
                    lock,
                    ip,
                    self.__username,
                    self.__password,
                    self.connection_type,
                    self.timeout,
                    npv_discovery,
                )
                allfutures.append(fut)
            wait(allfutures)
            for fut in allfutures:
                self._ips_to_be_discovered = list(
                    set(self._ips_to_be_discovered))
                # print(self._ips_to_be_discovered)
                try:
                    log.debug(fut.result())
                except Exception as e:
                    template = "An exception of type {0} occurred. Arguments:\n{1!r}"
                    message = template.format(type(e).__name__, e.args)
                    # print(message)
                    # log.exception("Executor Exception")
                    z = self._extract_ip_from_exception_str(e)
                    if z is not None:
                        self._ips_to_be_discovered.remove(z)
                    if "UnsupportedSwitch" == type(e).__name__:
                        msg = z + ": is not an MDS switch, hence skipping it."
                        self._ips_not_considered[z] = msg
                    elif "NetmikoAuthenticationException" == type(e).__name__:
                        msg = z + ": invalid username or password, hence skipping it."
                        self._ips_not_considered[z] = msg
                    elif "NetmikoTimeoutException" == type(e).__name__:
                        msg = z + ": unable to reach the switch, hence skipping it."
                        self._ips_not_considered[z] = msg
                    elif "ConnectionError" == type(e).__name__:
                        msg = (
                            z +
                            ": failed to establish http/https connection(check if nxapi is enabled), hence skipping it."
                        )
                        self._ips_not_considered[z] = msg

            for eachip in self._switches.keys():
                if eachip in self._ips_to_be_discovered:
                    self._ips_to_be_discovered.remove(eachip)
            for eachip in self._ips_not_considered.keys():
                if eachip in self._ips_to_be_discovered:
                    self._ips_to_be_discovered.remove(eachip)
        if self._ips_not_considered:
            for val in self._ips_not_considered.values():
                # print("_ips_not_considered")
                print(val)
        return self._switches
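The discovery above shares a Manager().Lock() proxy with ThreadPoolExecutor workers. Because those workers are threads in a single process, a plain threading.Lock would also work; the proxy form mirrors the example and would keep working if the jobs were later moved to separate processes. A minimal sketch with a hypothetical probe function and IP list.

import multiprocessing
from concurrent.futures import ThreadPoolExecutor, wait

def probe(lock, ip, discovered):
    # Pretend to connect to a switch, then record the result under the lock.
    with lock:
        discovered[ip] = "connected"

if __name__ == '__main__':
    m = multiprocessing.Manager()
    lock = m.Lock()                      # proxy lock, as in the example above
    discovered = {}
    ips = ["10.0.0.1", "10.0.0.2", "10.0.0.3"]
    with ThreadPoolExecutor(len(ips)) as executor:
        futures = [executor.submit(probe, lock, ip, discovered) for ip in ips]
        wait(futures)
    print(discovered)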
Example #26
File: io.py  Project: seemethere/dask
def to_hdf(df,
           path_or_buf,
           key,
           mode='a',
           append=False,
           get=None,
           name_function=None,
           compute=True,
           lock=None,
           dask_kwargs={},
           **kwargs):
    name = 'to-hdf-' + uuid.uuid1().hex

    pd_to_hdf = getattr(df._partition_type, 'to_hdf')

    single_file = True
    single_node = True

    # if path_or_buf is string, format using i_name
    if isinstance(path_or_buf, str):
        if path_or_buf.count('*') + key.count('*') > 1:
            raise ValueError(
                "A maximum of one asterisk is accepted in file path and dataset key"
            )

        fmt_obj = lambda path_or_buf, i_name: path_or_buf.replace('*', i_name)

        if '*' in path_or_buf:
            single_file = False
    else:
        if key.count('*') > 1:
            raise ValueError(
                "A maximum of one asterisk is accepted in dataset key")

        fmt_obj = lambda path_or_buf, _: path_or_buf

    if '*' in key:
        single_node = False

    if 'format' in kwargs and kwargs['format'] != 'table':
        raise ValueError("Dask only support 'table' format in hdf files.")

    if mode not in ('a', 'w', 'r+'):
        raise ValueError("Mode must be one of 'a', 'w' or 'r+'")

    if name_function is None:
        name_function = build_name_function(df.npartitions - 1)

    # we guarantee partition order is preserved when it's saved and read
    # so we enforce name_function to maintain the order of its input.
    if not (single_file and single_node):
        formatted_names = [name_function(i) for i in range(df.npartitions)]
        if formatted_names != sorted(formatted_names):
            warn("To preserve order between partitions name_function "
                 "must preserve the order of its input")

    # If the user did not specify a scheduler and the write is sequential, default to
    # the sequential scheduler. Otherwise let the _get method choose the scheduler.
    if get is None and 'get' not in _globals and single_node and single_file:
        get = get_sync

    # handle lock default based on whether we're writing to a single entity
    _actual_get = get or _globals.get('get') or df._default_get
    if lock is None:
        if not single_node:
            lock = True
        elif not single_file and _actual_get is not dask.multiprocessing.get:
            # writing to multiple files, but not with the multiprocessing
            # scheduler, so the writers share a process and still need a lock
            # (multiple files + multiprocessing is the one case that does not)
            lock = True
        else:
            lock = False

    if lock is True:
        if _actual_get == dask.multiprocessing.get:
            lock = multiprocessing.Manager().Lock()
        else:
            lock = Lock()

    kwargs.update({'format': 'table', 'mode': mode, 'append': append})

    dsk = dict()

    i_name = name_function(0)
    dsk[(name, 0)] = (_pd_to_hdf, pd_to_hdf, lock,
                      [(df._name, 0),
                       fmt_obj(path_or_buf, i_name),
                       key.replace('*', i_name)], kwargs)

    kwargs2 = kwargs.copy()
    if single_file:
        kwargs2['mode'] = 'a'
    if single_node:
        kwargs2['append'] = True

    for i in range(1, df.npartitions):
        i_name = name_function(i)
        task = (_pd_to_hdf, pd_to_hdf, lock, [(df._name, i),
                                              fmt_obj(path_or_buf, i_name),
                                              key.replace('*',
                                                          i_name)], kwargs2)
        if single_file:
            link_dep = i - 1 if single_node else 0
            task = (_link, (name, link_dep), task)
        dsk[(name, i)] = task

    dsk = merge(df.dask, dsk)
    if single_file and single_node:
        keys = [(name, df.npartitions - 1)]
    else:
        keys = [(name, i) for i in range(df.npartitions)]

    if compute:
        return DataFrame._get(dsk, keys, get=get, **dask_kwargs)
    else:
        return delayed([Delayed(key, [dsk]) for key in keys])
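The reason the multiprocessing branch above uses multiprocessing.Manager().Lock() rather than a plain threading.Lock is that the manager lock is a proxy object that can be pickled and shipped to pool workers, while a thread lock cannot. A minimal sketch of that idea with a hypothetical append_row task and output path.

import multiprocessing

def append_row(args):
    lock, path, row = args
    # Serialise writes to the single shared file across worker processes.
    with lock:
        with open(path, "a") as f:
            f.write(row + "\n")

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    lock = manager.Lock()                # proxy: survives pickling into the pool
    path = "rows.txt"                    # hypothetical output file
    with multiprocessing.Pool(2) as pool:
        pool.map(append_row, [(lock, path, "row-%d" % i) for i in range(4)])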
Example #27
            if len(testcases) == 0:
                print("cannot find testcase for : %s" % arg.case)
                sys.exit(0)

    opts = ' '.join(sorted(['O' + o for o in arg.opts]))

    if arg.pg_flag:
        flags = ['pg']
    elif arg.if_flag:
        flags = ['finstrument-functions']
    else:
        flags = arg.flags.split()

    from functools import partial

    manager = multiprocessing.Manager()
    shared = manager.dict()

    shared.tests_count = len(testcases)
    shared.progress = 0
    shared.results = dict()
    shared.diffs = dict()
    shared.total = 0
    res = [
        TestBase.TEST_SUCCESS,
        TestBase.TEST_SUCCESS_FIXED,
        TestBase.TEST_DIFF_RESULT,
        TestBase.TEST_NONZERO_RETURN,
        TestBase.TEST_ABNORMAL_EXIT,
        TestBase.TEST_TIME_OUT,
        TestBase.TEST_BUILD_FAIL,
    ]
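One caveat on the snippet above: attribute assignments on a Manager().dict() proxy (shared.tests_count = ..., shared.progress = ...) set ordinary attributes on the local proxy object and are not forwarded to the manager, so other processes never see them; only item access goes through the proxy. A minimal sketch of the distinction, with hypothetical keys.

import multiprocessing

def worker(shared):
    shared['progress'] += 1          # item access is forwarded to the manager

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    shared = manager.dict()
    shared['tests_count'] = 7        # shared with child processes
    shared['progress'] = 0
    shared.results = {}              # plain attribute on the local proxy only
    p = multiprocessing.Process(target=worker, args=(shared,))
    p.start()
    p.join()
    print(shared['progress'])        # prints 1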
Example #28
def main():

    #Setup initial variables
    args = get_args()

    #HTML Request Variables

    #Request data
    fmt_str = "{}={}&{}={}".format(args.user_param, "{}", args.pass_param, "{}")
    if args.extra_param:
        for a in args.extra_param:
            fmt_str = fmt_str + "&{}".format(a[0])
    #Headers
    hdr = {
            "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246",
            "Accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Accept-Language" : "en-US,en;q=0.5",
            "Accept-Encoding" : "gzip, deflate",
            "Referer" : args.url,
            "Content-Type" : "application/x-www-form-urlencoded",
            "Connection" : "close",
            }

    #Get crack mode
    crack_mode = get_crack_mode(args.login_list, \
        args.password_list, args.login, args.password)

    #If there's a bad file provided or some other error in crack_mode derivation
    if not crack_mode: exit(0)

    #Instantiate workers
    m = multiprocessing.Manager()
    login_q = m.Queue()
    done_q = m.Queue()
    len_q = m.Queue()
    struck_gold = multiprocessing.Event()
    kill_flag = multiprocessing.Event()
    start_time = time()

    for i in range(args.threads):
         t = multiprocessing.Process(target=guesser, args=(args.url, fmt_str, hdr, \
            login_q, args.success_match, args.success_exclude, \
            kill_flag, struck_gold, done_q))
         t.start()

    #Now we have mode, carry out attack in whatever way specified
    if crack_mode == 'double':
        #double_crack(args.login_list, args.password_list, login_q, len_q)
        t = multiprocessing.Process(target=double_crack, args=(
            args.login_list, args.password_list, login_q, len_q, ))
    elif crack_mode == 'user':
        #single_crack(args.login_list, args.password, False, login_q, len_q)
        t = multiprocessing.Process(target=single_crack, args=(
            args.login_list, args.password, False, login_q, len_q, ))
    elif crack_mode == 'password':
        #single_crack(args.password_list, args.login, True, login_q, len_q)
        t = multiprocessing.Process(target=single_crack, args=(
            args.password_list, args.login, True, login_q, len_q, ))
    else:
        bc.err("Brute force mode invalid - {}. Exiting.".format(crack_mode))
        kill_flag.set()
        sleep(0.5)
        exit(0)

    bc.info("Workers initialised. Calculating effort required.")
    #Start the bruteforce thread, reading passwords into the worker queue
    t.start()

    #When available get the number of guesses
    n_guesses = len_q.get()
    #bc.info("guesses total : {}".format(n_guesses))
    last_progress = 0.0

    with progressbar.ProgressBar(max_value= n_guesses) as bar:
        while True:
            try:
                done = done_q.qsize()
            except Exception as e:
                bc.warn("Error when checking progress : {}".format(e))
                bc.info("Continuing")
            progress = round( (done / n_guesses ) * 100 , 0)
            if struck_gold.is_set() and not args.cont:
                kill_flag.set()
                bc.info("Creds found, continue flag not set. Finishing.")
                break
            elif progress >= 100.0 and login_q.empty():
                kill_flag.set()
                sleep(1)
                print()
                bc.info("Brute complete. Shutting down...")
                break
            else:
                #Just waiting for a mate
                bar.update(done)
                sleep(1)

    #Gracefully kill everything
    for p in multiprocessing.active_children():
        p.join(0.5)
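A minimal sketch of the coordination used above: workers consume candidate credentials from a Manager().Queue(), report progress on a second queue, and are steered by multiprocessing.Event flags for success and shutdown. The guesser body and the "hunter2" success check are hypothetical.

import multiprocessing
from time import sleep

def guesser(login_q, done_q, struck_gold, kill_flag):
    # Pull candidate credentials until told to stop.
    while not kill_flag.is_set():
        creds = login_q.get()
        done_q.put(creds)                        # progress accounting
        if creds == ("admin", "hunter2"):        # hypothetical "success" check
            struck_gold.set()

if __name__ == '__main__':
    m = multiprocessing.Manager()
    login_q, done_q = m.Queue(), m.Queue()
    struck_gold = multiprocessing.Event()
    kill_flag = multiprocessing.Event()
    w = multiprocessing.Process(target=guesser,
                                args=(login_q, done_q, struck_gold, kill_flag))
    w.start()
    for pw in ["123456", "letmein", "hunter2"]:
        login_q.put(("admin", pw))
    while not struck_gold.is_set():
        sleep(0.1)
    kill_flag.set()
    login_q.put(None)                            # unblock the worker if it is waiting
    w.join()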
Example #29
def startServer(localSettings='localSettings',
                appPort=7000,
                logFilename="logFile.log"):
    global theApp
    bufferSize = 64 * 1024
    password = str(
        snowflake.make_snowflake(
            snowflake_file=os.path.join(base_dir, 'snowflake')))
    manager = multiprocessing.Manager()
    servers = manager.list()
    workers = manager.list()
    backupInfo = manager.dict()
    for i in range(0, 10000):
        workers.append(-1)
    if os.path.isfile(os.path.join(base_dir, 'secretEnc')):
        pyAesCrypt.decryptFile(os.path.join(base_dir, "secretEnc"),
                               os.path.join(base_dir, "secretPlain"), password,
                               bufferSize)
        file = open(os.path.join(base_dir, "secretPlain"), 'r')
        aList = json.load(file)
        for server in aList:
            servers.append(server)
        file.close()
        os.remove(os.path.join(base_dir, "secretPlain"))
    if os.path.isabs(localSettings):
        setFile = localSettings
    else:
        setFile = os.path.join(base_dir, localSettings)
    globals = setupLocalSettings(setFile)
    theApp = app.createApp(theServers=servers, theWorkers=workers)
    p = multiprocessing.Process(target=backupServer.startBackup,
                                args=(os.path.join(base_dir, 'static',
                                                   'rsync'), backupInfo,
                                      setFile))
    p.start()
    multiprocesses.append({
        'name': 'Backup',
        'pid': p.pid,
        'description': 'DAQBroker backup process'
    })
    time.sleep(1)
    p = multiprocessing.Process(target=logServer.logServer,
                                args=(globals["logport"], base_dir),
                                kwargs={'logFilename': logFilename})
    p.start()
    multiprocesses.append({
        'name': 'Logger',
        'pid': p.pid,
        'description': 'DAQBroker log process'
    })
    time.sleep(1)
    p = multiprocessing.Process(target=commServer.collector,
                                args=(servers, globals["commport"],
                                      globals["logport"], backupInfo, setFile))
    p.start()
    multiprocesses.append({
        'name': 'Collector',
        'pid': p.pid,
        'description': 'DAQBroker message collector process'
    })
    time.sleep(1)
    p = multiprocessing.Process(target=monitorServer.producer,
                                args=(servers, globals["commport"],
                                      globals["logport"], False, backupInfo,
                                      workers, setFile))
    p.start()
    multiprocesses.append({
        'name': 'Producer',
        'pid': p.pid,
        'description': 'DAQBroker broadcasting server process'
    })
    time.sleep(1)
    print("STARTED", multiprocesses)
    http_server = HTTPServer(WSGIContainer(theApp))
    http_server.listen(appPort)
    webbrowser.open('http://localhost:' + str(appPort) + "/daqbroker")
    IOLoop.instance().start()
Example #30
    scores = json.load(open(scores_file))
    classifier = joblib.load('Happy_trained_RandomForest_with_pose.gz')

    vids_file = 'happy_predic_vids.txt'
    vids_done = {}
    original_len = len(vids_done)
    files = [
        x for x in (os.path.join(OpenDir, vid_dir)
                    for vid_dir in os.listdir(OpenDir))
        if (os.path.isdir(x) and 'au.txt' in os.listdir(x))
    ]

    if os.path.exists(vids_file):
        with open(vids_file) as f:
            vids_done = json.load(f)  # json.load expects a file object, not a path string
    remaining = [x for x in files if x not in vids_done]
    out_q = multiprocessing.Manager().Queue()
    # f = functools.partial(mark_vid_dir, out_q)
    bar = progressbar.ProgressBar(redirect_stdout=True,
                                  max_value=len(remaining))
    # for i, _ in enumerate(Pool().imap(f, remaining), 1):

    for i, remain in enumerate(remaining, 1):
        mark_vid_dir(out_q, remain)
        bar.update(i)

    while not out_q.empty():
        vids_done.update(out_q.get())

    if len(vids_done) != original_len:
        json.dump(vids_done, open(scores_file, 'w'))