Example #1
def main():

    parser = ArgumentParser(description="Speed up your SHA. A different hash style.")
    parser.add_argument("-1", "--sha1", action="store_true")
    parser.add_argument("-2", "--sha224", action="store_true")
    parser.add_argument("-3", "--sha256", action="store_true")
    parser.add_argument("-4", "--sha384", action="store_true")
    parser.add_argument("-5", "--sha512", action="store_true")
    parser.add_argument("-f", "--file", type=str, help="The path to the file")

    if len(sys.argv) == 1:
        parser.print_help()
        return

    global args
    args = parser.parse_args()

    hashtree = ""

    big_file = open(args.file, "rb")
    pool = Pool(multiprocessing.cpu_count())

    for chunk_hash in pool.imap(hashing, chunks(big_file)):
        hashtree = hashtree + chunk_hash

    pool.terminate()

    if os.path.getsize(args.file) < 20971520:
        print(hashtree)
    else:
        print(str(hashing(hashtree)))
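
Example #1 depends on two helpers that are not shown, chunks and hashing. A minimal sketch of what they could look like, assuming fixed-size binary blocks and that the global args flags select the digest (the 1 MiB block size is an assumption, not taken from the original):

import hashlib

BLOCK_SIZE = 1024 * 1024  # assumed block size; the original value is not shown

def chunks(file_obj, size=BLOCK_SIZE):
    """Yield fixed-size binary blocks from an open file object."""
    while True:
        block = file_obj.read(size)
        if not block:
            break
        yield block

def hashing(data):
    """Hash one block (or the concatenated chunk digests) with the selected SHA variant."""
    if isinstance(data, str):
        data = data.encode("utf-8")
    if args.sha1:
        return hashlib.sha1(data).hexdigest()
    if args.sha224:
        return hashlib.sha224(data).hexdigest()
    if args.sha384:
        return hashlib.sha384(data).hexdigest()
    if args.sha512:
        return hashlib.sha512(data).hexdigest()
    return hashlib.sha256(data).hexdigest()  # default, matching the -3/--sha256 flag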
Example #2
    def do_experiment(self, params):
        """ runs one experiment programatically and returns.
            params: either parameter dictionary (for one single experiment) or a list of parameter
            dictionaries (for several experiments).
        """
        paramlist = self.expand_param_list(params)

        # create directories, write config files
        for pl in paramlist:
            # check for required param keys
            if ("name" in pl) and ("iterations" in pl) and ("repetitions" in pl) and ("path" in pl):
                self.create_dir(pl, self.options.delete)
            else:
                print "Error: parameter set does not contain all required keys: name, iterations, repetitions, path"
                return False

        # create experiment list
        explist = []

        # expand paramlist for all repetitions and add self and rep number
        for p in paramlist:
            explist.extend(zip([self] * p["repetitions"], [p] * p["repetitions"], xrange(p["repetitions"])))

        # if only 1 process is required, call each experiment separately (no worker pool)
        if self.options.ncores == 1:
            for e in explist:
                mp_runrep(e)
        else:
            # create worker processes
            pool = Pool(processes=self.options.ncores)
            pool.map(mp_runrep, explist)

        return True
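
The worker mp_runrep is not shown in Example #2. Since each entry of explist is an (experiment, params, repetition) tuple, the worker is presumably little more than an unpacking shim; a sketch, with run_rep standing in for whatever per-repetition method the experiment class actually exposes:

def mp_runrep(job):
    """Unpack one (experiment, params, repetition) tuple and run that repetition."""
    experiment, params, rep = job
    return experiment.run_rep(params, rep)  # run_rep is a hypothetical method name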
Example #3
def main():
    jobConfigs = [JobConfig(CronTab("* * * * *"), stat_update), JobConfig(CronTab("* * * * *"), led_update)]
    p = Pool(len(jobConfigs))
    try:
        p.map(job_controller, jobConfigs)
    except KeyboardInterrupt:
        neoPixelConfig.turnoff_all()
Example #4
 def imap_unordered(self, func, iterable, chunksize=1):
     """Same as SGEPool.imap, except that the results are unordered.
     Rather than blocking to ensure the correct order, all jobs are polled and
     results are returned as soon as they are done.
     """
     if not self.use_grid_engine:
         workerPool = Pool(initializer=self.initializer, initargs=self.initargs)
         for val in workerPool.imap_unordered(func, iterable, chunksize):
             yield val
         return  # the local pool handled everything; skip the grid-engine path below
     iterable = iter(iterable)
     allJobs = self._submit_jobs(func, iterable, "map", chunksize)
     interval = 3
     while len(allJobs) > 0:
         doneJobs = []
         for job in allJobs:
             if job.isFinished():
                 doneJobs.append(job)
                 for data in self._getData(job.outputFile):
                     yield data
                 os.remove(job.inputFile)  # BUG: these files aren't removed if there is an exception raised
                 os.remove(job.outputFile)
         for job in doneJobs:
             allJobs.remove(job)
         if len(doneJobs) == 0:
             # no jobs are done yet-- wait for a while for them to finish
             time.sleep(interval)
             interval = min(2 * interval, 60)
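
The BUG comment in Example #4 notes that a job's temporary files leak if an exception is raised while its output is being yielded. One way to close that gap, sketched here as a standalone helper rather than a patch to the original class, is to move the cleanup into a finally clause:

import os

def drain_and_cleanup(get_data, output_file, input_file):
    """Yield every record from a finished job's output file, then delete the job's
    temporary files even if the consumer raises or stops iterating early."""
    try:
        for record in get_data(output_file):
            yield record
    finally:
        os.remove(input_file)
        os.remove(output_file)

Inside imap_unordered, the inner loop would then read: for data in drain_and_cleanup(self._getData, job.outputFile, job.inputFile): yield data.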
Example #5
def runLimit(job, fork, timeout, rootfname):
    pointsOut = []
    if fork > 1:
        pool = Pool(fork)
        res = []
    for p in job["signal"]:
        channels = []
        for k, v in job["other"].iteritems():
            channels.append(v.copy())
            channels[-1].update(p[k])

        glob = {}
        if cfg.use_nloxs:
            glob["xs"] = channels[0]["nloxs"]
        else:
            glob["xs"] = channels[0]["loxs"]
        (glob["m0"], glob["m1/2"]) = (channels[0]["m0"], channels[0]["m1/2"])
        glob["lumiError"] = channels[0]["lumiError"]
        args = [job["actions"], glob, channels, rootfname]
        if fork > 1:
            res.append(pool.apply_async(workOnPoint, args))
        else:
            pointsOut.append(workOnPoint(*args))
    if fork > 1:
        for result in res:
            try:
                print "Waiting..."
                pointsOut.append(result.get(timeout))
            except TimeoutError:
                continue

    return pointsOut
Example #6
    def get(self):
        mode = toAlpha3Code(self.get_argument("lang"))
        text = self.get_argument("q")
        if not text:
            self.send_error(400, explanation="Missing q argument")
            return

        def handleCoverage(coverage):
            if coverage is None:
                self.send_error(408, explanation="Request timed out")
            else:
                self.sendResponse([coverage])

        if mode in self.analyzers:
            pool = Pool(processes=1)
            result = pool.apply_async(getCoverage, [text, self.analyzers[mode][0], self.analyzers[mode][1]])
            pool.close()

            @run_async_thread
            def worker(callback):
                try:
                    callback(result.get(timeout=self.timeout))
                except TimeoutError:
                    pool.terminate()
                    callback(None)

            coverage = yield tornado.gen.Task(worker)
            handleCoverage(coverage)
        else:
            self.send_error(400, explanation="That mode is not installed")
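
Stripped of the tornado plumbing, Example #6 is really a "run a function with a hard timeout" pattern: a one-process pool, apply_async, get(timeout=...), and terminate() if the deadline passes. A self-contained sketch of that pattern (the helper name is introduced here for illustration):

from multiprocessing import Pool, TimeoutError

def run_with_timeout(func, args, timeout):
    """Run func(*args) in a throwaway single-process pool; return None on timeout."""
    pool = Pool(processes=1)
    try:
        return pool.apply_async(func, args).get(timeout=timeout)
    except TimeoutError:
        return None
    finally:
        pool.terminate()
        pool.join()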
Example #7
def run(db, fs, workers=1, poll_interval=0.1):
    """Run the compute device, querying the database and doing
    relevant work."""
    device_id = random.randrange(sys.maxint)
    log(device_id, message="Starting device loop for device %s..." % device_id)
    pool = Pool(processes=workers)
    results = {}
    outputs = {}
    while True:
        # Queue up all unevaluated cells we requested
        for X in db.get_unevaluated_cells(device_id):
            code = X["input"]
            log(device_id, X["_id"], message="evaluating '%s'" % code)
            results[X["_id"]] = pool.apply_async(execute_code, (X["_id"], code))
            outputs[X["_id"]] = ""
        # Get whatever results are done
        finished = set(_id for _id, r in results.iteritems() if r.ready())
        changed = set()
        while not outQueue.empty():
            _id, out = outQueue.get()
            outputs[_id] += out
            changed.add(_id)
        for _id in changed:
            db.set_output(_id, make_output_json(outputs[_id], _id in finished))
        for _id in finished - changed:
            db.set_output(_id, make_output_json(outputs[_id], True))
        # delete the output that I'm finished with
        for _id in finished:
            del results[_id]
            del outputs[_id]

        time.sleep(poll_interval)
Example #8
def crawl_estates_detail(dist=None, update=False):
    from models import RealEstate, Subdistrict, District
    from multiprocessing import Pool

    if not update:
        e = RealEstate.objects.filter(updated=0)
    else:
        e = RealEstate.objects.all()  # assumption: refresh every estate when an update is requested
    if "__iter__" in dir(dist):  # in case of empty list
        estates = []
        # FIXME: not returning QuerySet!!!
        for sd in dist:
            estates += e.filter(subdist=sd)
    elif dist:
        if dist.__class__ == Subdistrict:
            estates = e.filter(subdist=dist)
        elif dist.__class__ == District:
            estates = e.filter(subdist__dist=dist)
        else:
            raise TypeError
    else:
        estates = e
    pool = Pool(NUM_WORKERS)
    p = pool.map_async(update_estate_detail_pool_worker, estates)
    try:
        p.get(0xFFFF)
    except:
        pool.terminate()
Example #9
def brutePlugin(pluginlist, foundplug, hosti, pathi, porti, securei, psize):
    global host
    host = hosti
    global port
    port = porti
    global secure
    secure = securei
    global plugfound
    plugfound = foundplug
    global path
    path = pathi
    open(plugfound, "w").close()  # truncate the results file
    listsize = len(pluginlist)

    # manage pool
    if psize == 0:
        psize = 5
    if listsize <= psize:
        chunksize = 1
    else:
        chunksize = (listsize / psize) + (listsize % psize)
    print("Plugin list size: %d\tChunk size: %d\tPool size: %d" % ((listsize), chunksize, psize))
    print("Plugin bruteforcing started")
    pool = Pool(processes=psize)
    for chunk in grouper(pluginlist, chunksize):
        pool.map_async(worker, chunk)
    pool.close()
    try:
        while len(active_children()) > 0:  # how many active children do we have
            sleep(2)
            ignore = active_children()
    except KeyboardInterrupt:
        exit("CTRL^C caught, exiting...\n\n")
    print("Plugin bruteforce complete")
Example #10
def main():
    vfiles = glob.glob(args.version_directory + "/*" + args.version_extension)
    logs = glob.glob(args.log_directory + "/*" + args.log_extension)

    for vfile in vfiles:
        with open(vfile, "r") as vfilef:
            for line in vfilef:
                # File format: <regex> <timestamp>
                chunks = line.rstrip().split()
                RE_DICT.append([chunks[0], chunks[1]])

    # use multiple processes to split logs
    pool = Pool()
    try:
        if args.all:
            args.outfile.write("Date UA\n")
            for log in logs:
                get_releasedates(log, args.outfile)
        else:
            results = pool.imap_unordered(get_all_matching, logs)  # parse_logs

            # args.outfile.write('User Max Min Mode Mode_UA\n')
            for result in results:
                args.outfile.write(result)
    except KeyboardInterrupt:
        sys.exit()
Example #11
def do_crawl_estate_in_district(dist):
    from multiprocessing import Pool
    from models import District, Subdistrict

    if dist.__class__ is District:
        url = url_from_district(dist)
    elif dist.__class__ is Subdistrict:
        url = url_from_subdistrict(dist)
    else:
        raise TypeError
    n_estates = get_estate_count(url)
    n_pages = (n_estates - 1) / ITEM_PER_PAGE + 1
    pool = Pool(NUM_WORKERS)
    # used to be:
    # results = pool.map(crawl_estate_in_page, [url + PAGE_SFX % p for p in range(1, n_pages + 1)])
    p = pool.map_async(crawl_estate_in_page_pool_worker, [url + PAGE_SFX % p for p in range(1, n_pages + 1)])
    try:
        results = p.get(0xFFFF)
    except:
        pool.terminate()
        results = 0
    n_found = 0
    if results:
        for r in results:
            n_found += r
    print n_estates, "/", n_found
    # attempt to solve 'Too many open files' issue:
    # pool.terminate()
    return n_found == n_estates
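
The map_async(...).get(0xFFFF) call in Examples #8 and #11 is a known Python 2 workaround: a plain pool.map() can block in a way that never lets KeyboardInterrupt reach the parent process, whereas get() with a (very large) timeout keeps Ctrl-C working. The same idiom as a small reusable sketch:

def map_interruptible(pool, func, items, timeout=0xFFFF):
    """pool.map() that can still be interrupted with Ctrl-C under Python 2."""
    try:
        return pool.map_async(func, items).get(timeout)
    except KeyboardInterrupt:
        pool.terminate()
        raise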
Example #12
def run(intervaldict, background_frequencies, TFs, databasefile):
    TFIntervaldict = dict()
    TFPSSMdict = functions.parse_PSSM(databasefile, TFs)
    sequencelist = list()
    for chrom in intervaldict:
        for interval in intervaldict[chrom]:
            if len(interval[2]) > 0:
                forward = interval[2]
                reverse = functions.reverse(forward)
                sequencelist.append(forward)
                sequencelist.append(reverse)
    # args = [0] * len(sequencelist)
    for TF in TFPSSMdict:
        print TF
        TFIntervaldict[TF] = list()
        processes = 30
        for i in range(len(sequencelist) / processes):
            args = [0] * processes
            k = 0
            for j in range(i * processes, i * processes + processes):
                args[k] = (TFPSSMdict[TF], background_frequencies, sequencelist[j])
                k += 1
            pool = Pool(processes=processes)
            result = pool.map(functions.LL_calc, args)
            TFIntervaldict[TF].append(result)

    return TFIntervaldict
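
Two quirks of Example #12 are worth noting: a fresh 30-process pool is created (and never closed) for every batch, and the integer division in range() silently skips any sequences left over after the last full batch. A sketch of an equivalent that avoids both, assuming the same module-level imports (Pool, functions) and that functions.LL_calc accepts the same (PSSM, background, sequence) tuples; each TF then maps to one flat result list rather than a list of per-batch lists:

def run_flat(intervaldict, background_frequencies, TFs, databasefile):
    TFPSSMdict = functions.parse_PSSM(databasefile, TFs)
    sequencelist = []
    for chrom in intervaldict:
        for interval in intervaldict[chrom]:
            if len(interval[2]) > 0:
                sequencelist.append(interval[2])                     # forward strand
                sequencelist.append(functions.reverse(interval[2]))  # reverse strand
    pool = Pool(processes=30)
    TFIntervaldict = {}
    for TF in TFPSSMdict:
        tasks = [(TFPSSMdict[TF], background_frequencies, seq) for seq in sequencelist]
        TFIntervaldict[TF] = pool.map(functions.LL_calc, tasks)  # one flat list per TF
    pool.close()
    pool.join()
    return TFIntervaldict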
Example #13
def run():
    print("Writing all HTML files")
    page = open("DataFromLabs.html", "r").read()

    soup = bs4.BeautifulSoup(page, "html.parser")

    # List of homepages
    homepages = []

    # Puts the html for each entry number into a list
    pageIDs = soup.find_all("span", {"class": "model-id"})

    #    maxThreads = 150

    #    for i in range(maxThreads):
    #        t = threading.Thread(target=writeHTML)
    #        t.daemon = True
    #        t.start()

    global totalEntries
    totalEntries = len(pageIDs)

    elems = []
    for index in range(totalEntries):
        elems.append(pageIDs[index].text)
        # writeHTML(pageIDs[index].text)

    pool = Pool(processes=8)
    pool.map(writeHTML, elems)
Example #14
def translate_concurrent(input_object, url, weights=None, num_processes=8):

    pool = Pool(processes=num_processes)
    text_args = [(line, weights, url) for line in input_object]

    for translated_line in pool.imap(translate_single_line, text_args):
        print translated_line
Example #15
    def _map_pop(self, sched_func, cpu_count):
        """ _hpop runs the firefly algorithm 
        """

        # initialize our process pool
        pool = Pool(processes=cpu_count)

        def set_pop(fly):
            """weird hack to get the pickled population alpha to be set
            """

            fly.pop = self
            return fly

        # start at 2 for the log function. do same amount of steps
        for i in xrange(2, self.gen + 2):
            # calculate our new alpha value based on the annealing schedule
            self.alpha = sched_func(i)

            # copy our population over to old one as well
            self._copy_pop()

            # annoying hack to get around the cached pickle of pop
            self.pop[:] = [set_pop(fly) for fly in self.pop]

            # map our current population to a new one
            self.pop[:] = pool.map(map_fly, self.pop)
            self.pop.sort()
Example #16
def main():
    """
    Method to kick things off
    """

    # Setup workers
    pool = Pool(processes=CPU_COUNT)

    # Prepare URLs
    urls = []
    for url in CRAWL_URLS:
        urls.append(str(BASE_URL + url))

    if USE_ES:
        # Create connection
        es = ElasticSearch(ES_URL)

        try:
            # Delete the existing index
            es.delete_index(ES_INDEX)
        except:
            # In case the index does not exist
            pass

        # Create the index to use
        es.create_index(ES_INDEX)

    else:
        # Setup the database tables, connect
        init_db()

    # Scrape and store async
    pool.map(scrape, urls)
Example #17
def run_in_parallel(files):
    p = Pool(processes=10)
    results = p.map(show_top_targets, files)

    # combine results
    print "Combining."
    total_click_counts = {}
    for click_counts in results:
        for target in click_counts:
            if target not in total_click_counts:
                total_click_counts[target] = 0
            total_click_counts[target] += click_counts[target]
    total_clicks = numpy.sum(total_click_counts.values())

    # sort the results
    sorted_click_counts = sorted(
        [(target, total_click_counts[target]) for target in total_click_counts], key=lambda item: item[1], reverse=True
    )

    cum_clicks = 0
    for target, clicks in sorted_click_counts:
        cum_clicks += clicks
        percentile = 100.0 * cum_clicks / total_clicks
        if percentile <= 20:
            print target, clicks, percentile
Example #18
def crack(ssid, clientMac, APMac, Anonce, Snonce, mic, data, passQueue):
    foundPassQ = Queue()
    try:
        timeA = datetime.now()
        startSize = passQueue.qsize()
    except:
        pass
    pool = Pool(numOfPs, crackProcess, (ssid, clientMac, APMac, Anonce, Snonce, mic, data, passQueue, foundPassQ))
    while True:
        sleep(1)
        try:
            timeB = datetime.now()
            currentSize = passQueue.qsize()
            print str(100 - 100.0 * currentSize / startSize) + "% done. " + str(
                (startSize - currentSize) / (timeB - timeA).total_seconds()
            ) + " hashes per second"
        except:
            pass
        if foundPassQ.empty():
            if passQueue.empty():
                returnVal = False
                break
        else:
            passphrase = foundPassQ.get()
            returnVal = passphrase
            break
    pool.terminate()
    return returnVal
Example #19
    def execute(self, processes=2, verbose=False):
        pool = Pool(processes=processes)
        result = []
        if verbose:
            print("")

        for i, _ in enumerate(pool.imap_unordered(self.profileOneColumn, self.columns)):
            if _ is None:
                continue  # TBD: we should write this one to a log file

            result.append(_)

            if verbose:
                sys.stdout.write("\033[1A")
                totalprogress = "\r\033[K## progress {0}/{1}: {2:.2f}% : {3} \n".format(
                    i + 1,
                    len(self.columns),
                    round(i / (len(self.columns) - 1) * 100, 2),
                    str(_.tablename + "." + _.columnname),
                )
                sys.stdout.write(totalprogress)
                sys.stdout.flush()

        pool.close()
        return result
Example #20
def proof_of_work(salt, nBytes):
    n_processes = 8
    batch_size = int(2.5e5)
    pool = Pool(n_processes)

    nonce = 0

    while True:
        nonce_ranges = [(nonce + i * batch_size, nonce + (i + 1) * batch_size) for i in range(n_processes)]

        params = [(salt, nBytes, nonce_range) for nonce_range in nonce_ranges]

        # Single-process search:
        # solutions = map(find_solution, params)

        # Multi-process search:
        solutions = pool.map(find_solution, params)

        print("Searched %d to %d" % (nonce_ranges[0][0], nonce_ranges[-1][1] - 1))

        # Find non-None results
        solutions = list(filter(None, solutions))  # list() so the truthiness test works on Python 3 too

        if solutions:
            return solutions

        nonce += n_processes * batch_size
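
find_solution is not shown in the proof-of-work example (Example #20). A plausible sketch, assuming SHA-256 and a "first nBytes of the digest are zero" success test (the original hash function and criterion may differ); it returns the winning nonce or None, which is what the filter(None, ...) above expects:

import hashlib

def find_solution(params):
    """Search one nonce range; return the first nonce whose digest starts with nBytes zero bytes."""
    salt, nBytes, (start, end) = params  # salt is assumed to be a bytes object
    target = b"\x00" * nBytes
    for nonce in range(start, end):
        digest = hashlib.sha256(salt + str(nonce).encode()).digest()
        if digest[:nBytes] == target:
            return nonce
    return None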
Example #21
File: LS.py Project: arcoslab/cmoc
def create_data(cxrange, cyrange, step, l1, l2, u, den):
    points = []  # renamed from "list" to avoid shadowing the built-in
    p = Pool()
    arguments = []
    steps = 8.00001
    for cy in arange(-2.0 * l2, 2.0 * l2, 2.0 * l2 / steps):
        for cx in arange(-2.0 * l1, 2.0 * l1, 2.0 * l1 / steps):
            arguments.append((l1, l2, cx, cy, u, den))

    for i in arange(steps / 2.0):
        for j in arange(steps / 2.0):
            cx = 2.0 * l1 * exp(j) / e
            cy = 2.0 * l2 * exp(i) / e
            cx = 2.0 * l1 * power(1.7, j) / 1.7
            cy = 2.0 * l2 * power(1.7, i) / 1.7
            arguments.append((l1, l2, cx, cy, u, den))
            arguments.append((l1, l2, -cx, cy, u, den))
            arguments.append((l1, l2, cx, -cy, u, den))
            arguments.append((l1, l2, -cx, -cy, u, den))
    #    for cy in arange(-cyrange,cyrange,step):
    #        print "processing cy:", cy
    #        LS_spec=lambda cx: LS_point(1,1,cx,cy,1,1)
    #        arguments=[]
    #        for cx in arange(-cxrange,cxrange,step):
    #            arguments.append((1,1,cx,cy,1,1))
    print arguments
    points += p.map(LS_point, arguments)
    #        for cx in arange(-cxrange,cxrange,step):
    #            list.append([fx(1,1,cx,cy,1,1)[0], fy(1,1,cx,cy,1,1)[0], m(1,1,cx,cy,1,1)[0]])
    #        print list
    return points
Example #22
def main(args):
    """
    Main function
    :param args: argparse dict
    :return: None
    """

    print "Start reindexing from {0} to {1} with batch size of {2} and {3} worker processes".format(
        args.source_index, args.destination_index, args.batch_size, args.processes
    )

    client = Elasticsearch()
    print "connected to elastic search at http://localhost:9200"

    docs = scan(client, index=args.source_index)

    count = 0

    queue = Queue(args.batch_size)  # don't fill up queue too much
    pool = Pool(args.processes, worker_main, (queue, args.source_index, args.destination_index, args.batch_size))

    for doc in docs:
        count += 1
        if count % args.batch_size == 0:
            print "put {0}".format(count)
        queue.put(doc, True)
    print "put {0}".format(count)

    # send stop messages
    for i in range(args.processes):
        queue.put(Stop, True)

    pool.close()
    pool.join()
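
Example #22's worker_main and Stop sentinel are defined elsewhere. A hedged sketch of what the worker loop presumably does: drain the shared queue until the sentinel arrives, buffering documents and writing them to the destination index (index_batch is a hypothetical helper; the real code most likely uses the elasticsearch bulk helpers):

from elasticsearch import Elasticsearch

def worker_main(queue, source_index, destination_index, batch_size):
    """Consume documents from the shared queue until the Stop sentinel arrives."""
    client = Elasticsearch()  # each worker process opens its own connection
    batch = []
    while True:
        doc = queue.get()
        if doc is Stop:       # sentinel object put on the queue by main()
            break
        batch.append(doc)
        if len(batch) >= batch_size:
            index_batch(client, destination_index, batch)  # hypothetical bulk-index helper
            batch = []
    if batch:
        index_batch(client, destination_index, batch)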
Example #23
def multi_mode(start, stop):
    print "going multi"
    from multiprocessing import Pool

    pool = Pool(processes=4)
    result = pool.map(factorize, xrange(start, stop + 1), chunksize=100)
    print uniq_counter(result)
Example #24
def get_parsed_trees_multi_documents(contents, args):
    # distributes the jobs
    pool = Pool(args["jobs"])
    logging.info("Distributing %d jobs to %d workers", len(contents), args["jobs"])
    trees_list = pool.map(partial(wrap_parse, args=args), contents)
    pool.terminate()
    return trees_list
Example #25
def between_samples_segments(problem, s, t, params, num_processes=1):
    """Return IBD segments between samples in the set s and samples in the set t.
    s,t should be disjoint (if not, duplicate pairs will be present in the result set).
    If num_processes > 1, uses a multiprocessing pool with num_processes parallel processes."""
    pairs = list(it.product(s, t))
    if num_processes == 1:  # Serial
        return sum((sample_segments((problem, i, j, params, None)) for i, j in pairs), im.segment.SegmentSet([]))
    else:  # Parallel
        manager = Manager()
        lock = manager.Lock()

        # Map phase
        po = Pool(processes=num_processes)
        start = time.time()
        res = po.map(
            __hap_segments_task,
            (
                (IbdProblem(problem, (i, a), (j, b), None, params), lock)
                for i, j in pairs
                for a, b in it.product(im.constants.ALLELES, im.constants.ALLELES)
            ),
        )
        t = time.time() - start

        # Reduce phase
        total_segments = sum(res, im.segment.SegmentSet([]))
        if params.debug:
            print "Total %d hap pairs, %d segments" % (4 * len(pairs), total_segments.length)
            print "Elapsed Time: %.3f sec (%.3f sec/pair)" % (t, t / (4 * len(pairs)))
        return total_segments
Example #26
    def get_apartments_info(self, base_urls, apartment_base_url, pages_count=100):
        apartment_id = self.get_apartment_links(base_urls, pages_count)

        pool = Pool(self.thread_number)
        return pool.map(
            SeLogerScrapper.get_apartment_info_from_url, self.get_apartment_url(apartment_id, apartment_base_url)
        )
Example #27
def merge_parallel(inputs):
    """Process several merge jobs in parallel."""
    pool = Pool(CPUS)
    try:
        return pool.map(merge, inputs)
    finally:
        pool.close()
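
On Python 3, where Pool is a context manager, the same try/finally can be written more compactly; note that the context manager calls terminate() on exit rather than close(), which is harmless here because map() has already returned every result:

def merge_parallel(inputs):
    """Process several merge jobs in parallel (Python 3 context-manager form)."""
    with Pool(CPUS) as pool:
        return pool.map(merge, inputs)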
Example #28
def split_ga(filename):
    # Start 4 worker processes: we use subprocesses to avoid the GIL
    pool = Pool(processes=4)

    r = RealReader(filename)
    ideal, non_garbage = r.read_cascade()

    print "Original quantum cost:", ideal.cost()
    print "Original gate count:", len(ideal)

    def partition(l, n):
        for i in xrange(0, len(l), n):
            yield l[i : i + n]

    max_cascade_length = 20

    # The most terrible list comprehension ever...
    cascade_list = [create_cascade(ideal.lines, gates) for gates in list(partition(ideal, max_cascade_length))]

    ideal_copy = ideal.copy()

    new_cascade_list = pool.map_async(smartGA_pool_runner, cascade_list).get(9999999)  # this is an UGLY python hack

    final_cascade = Cascade(ideal.lines)

    print "-----------------------------------"
    print "Final Cascade: "

    for cascade in new_cascade_list:
        for gate in cascade:
            print gate
            final_cascade.append(gate)

    print "Quantum Cost Improvement:", ideal_copy.cost() - final_cascade.cost()
    print "Gate Count Improvement:", len(ideal_copy) - len(final_cascade)
Example #29
def recover(back_dir):

    fileList = []
    procList = []
    greatCmd = """ find %s  -type f -name "*.gz" -size +%s |xargs -i ls -l {} |sort -nrk 5 |awk \'{print $NF}\' """ % (
        back_dir,
        FILESIZE,
    )
    eqCmd = """find {0}  -type f -name "*.gz" -size {1}""".format(back_dir, FILESIZE)
    lessCmd = """find {0}  -type f -name "*.gz" -size -{1}""".format(back_dir, FILESIZE)

    pool = Pool(processes=PROCESS_CONCURRENCY)

    for cmd in [greatCmd, eqCmd, lessCmd]:
        status, data = getstatusoutput(cmd)
        if status == 0 and data:
            fileList.extend(data.split("\n"))

    for recoverFile in fileList:
        db, file = recoverFile.split("/")[-1].split("-")[3], recoverFile
        p = pool.apply_async(source, [db, file])
        procList.append(p)

    for process in procList:
        process.get()
Example #30
def corrmat_u(cmat_x, xdists):
    """
    Convert X-space correlation matrix to U-space correlation matrix.
    """

    cpus = max(1, cpu_count() - 1)  # Don't use every core!
    rho_dict = {}

    n = len(xdists)
    cmat_u = eye(n)
    if cmat_x.sum() > n:
        # Generate correlation matrix in standard normal space, if necessary
        if __name__ == "ftrans":
            # Parallelise the calculation of equivalent rhos
            pool = Pool(cpus)
            rho_dict = {
                (i, j): pool.apply_async(nataf, [xdists[i], xdists[j], cmat_x[i, j]])
                for i in range(n)
                for j in range(i + 1, n, 1)
            }
            for ij in rho_dict.keys():
                rho_ij = rho_dict[ij].get(timeout=60)["x"]
                cmat_u[ij], cmat_u[ij[::-1]] = rho_ij, rho_ij
            pool.close()
    return cmat_u