Example #1
def run(readSet, paramFile):

    # read parameter file
    parser = ConfigParser.SafeConfigParser()
    parser.optionxform = str
    parser.read(paramFile)

    # copy all options to a config object - both general params, and params for this readSet
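    # (a bare lambda serves here as a throwaway namespace object; parameters are attached via its __dict__)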
    cfg = lambda: 0
    cfg.__dict__["readSet"] = readSet
    for section in ("general", readSet):
        for (paramName, paramVal) in parser.items(section):
            if paramName in cfg.__dict__:
                raise Exception(
                    "Config file contains duplicate specification of parameter: "
                    + paramName)
            cfg.__dict__[paramName] = paramVal
            print(paramName, paramVal)

    if str(cfg.numCores) == '0':
        # use all cores if numCores = 0
        cfg.numCores = str(cpu_count())

    # convert some params to boolean
    cfg.deleteLocalFiles = cfg.deleteLocalFiles.lower() == "true"

    # return config object
    return cfg
Example #2
 def gt_sdsdb(self):
     """
     :return:
     """
     cache_file = os.path.join(self.cache_path, self.name + '_gt_sdsdb.pkl')
     """
     if os.path.exists(cache_file):
         with open(cache_file, 'rb') as fid:
             sdsdb = cPickle.load(fid)
         print '{} gt sdsdb loaded from {}'.format(self.name, cache_file)
         return sdsdb
     """
     # for internal usage
     tic()
     gt_sdsdb_temp = [
         self.load_coco_sds_annotation(index)
         for index in self.image_set_index
     ]
     gt_sdsdb = [x[0] for x in gt_sdsdb_temp]
     print 'prepare gt_sdsdb using', toc(), 'seconds'
     #objs = [x[1] for x in gt_sdsdb_temp]
     tic()
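     # call the kernel once in the main process first (presumably to surface errors and one-time setup early) before mapping over the pool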
     generate_cache_seg_inst_kernel(gt_sdsdb_temp[0])
     pool = mp.Pool(mp.cpu_count())
     pool.map(generate_cache_seg_inst_kernel, gt_sdsdb_temp)
     pool.close()
     pool.join()
     print 'generate cache_seg_inst using', toc(), 'seconds'
     """
     with open(cache_file, 'wb') as fid:
         cPickle.dump(gt_sdsdb, fid, cPickle.HIGHEST_PROTOCOL)
     """
     # for future release usage
     # need to implement load sbd data
     return gt_sdsdb
Example #3
    def collate(self):

        # Set the stacksize to be unlimited
        res.setrlimit(res.RLIMIT_STACK, (res.RLIM_INFINITY, res.RLIM_INFINITY))

        # Locate the FMS collation tool
        mppnc_path = None
        for f in os.listdir(self.expt.lab.bin_path):
            if f.startswith("mppnccombine"):
                mppnc_path = os.path.join(self.expt.lab.bin_path, f)
                break
        assert mppnc_path

        # Check config for collate command line options
        collate_flags = self.expt.config.get("collate_flags")
        if collate_flags is None:
            collate_flags = "-r -64"

        # Import list of collated files to ignore
        collate_ignore = self.expt.config.get("collate_ignore")
        if collate_ignore is None:
            collate_ignore = []
        elif type(collate_ignore) != list:
            collate_ignore = [collate_ignore]

        # Generate collated file list and identify the first tile
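        # (per-tile files are named like '<stem>.nc.NNNN': a '.nc.' suffix followed by a four-digit tile index)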
        tile_fnames = [f for f in os.listdir(self.output_path) if f[-4:].isdigit() and f[-8:-4] == ".nc."]

        mnc_tiles = defaultdict(list)
        for t_fname in tile_fnames:
            t_base, t_ext = os.path.splitext(t_fname)
            t_ext = t_ext.lstrip(".")

            # Skip any files listed in the ignore list
            if t_base in collate_ignore:
                continue

            mnc_tiles[t_base].append(t_fname)

        # If this is run interactively, NCPUS is set in collate_cmd; otherwise
        # cpu_count() returns the number of CPUs assigned to the PBS job
        count = int(os.environ.get("NCPUS", multiprocessing.cpu_count()))
        pool = multiprocessing.Pool(processes=count)

        # Collate each tileset into a single file
        for nc_fname in mnc_tiles:
            nc_path = os.path.join(self.output_path, nc_fname)

            # Remove the collated file if it already exists, since it is
            # probably from a failed collation attempt
            # TODO: Validate this somehow
            if os.path.isfile(nc_path):
                os.remove(nc_path)

            cmd = "{} {} {} {}".format(mppnc_path, collate_flags, nc_fname, " ".join(mnc_tiles[nc_fname]))
            print(cmd)
            pool.apply_async(cmdthread, args=(cmd, self.output_path))

        pool.close()
        pool.join()
Example #4
def run(readSet, paramFile, args):

    # read parameter file
    parser = ConfigParser.SafeConfigParser()
    parser.optionxform = str
    parser.read(paramFile)
    print(readSet)
    print(paramFile)
    print(args)
    # copy all options to a config object - both general params, and params for this readSet
    cfg = lambda: 0
    cfg.__dict__["readSet"] = readSet
    for (paramName, paramVal) in parser.items("general"):
        if paramName in cfg.__dict__:
            raise Exception(
                "Config file contains duplicate specification of parameter: " +
                paramName)
        cfg.__dict__[paramName] = paramVal
        print(paramName, paramVal)

    cfg.genomeFile = args.genomeFile
    cfg.numCores = args.numCores
    cfg.samtoolsMem = args.samtoolsMem
    cfg.readFile1 = args.readFile1
    cfg.readFile2 = args.readFile2
    cfg.primerFile = args.primerFile

    if str(cfg.numCores) == '0':
        # use all cores if numCores = 0
        cfg.numCores = str(cpu_count())

    # convert some params to boolean
    if "outputDetail" in cfg.__dict__:
        cfg.outputDetail = cfg.outputDetail.lower() == "true"
    else:
        cfg.outputDetail = False
    if "multimodal" in cfg.__dict__:
        cfg.multimodal = cfg.multimodal.lower() == "true"
    else:
        cfg.multimodal = False
    if "duplex" in cfg.__dict__:
        cfg.duplex = cfg.duplex.lower() == "true"
    else:
        cfg.duplex = False
    if "deleteLocalFiles" in cfg.__dict__:
        cfg.deleteLocalFiles = cfg.deleteLocalFiles.lower() == "true"
    else:
        cfg.deleteLocalFiles = False
    if "instrument" not in cfg.__dict__:  # IonTorrent, or
        cfg.instrument = "N/A"  # say the user forgot to specify this for Illumina - Use MiSeq as default

    # return config object
    return cfg
Example #5
def lambda_multiMaster(start_time, isThreaded, envs, s3Enabled):
    global threadEvent
    objs = []
    q = None
    if isThreaded:
        threadEvent = 'Event' if s3Enabled else None
        print 'is THREADED mode'
        totalItems = len(envs)
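        # poolThreadNumber presumably derives a worker-pool size from the item count and the available cores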
        pools = auditMeth.poolThreadNumber(totalItems, mp.cpu_count())

        p = mp.Pool(1 if pools == 0 else pools)
        testing = False

        if not testing:
            xx = ({aID: e} for aID, e in envs.items())
            print xx
            results = p.map(executeLambda, ({
                aID: e
            } for aID, e in envs.items()))
        # results = None
        return results
    else:
        print 'is multi PROCESSOR mode'
        p = mp.Pool()
        m = mp.Manager()
        q = m.Queue()
        for aID, e in envs.items():
            # client = aconnect.__get_client__('lambda')
            pload = None
            pload = {aID: e}
            print pload
            results = p.apply_async(executeLambda, (pload, q))

    p.close()
    p.join()
    pyObj = {}
    if isThreaded:  ### WORKS IN AWS LAMBDA  ###
        for que in results:
            newobjs, newPyObj = que
            objs = objs + newobjs
            pyObj.update(newPyObj)
    else:
        while not q.empty():
            newobjs, newPyObj = q.get()
            objs = objs + newobjs
            pyObj.update(newPyObj)

    # lambda_writeResult(upload2S3, pyObj, Main_bucket,sumoName)

    print("--- %s seconds B---" % (time.time() - start_time))
    return results
Example #6
    def _write_coco_results(self, detections, res_file, ann_type):
        """ example results
        [{"image_id": 42,
          "category_id": 18,
          "bbox": [258.15,41.29,348.26,243.78],
          "score": 0.236}, ...]
        """
        all_im_info = [{
            'index': index,
            'height': self.annos[index]['image_height'],
            'width': self.annos[index]['image_width']
        } for index in self.image_set_index]

        if ann_type == 'bbox':
            data_pack = [{
                'cat_id': self._class_to_vispr_ind[cls],
                'cls_ind': cls_ind,
                'cls': cls,
                'attr_id': cls,
                'ann_type': ann_type,
                'binary_thresh': self.binary_thresh,
                'all_im_info': all_im_info,
                'boxes': detections[0][cls_ind]
            } for cls_ind, cls in enumerate(self.classes)
                         if not cls == '__background__']
        elif ann_type == 'segm':
            data_pack = [{
                'cat_id': self._class_to_vispr_ind[cls],
                'cls_ind': cls_ind,
                'cls': cls,
                'attr_id': cls,
                'ann_type': ann_type,
                'binary_thresh': self.binary_thresh,
                'all_im_info': all_im_info,
                'boxes': detections[0][cls_ind],
                'masks': detections[1][cls_ind]
            } for cls_ind, cls in enumerate(self.classes)
                         if not cls == '__background__']
        else:
            print 'unimplemented ann_type: ' + ann_type
        # results = coco_results_one_category_kernel(data_pack[1])
        # print results[0]
        pool = mp.Pool(mp.cpu_count())
        results = pool.map(coco_results_one_category_kernel, data_pack)
        pool.close()
        pool.join()
        results = sum(results, [])
        print 'Writing results json to %s' % res_file
        with open(res_file, 'w') as f:
            json.dump(results, f, sort_keys=True, indent=4)
Example #7
    def _parallelize(self, work, processes=2):
        if not processes:
            # use 2x number of available CPUs
            processes = 2 * mp.cpu_count()

        # compute chunk size
        connection_pool_size = self._adapter.poolmanager.connection_pool_kw[
            'maxsize']
        chunksize = max(1, 2 * len(work) // int(connection_pool_size))

        # create process pool and run `func` in parallel
        logger.debug('using %d parallel processes', processes)
        pool = mp.Pool(processes)
        return pool.map(self.__do, work, chunksize)
Example #8
    def _setup_pool(self):
        number_of_procs = globals.s3_multipart_max_procs
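        # fall back to one process per CPU when no explicit limit is configured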
        if not number_of_procs:
            number_of_procs = multiprocessing.cpu_count()

        if getattr(self, '_pool', False):
            log.Debug("A process pool already exists. Destroying previous pool.")
            self._pool.terminate()
            self._pool.join()
            self._pool = None

        log.Debug("Setting multipart boto backend process pool to %d processes" % number_of_procs)

        self._pool = multiprocessing.Pool(processes=number_of_procs)
Example #9
 def create_consumers(self, customer_count=0):
     """
         Summary:
             Create the consumer processes.
     :return: the list of started consumer processes
     """
     consumer_count = customer_count or multithreading.cpu_count()
     consumer_list = list()
     for _ in range(consumer_count):
         curr_consumer = multithreading.Process(
             target=self.worker, args=())
         curr_consumer.daemon = True  # run as a daemon process so that child processes exit when the main process ends
         consumer_list.append(curr_consumer)
         curr_consumer.start()
     return consumer_list
Example #10
    def __init__(self, list, output, threads, output_type, nothread):

        if threads is None:
            threads = cpu_count()
        if output_type is None:
            output_type = "txt"

        self.list = list
        self.output = output
        self.threads = threads
        self.output_type = output_type
        self.nothread = nothread

        self.accounts_array = []
        self.results_array = []
Example #11
def append_protein_names_to_workbook(workbook):
    # Workers pool.
    pool = Pool(cpu_count() * 2)

    # Process each worksheet.
    sheet_names = workbook.get_sheet_names()
    # Used to search for accession column.
    accession_re = re.compile(r'.*accession$')
    print('Processing {0} sheets'.format(len(sheet_names)))
    for name in sheet_names:
        sheet = workbook.get_sheet_by_name(name)
        # Get column with accessions.
        accession_col = 2
        for col_i in range(2, sheet.max_column + 1):
            cell = sheet.cell(row=2, column=col_i)
            if cell and cell.value and accession_re.match(cell.value):
                accession_col = col_i
                break
        # Get protein data.
        accessions = []
        for row_i in range(2, sheet.max_row + 1):
            cell = sheet.cell(row=row_i, column=accession_col)
            # Skip empty rows.
            if not(cell and cell.value):
                continue
            accessions.append(cell.value)

        # Fetch proteins.
        proteins = pool.map(fetch_proteins, accessions)

        # Write protein data.
        for row_i, protein in enumerate(proteins, 2):
            cell = sheet.cell(row=row_i, column=1)
            if not cell.value or (protein and cell.value != protein):
                cell.value = protein

        print_progress()

    # Wait for pool threads to finish.
    pool.close()
    pool.join()

    print_progress_end()

    return
Example #12
 def _independent_tranform_thunk(obs):
     results = {}
     if multithread:
         pool = mp.Pool(min(mp.cpu_count(), len(sensor_shapes)))
         pool.map()
     else:
         for name, transform in transforms.items():
             try:
                 results[name] = transform(obs[name])
             except Exception as e:
                 print(
                     f'Problem applying preproces transform to {name}.',
                     e)
                 raise e
     for name, val in obs.items():
         if name not in results and keep_unnamed:
             results[name] = val
     return SensorPack(results)
Example #13
def main(args: argparse.Namespace):  # noqa
    if args.num_workers == 0:
        import multiprocessing.dummy as multiprocessing
        args.num_workers = 1
    else:
        import multiprocessing

    if args.num_workers < 0:  # Use all cores
        args.num_workers = multiprocessing.cpu_count()

    def create_chunk_input_stream():
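        # batch the input lines into chunks of 256 so each pool task processes a batch rather than a single line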
        chunk = []
        for f in args.inputs:
            for line in f:
                chunk.append(line)
                if len(chunk) >= 256:
                    yield chunk
                    chunk = []
        if len(chunk) > 0:
            yield chunk

    pbar = None
    if args.show_pbar:
        pbar = tqdm()

    with multiprocessing.Pool(
        args.num_workers,
        initializer=worker_init_fn,
        initargs=[args.lang]
    ) as pool:
        for chunk in pool.imap(worker_fn, create_chunk_input_stream()):
            if pbar is not None:
                pbar.update(len(chunk))
            for sents in chunk:
                for sent in sents:
                    args.output.write(sent + '\n')
                if args.verbose and len(sents) > 1:
                    sys.stderr.write('\rSplitting done: {}\n'.format(sents))
    sys.stderr.flush()
    args.output.flush()

    if pbar is not None:
        pbar.close()
Example #14
def main(args: argparse.Namespace):  # noqa
    if args.num_workers == 0:
        import multiprocessing.dummy as multiprocessing
        args.num_workers = 1
    else:
        import multiprocessing

    if args.num_workers < 0:  # Use all cores
        args.num_workers = multiprocessing.cpu_count()

    def create_chunk_input_stream():
        chunk = []
        for f in args.inputs:
            for line in f:
                chunk.append(line)
                if len(chunk) >= 256:
                    yield chunk
                    chunk = []
        if len(chunk) > 0:
            yield chunk

    pbar = None
    if args.show_pbar:
        pbar = tqdm()

    with multiprocessing.Pool(
            args.num_workers,
            initializer=worker_init_fn,
            initargs=[args.lang, args.annotate_hyphens,
                      args.protect_urls]) as pool:
        for chunk in pool.imap(worker_fn, create_chunk_input_stream()):
            if pbar is not None:
                pbar.update(len(chunk))
            for line in chunk:
                args.output.write(line + '\n')
    args.output.flush()

    if pbar is not None:
        pbar.close()
Example #15
                collection_result.insert(item)
            except Exception, e:
                print e

        try:
            data_detail_js['_id'] = routine['_id']
            collection_result1.insert(data_detail_js)
        except Exception, e:
            print e
        try:
            data_mdskip_js['_id'] = routine['_id']
            collection_result2.insert(data_mdskip_js)
        except Exception, e:
            print e
        with open('ids.txt', 'a') as f:
            f.write('%s\n' % routine['_id'])
        print 'Finish %s' % routine['_id']
    except Exception, e:
        print e
        try:
            collection_failure.insert(routine)
        except Exception, e:
            print e


if __name__ == '__main__':
    pool = Pool(cpu_count())
    pool.map(run, collection_html.find())
    pool.close()
    pool.join()
Example #16
import urlparse
import zlib

import boto3
import gflags
import requests

logger = logging.getLogger(__name__)
FLAGS = gflags.FLAGS

# The base Common Crawl CDX URL to use.
DEFAULT_CDX_SERVER_URL = 'http://index.commoncrawl.org/'

gflags.DEFINE_integer(
    'num_threads',
    mp.cpu_count(),
    'The number of worker threads to use.')
gflags.DEFINE_string(
    'cdx_server_url',
    'http://index.commoncrawl.org/',
    'The Common Crawl CDX server to use.')
gflags.DEFINE_string(
    'warc_s3_bucket',
    'aws-publicdatasets',
    'The S3 bucket from which WARC files are retrieved.')


class Error(Exception):
    pass

Example #17
from . import encoder
from . import util

__version__ = '0.3.2'
__author__ = 'Patrick C. McGinty'
__email__ = '*****@*****.**'

DEFAULT_ENCODER = 'aac'

# define a mapping of encoder types to implementation class names.
ENCODERS = {
    'aac': encoder.AacEncoder,
    'ogg': encoder.OggEncoder,
    'mp3': encoder.Mp3Encoder,
}
CORES = mp.cpu_count()


#############################################################################
class WorkUnit(object):
    """
   Processing unit for transcoding a single file.

   Multiple instances of this class are asynchronously executed in a
   multiprocessing worker pool queue.
   """
    def __init__(self, opts, max_work):
        """
      :param opts:   Parsed command-line options.
      :type  opts:   :mod:`optparse`.Values
Example #18
def main():
    optparser = optparse.OptionParser( usage="%prog [options] [path/to/catchsegv.exe] [path/to/test/apps] ..")
    optparser.add_option(
        '-R', '--regex', metavar='REGEX',
        type="string", dest="regex",
        default = '.*')
    optparser.add_option(
        '-v', '--verbose',
        action="store_true",
        dest="verbose", default=False)
    
    global options
    (options, args) = optparser.parse_args(sys.argv[1:])

    if len(args) >= 1:
        catchsegvExe = args[0]
    else:
        catchsegvExe = os.path.join('bin', 'catchsegv.exe')
    if not os.path.isfile(catchsegvExe):
        optparser.error('error: %s does not exist\n' % catchsegvExe)
        sys.exit(1)

    if len(args) >= 2:
        testsExeDirs = args[1:]
    else:
        testsExeDirs = [os.path.normpath(os.path.join(os.path.dirname(catchsegvExe), '..', 'tests', 'apps'))]

    testsSrcDir = os.path.dirname(__file__)

    testNameRe = re.compile(options.regex)

    if sys.platform == 'win32':
        import ctypes
        SEM_FAILCRITICALERRORS     = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX      = 0x0002
        SEM_NOOPENFILEERRORBOX     = 0x8000
        uMode = ctypes.windll.kernel32.SetErrorMode(0)
        uMode |= SEM_FAILCRITICALERRORS \
              |  SEM_NOALIGNMENTFAULTEXCEPT \
              |  SEM_NOGPFAULTERRORBOX \
              |  SEM_NOOPENFILEERRORBOX
        ctypes.windll.kernel32.SetErrorMode(uMode)

    failedTests = []

    numJobs = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(numJobs)

    testSrcFiles = os.listdir(testsSrcDir)
    testSrcFiles.sort()

    testArgs = []
    for testSrcFile in testSrcFiles:
        testName, ext = os.path.splitext(testSrcFile)

        if ext not in ('.c', '.cpp'):
            continue

        if not testNameRe.search(testName):
            continue

        if sys.platform != 'win32' and testName in ('ctrl_break', 'ctrl_c'):
            continue

        testSrc = os.path.join(testsSrcDir, testSrcFile)

        for testsExeDir in testsExeDirs:
            testExe = os.path.join(testsExeDir, testName + '.exe')
            if not os.path.isfile(testExe):
                sys.stderr.write('fatal: %s does not exist\n' % testExe)
                sys.exit(1)

            testArgs.append((catchsegvExe, testExe, testSrc))

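    # fall back to a plain serial map when there is nothing to parallelise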
    if numJobs <= 1 or len(testArgs) <= 1:
        imap = map
    else:
        imap = pool.imap_unordered

    for testName, testResult in imap(test, testArgs):
        if not testResult:
            failedTests.append(testName)

    #sys.stdout.write('1..%u\n' % numTests)
    if failedTests:
        sys.stdout.write('# %u tests failed\n' % len(failedTests))
        for failedTest in failedTests:
            sys.stdout.write('# - %s\n' % failedTest)
        sys.exit(1)

    sys.exit(0)
Example #20
    def ec2Item(self, current, aconnect, itemlist):
        printColor(['_____LISTING EC2 [] now....in .%s' % (aconnect._region)])
        threaded = aconnect.getMultiThread()
        objs = []
        lfound = []
        rows = []
        #processes = mp.cpu_count()*2
        #p = mp.Pool(processes)
        #m = mp.Manager(processes=processes*2)
        #q = m.Queue(processes*2)
        #print itemlist
        pools = 2  #171.35 seconds  #168 seconds
        totalItems = len(itemlist)
        if threaded:
            pools = auditMeth.poolThreadNumber(totalItems, mp.cpu_count())
            if pools > 0:
                p = mp.Pool(pools)
            elif pools == 0:
                threaded = False
        else:
            p = mp.Pool()
            m = mp.Manager()
            q = m.Queue()
        nEnv = aconnect._env
        if totalItems > 0:
            objs.append([
                'Name[%s]' % (current.svc), 'Audit', 'Owner', 'type',
                'Platform', 'SecurityGroups', 'Instance', 'Zone', 'VPC',
                'State', 'cpu_week', 'cost', 'spot'
            ])

        if not threaded:
            for unit in itemlist:
                name = unit['Name']
                account = unit['OwnerId']
                if str(nEnv) in name or aconnect._useAccounts:
                    #p = Process(target=self.ec2Define,args=(current,aconnect,unit,name))
                    if pools == 0:
                        newobjs, row = ec2Define(unit, name)
                    else:
                        getit = p.apply_async(ec2Define, (unit, name, q))
                    #getit.get()
                    lfound.append(name)
                    #newobjs,row = ec2Define(current,aconnect,unit,name)
                    #objs = objs+newobjs
                    #rows.append(row)
        else:
            #print itemlist
            lfound = [unit['Name'] for unit in itemlist]
            results = p.map(ec2Define, (unit for unit in itemlist))

        if pools > 0:
            p.close()
            p.join()
            print results
            if threaded:
                for que in results:
                    newobjs, row = que
                    #print ' compute --got[C] result', row
                    objs = objs + newobjs
                    rows.append(row)
            else:
                while not q.empty():
                    newobjs, row = q.get()
                    objs = objs + newobjs
                    #print row
                    #print newobjs
                    rows.append(row)

        return (lfound, objs, rows)
Example #21
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
from tqdm import tqdm
import multiprocessing.dummy as multiprocessing
from pathos.multiprocessing import ProcessingPool as Pool
from functools import partial
IMG_BIT_DEPTH = 2**8

num_workers = multiprocessing.cpu_count()
# num_workers = 1
print("CPU number: " + str(num_workers))

# num_workers *= 2


def list_split(items, n):
    return [items[i:i + n] for i in range(0, len(items), n)]


def func(zip_args):
    img, pic_path, pic = zip_args
    # convert the image to grayscale
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # gray_img = img
    # get the number of rows and columns of the grayscale matrix
    r, c = gray_img.shape[:2]
    dark_sum = 0  # count of dark pixels, initialised to 0
Example #22
    help=
    "The combo list is a list with users and passwords in a 'username:password' format."
)
parser.add_argument(
    "output_file_name",
    help=
    "Only the name of the file. The extension will be determined by the type of output selected."
)
parser.add_argument(
    "--output_type",
    help="The output type can be: txt, json, xml and html (Default: txt).",
    action="store",
    type=str)
parser.add_argument(
    "--threads",
    help=
    "Number of workers that SpotCheck uses (A very high number can cause an error in the program due to the limitations of your computer) (Default: {})."
    .format(cpu_count()),
    type=int,
    action="store")
parser.add_argument(
    "--nothreads",
    help=
    "If this argument is specified, SpotCheck will not create any thread, otherwise the main SpotCheck process will perform the checks.",
    action="store_true",
    default=False)

args = parser.parse_args()

Main(args.combo_list, args.output_file_name, args.threads, args.output_type,
     args.nothreads).start_check()
Example #23
    def rdsItem(self, current, aconnect, itemlist):
        added = False
        printColor(['_____LISTING RDS [] now....in .%s' % (aconnect._region)])
        threaded = aconnect.getMultiThread()
        nEnv = aconnect._env
        lfound = []
        objs = []
        rows = []

        pools = 2  # 171.35 seconds  #168 seconds
        totalItems = len(itemlist)
        q = None
        if threaded:
            pools = auditMeth.poolThreadNumber(totalItems, mp.cpu_count())

            p = mp.Pool(1 if pools == 0 else pools)

        else:
            p = mp.Pool()
            m = mp.Manager()
            q = m.Queue()
        if totalItems > 0:
            objs.append([
                'Name[%s]' % (current.svc), 'Audit', 'Owner', 'Engine',
                'Size (GB)', 'Instance', 'MutliAZ', 'VPC', 'last_Modified',
                'connections', 'cost'
            ])
        if not threaded:
            for unit in itemlist:
                name = unit['DBInstanceIdentifier']
                #sg=unit['VpcSecurityGroups'][0]['VpcSecurityGroupId']
                #ec2 = aconnect.__get_client__('ec2')
                #sgUnit = ec2.describe_security_groups(GroupIds=[sg])['SecurityGroups'][0]
                #account = sgUnit['OwnerId']
                if nEnv in name or aconnect._useAccounts:
                    if pools == 0:
                        objs, row = rdsDefine(unit, name, q)
                    else:
                        getit = p.apply_async(rdsDefine, (unit, name, q))
                    #getit.get()
                    lfound.append(name)

                    #objs,row = self.rdsDefine(current,aconnect,objs,unit,name)
                    #rows.append(row)
        else:
            #print itemlist
            lfound = [unit['DBInstanceIdentifier'] for unit in itemlist]
            results = p.map(rdsDefine, (unit for unit in itemlist))

        if pools > 0:
            p.close()
            p.join()
            if threaded:
                for que in results:
                    newobjs, row = que
                    #print ' DYNAMO --got[C] result', row
                    objs = objs + newobjs
                    rows.append(row)
            else:
                while not q.empty():
                    newobjs, row = q.get()
                    objs = objs + newobjs
                    # print newobjs
                    rows.append(row)
        print objs
        return (lfound, objs, rows)
Example #24
from collections import namedtuple, Counter
from Statistics import centre_of_mass
import csv
import pandas as pd
import numpy as np
# make this really fast:
import multiprocessing.dummy as multiprocessing
CPU_COUNT = multiprocessing.cpu_count()

D_PM = 1 << 30  # convert to driver
R_PM = 1 << 31  # convert to resistant
L_PM = (1 << 30) - 1  # return from driver
L_PM2 = (1 << 31) - 1  # return from resistant

Cell_ = namedtuple('Cell', 'x y z genotype')
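# give every field a default (x=y=z=0, genotype=-1)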
Cell_.__new__.__defaults__ = (0, 0, 0, -1)


class Cell(Cell_):
    """
	Generic holder for cell details.
	@params:
	  id = cell ID
	  x,y,z = coordinates of the cell in the tumor
	  genotype = the genotype ID of this cell
  """
    @staticmethod
    def load_cells(file_name):
        """
	Returns a list of Cells
	@params:
Example #25
    def run(self):
        """Start process all shots with a processbar."""

        shots = self.get_shot_list()
        shots_info = dict.fromkeys(self._all_shots, '本次未处理')
        is_multi_threading = self.flags & MULTI_THREADING
        thread_count = cpu_count() if is_multi_threading else 1
        pool = Pool(thread_count)
        proc_queue = Queue()
        cancel_event = Event()
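        # spawned Nuke processes are pushed onto proc_queue so they can be terminated if the user cancels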

        def _run(shot):
            if cancel_event.is_set():
                return '取消: {}'.format(shot)

            output = os.path.join(
                CONFIG['output_dir'], '{}_v0.nk'.format(shot))
            input_dir = shot if os.path.isdir(
                shot) else os.path.join(CONFIG['input_dir'], shot)
            cmd = u'"{nuke}" -t -priority low "{script}" "{input_dir}" "{output}"'.format(
                nuke=nuke.EXE_PATH,
                script=__path__,
                input_dir=input_dir,
                output=output
            )

            try:
                LOGGER.info('%s:开始', shot)
                proc = Popen(cmd,
                             #  shell=True,
                             stdout=PIPE,
                             stderr=PIPE)
                proc_queue.put(proc)
                stderr = u(proc.communicate()[1])

                if START_MESSAGE in stderr:
                    stderr = stderr.partition(
                        START_MESSAGE)[2].strip()

                if stderr:
                    shots_info[shot] = stderr
                elif proc.returncode:
                    if cancel_event.is_set():
                        shots_info[shot] = '用户取消'
                    else:
                        shots_info[shot] = 'Nuke非正常退出: {}'.format(
                            proc.returncode)
                else:
                    shots_info[shot] = '正常完成'

                LOGGER.info('%s:结束', shot)
            except:
                shots_info[shot] = traceback.format_exc()
                LOGGER.error('Unexpected exception during comp', exc_info=True)
                raise RuntimeError

            return '完成: {}'.format(shot)

        if is_multi_threading:
            _run = run_with_memory_require(8)(_run)

        def _oncancel():
            cancel_event.set()
            while not proc_queue.empty():
                proc = proc_queue.get()
                if proc.poll() is None:
                    try:
                        proc.terminate()
                    except OSError:
                        pass

        try:
            for _ in progress(pool.imap_unordered(_run, shots),
                              name='批量合成',
                              total=len(shots),
                              start_message=(
                                  '正在使用 {} 线程进行……'.format(thread_count)),
                              oncancel=_oncancel):
                pass
        except (CancelledError, RuntimeError):
            pass

        webbrowser.open(self.generate_report(shots_info))
        webbrowser.open(CONFIG['output_dir'])
Example #26
from . import decoder
from . import encoder
from . import util

__version__ = '0.3.2'
__author__ = 'Patrick C. McGinty'
__email__ = '*****@*****.**'

DEFAULT_ENCODER = 'aac'

# define a mapping of encoder types to implementation class names.
ENCODERS = {'aac':encoder.AacEncoder,
            'ogg':encoder.OggEncoder,
            'mp3':encoder.Mp3Encoder,
         }
CORES = mp.cpu_count()


#############################################################################
class WorkUnit( object ):
   """
   Processing unit for transcoding a single file.

   Multiple instances of this class are asynchronously executed in a
   multiprocessing worker pool queue.
   """
   def __init__( self, opts, max_work ):
      """
      :param opts:   Parsed command-line options.
      :type  opts:   :mod:`optparse`.Values
Example #27
import sys, os, re, traceback
from os.path import isfile
from multiprocessing.dummy import Pool, cpu_count
from counter import Counter
from ops.rotate import Rotate
from ops.fliph import FlipH
from ops.flipv import FlipV
from ops.zoom import Zoom
from ops.blur import Blur
from ops.noise import Noise
from ops.translate import Translate
from skimage.io import imread, imsave

EXTENSIONS = ['png', 'jpg', 'JPG', 'jpeg', 'JPEG', 'bmp']
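# use all but one CPU core, but always at least one worker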
WORKER_COUNT = max(cpu_count() - 1, 1)
OPERATIONS = [Rotate, FlipH, FlipV, Translate, Noise, Zoom, Blur]
'''
Augmented files will have names matching the regex below, eg

    original__rot90__crop1__flipv.jpg

'''
AUGMENTED_FILE_REGEX = re.compile('^.*(__.+)+\\.[^\\.]+$')
EXTENSION_REGEX = re.compile('|'.join(
    map(lambda n: '.*\\.' + n + '$', EXTENSIONS)))

thread_pool = None
counter = None


def build_augmented_file_name(original_name, ops):
Example #28
def main():
    optparser = optparse.OptionParser(
        usage="%prog [options] [path/to/catchsegv.exe] [path/to/test/apps] ..")
    optparser.add_option('-R',
                         '--regex',
                         metavar='REGEX',
                         type="string",
                         dest="regex",
                         default='.*')
    optparser.add_option('-v',
                         '--verbose',
                         action="store_true",
                         dest="verbose",
                         default=False)

    global options
    (options, args) = optparser.parse_args(sys.argv[1:])

    if len(args) >= 1:
        catchsegvExe = args[0]
    else:
        catchsegvExe = os.path.join('bin', 'catchsegv.exe')
    if not os.path.isfile(catchsegvExe):
        optparser.error('error: %s does not exist\n' % catchsegvExe)
        sys.exit(1)

    if len(args) >= 2:
        testsExeDirs = args[1:]
    else:
        testsExeDirs = [
            os.path.normpath(
                os.path.join(os.path.dirname(catchsegvExe), '..', 'tests',
                             'apps'))
        ]

    testsSrcDir = os.path.dirname(__file__)

    testNameRe = re.compile(options.regex)

    if sys.platform == 'win32':
        import ctypes
        SEM_FAILCRITICALERRORS = 0x0001
        SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
        SEM_NOGPFAULTERRORBOX = 0x0002
        SEM_NOOPENFILEERRORBOX = 0x8000
        uMode = ctypes.windll.kernel32.SetErrorMode(0)
        uMode |= SEM_FAILCRITICALERRORS \
              |  SEM_NOALIGNMENTFAULTEXCEPT \
              |  SEM_NOGPFAULTERRORBOX \
              |  SEM_NOOPENFILEERRORBOX
        ctypes.windll.kernel32.SetErrorMode(uMode)

    failedTests = []

    numJobs = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(numJobs)

    testSrcFiles = os.listdir(testsSrcDir)
    testSrcFiles.sort()

    testArgs = []
    for testSrcFile in testSrcFiles:
        testName, ext = os.path.splitext(testSrcFile)

        if ext not in ('.c', '.cpp'):
            continue

        if not testNameRe.search(testName):
            continue

        if sys.platform != 'win32' and testName in ('ctrl_break', 'ctrl_c'):
            continue

        testSrc = os.path.join(testsSrcDir, testSrcFile)

        for testsExeDir in testsExeDirs:
            testExe = os.path.join(testsExeDir, testName + '.exe')
            if not os.path.isfile(testExe):
                sys.stderr.write('fatal: %s does not exist\n' % testExe)
                sys.exit(1)

            testArgs.append((catchsegvExe, testExe, testSrc))

    if numJobs <= 1 or len(testArgs) <= 1:
        imap = map
    else:
        imap = pool.imap_unordered

    for testName, testResult in imap(test, testArgs):
        if not testResult:
            failedTests.append(testName)

    #sys.stdout.write('1..%u\n' % numTests)
    if failedTests:
        sys.stdout.write('# %u tests failed\n' % len(failedTests))
        for failedTest in failedTests:
            sys.stdout.write('# - %s\n' % failedTest)
        sys.exit(1)

    sys.exit(0)
Example #29
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import time
# multi-threading, suited to IO-bound tasks such as crawlers
import multiprocessing.dummy as Threads


class hello:
    def name(self, person_name):
        print Threads.current_process().name
        for i in range(10):
            time.sleep(1)
            print 'hello %s %d' %(person_name,i)

pool_size = Threads.cpu_count()*2

pool = Threads.Pool(pool_size)

print pool

names = [
    u'lily'.encode('utf-8'),
    u'fish'
]

hello = hello()
results = pool.map(hello.name, names)

pool.close()
pool.join()
Example #30

# The following function & copy_reg.pickle() call make it possible for pickle to serialize class functions
# This is critical to allow multiprocessing.Pool.map_async() to work as desired
# See: http://stackoverflow.com/a/19861595
def _reduce_method(m):
    if m.__self__ is None:
        return getattr, (m.__class__, m.__func__.__name__)
    else:
        return getattr, (m.__self__, m.__func__.__name__)

copy_reg.pickle(types.MethodType, _reduce_method)


# Set default number of worker threads
if multiprocessing.cpu_count() <= 2:
    numThreadsDefault = 4
else:
    numThreadsDefault = multiprocessing.cpu_count() * 2


def jprint(jsoninput):
    """Pretty-print jsoninput."""
    return json.dumps(jsoninput, sort_keys=True, indent=2) + "\n"


def extract_cves_from_input(obj, descriptiveNoun=None):
    """Use case-insensitive regex to extract CVE ids from input object.

    *obj* can be a list, a file, or a string.
Example #31
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import time
# multi-threading, suited to IO-bound tasks such as crawlers
import multiprocessing.dummy as Threads


class hello:
    def name(self, person_name):
        print Threads.current_process().name
        for i in range(10):
            time.sleep(1)
            print 'hello %s %d' % (person_name, i)


pool_size = Threads.cpu_count() * 2

pool = Threads.Pool(pool_size)

print pool

names = [u'lily'.encode('utf-8'), u'fish']

hello = hello()
results = pool.map(hello.name, names)

pool.close()
pool.join()
Example #32
    def dynamoItem(self, current, aconnect, itemlist):
        added = False
        printColor(
            ['_____LISTING DynamoDB [] now....in .%s' % (aconnect._region)])
        threaded = aconnect.getMultiThread()
        nEnv = aconnect._env
        lfound = []
        objs = []
        rows = []

        pools = 2  # 171.35 seconds  #168 seconds
        totalItems = len(itemlist)
        if threaded:
            pools = auditMeth.poolThreadNumber(totalItems, mp.cpu_count())
            if pools > 0:
                p = mp.Pool(pools)
            elif pools == 0:
                threaded = False
        else:
            p = mp.Pool()
            m = mp.Manager()
            q = m.Queue()
        if totalItems > 0:
            objs.append([
                'Name[%s]' % (current.svc), 'Audit', 'Owner', 'Status',
                'PartitionKey', 'indexes', 'totalRead', 'totalWrite'
            ])

        client = aconnect.__get_client__('dynamodb')
        if not threaded:
            for name in itemlist:
                unit = client.describe_table(TableName=name)['Table']
                #account = dunit['TableArn']
                if nEnv in name or aconnect._useAccounts:
                    if pools == 0:
                        objs, row = dynamoDefine(unit, name, q)
                    else:
                        getit = p.apply_async(dynamoDefine, (unit, name, q))
                    #getit.get()
                    lfound.append(name)
                    #objs,row=self.dynamoDefine(current,aconnect,objs,unit,name)
                    #rows.append(row)
        else:
            #print itemlist
            lfound = [name for name in itemlist]
            #print lfound
            #print 'client ready?'
            #dd = client.describe_table(TableName='Tags')['Table']
            #print dd
            results = p.map(dynamoDefine,
                            (client.describe_table(TableName=name)['Table']
                             for name in itemlist))

        if pools > 0:
            p.close()
            p.join()
            if threaded:
                for que in results:
                    newobjs, row = que
                    #print ' RDS --got[C] result', row
                    objs = objs + newobjs
                    rows.append(row)
            else:
                while not q.empty():
                    newobjs, row = q.get()
                    objs = objs + newobjs
                    # print newobjs
                    rows.append(row)
        return (lfound, objs, rows)
Example #33
    def collate(self):

        # Set the stacksize to be unlimited
        res.setrlimit(res.RLIMIT_STACK, (res.RLIM_INFINITY, res.RLIM_INFINITY))

        # Locate the FMS collation tool
        mppnc_path = None
        for f in os.listdir(self.expt.lab.bin_path):
            if f.startswith('mppnccombine'):
                mppnc_path = os.path.join(self.expt.lab.bin_path, f)
                break
        assert mppnc_path

        # Check config for collate command line options
        collate_flags = self.expt.config.get('collate_flags')
        if collate_flags is None:
            collate_flags = '-r -64'

        # Import list of collated files to ignore
        collate_ignore = self.expt.config.get('collate_ignore')
        if collate_ignore is None:
            collate_ignore = []
        elif type(collate_ignore) != list:
            collate_ignore = [collate_ignore]

        # Generate collated file list and identify the first tile
        tile_fnames = [
            f for f in os.listdir(self.output_path)
            if f[-4:].isdigit() and f[-8:-4] == '.nc.'
        ]

        mnc_tiles = defaultdict(list)
        for t_fname in tile_fnames:
            t_base, t_ext = os.path.splitext(t_fname)
            t_ext = t_ext.lstrip('.')

            # Skip any files listed in the ignore list
            if t_base in collate_ignore:
                continue

            mnc_tiles[t_base].append(t_fname)

        # If this is run interactively, NCPUS is set in collate_cmd; otherwise
        # cpu_count() returns the number of CPUs assigned to the PBS job
        count = int(os.environ.get('NCPUS', multiprocessing.cpu_count()))
        pool = multiprocessing.Pool(processes=count)

        # Collate each tileset into a single file
        for nc_fname in mnc_tiles:
            nc_path = os.path.join(self.output_path, nc_fname)

            # Remove the collated file if it already exists, since it is
            # probably from a failed collation attempt
            # TODO: Validate this somehow
            if os.path.isfile(nc_path):
                os.remove(nc_path)

            cmd = '{} {} {} {}'.format(mppnc_path, collate_flags, nc_fname,
                                       ' '.join(mnc_tiles[nc_fname]))
            print(cmd)
            pool.apply_async(cmdthread, args=(cmd, self.output_path))

        pool.close()
        pool.join()