Example #1
def ftp_download(file_path, is_override, output_directory, uri, user, password, worker_semaphore, inserted_task_event, tasks, log_format, log_level):
    """
    download a specify file from the given ftp server and output the specify directory
    
    #>>> ftp_download('/tmp/archives.zip.002', True, 'f:/', '16.60.160.90', 'edwin', 'edwin')
    #>>> os.path.isfile('f:/archives.zip.002')
    #True
    """
    try:
        path, filename = os.path.split(file_path)
        logger         = multiprocessing.get_logger()
        init_logger(logger, log_level, log_format)
        ftp = ftplib.FTP(host=uri, user=user, passwd=password)
        ftp.cwd(path)
        output_file = os.path.join(output_directory, filename)
        logger.info('Start downloading %s' % file_path)
        if os.path.isfile(output_file) and not is_override:
            return
        if os.path.isfile(output_file):
            os.remove(output_file)
        with open(output_file, 'wb') as f:
            ftp.retrbinary('RETR %s' % filename, f.write)
        ftp.close()
        logger.info('Completed download to %s' % output_file)
    except Exception as e:
        logger = multiprocessing.get_logger()
        logger.error('Download %s failed, error info %s' % (file_path, e))
        output_file = os.path.join(output_directory, filename)
        if os.path.exists(output_file):
            os.remove(output_file)
        tasks.put(filename)
        inserted_task_event.set()
    finally:
        logger = multiprocessing.get_logger()
        logger.debug('Release lock %s' % id(worker_semaphore))
        worker_semaphore.release()
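A minimal sketch of how ftp_download above might be dispatched from a parent process; the host, credentials, pool size, and the dispatch_downloads helper are assumptions for illustration, not part of the original project.

import logging
import multiprocessing

def dispatch_downloads(file_paths, output_directory):
    # Hypothetical driver, assuming ftp_download (above) is importable.
    worker_semaphore = multiprocessing.BoundedSemaphore(4)   # cap concurrent downloads
    tasks = multiprocessing.Queue()                          # failed files get re-queued here
    inserted_task_event = multiprocessing.Event()
    workers = []
    for file_path in file_paths:
        worker_semaphore.acquire()                           # released in ftp_download's finally block
        p = multiprocessing.Process(
            target=ftp_download,
            args=(file_path, True, output_directory,
                  'ftp.example.com', 'user', 'secret',       # placeholder host and credentials
                  worker_semaphore, inserted_task_event, tasks,
                  '%(asctime)s %(levelname)s %(message)s', logging.INFO))
        p.start()
        workers.append(p)
    for p in workers:
        p.join()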
Example #2
def setup_jenkins_console_logger(level=logging.INFO):
    formatter = logging.Formatter('%(process)s: %(msg)s')
    # Time is logged by Jenkins. Any log files on disk will be removed by
    # Jenkins when the next job starts
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    multiprocessing.get_logger().setLevel(level)
    multiprocessing.get_logger().addHandler(stream_handler)
Example #3
def main(args):
    try:
        if args[0] == "filter":
            return run_filter(*args[1:])
        elif args[0] == "algo":
            return run_algo(*args[1:])
    except Exception:
        get_logger().error(traceback.format_exc())
        return False
Example #4
 def __init__(self):
     proc_count = cpu_count() // 5 * 4
     if proc_count < 1:
         proc_count = 1
     self.pool = multiprocessing.Pool(proc_count, initializer=setup_jenkins_console_logger)
     self.workspace_path = ""
     self.runlist = []
     self.params = {}
     self.process_cli()
     multiprocessing.get_logger().info("\n{0}\nIntegration tests runner started.\n{0}\n".format("*" * 80))
Example #5
def spawn_test_process(test, flags):
    spell = ["{0} {1}".format(test, flags)]

    process = subprocess.Popen(spell, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)

    multiprocessing.get_logger().info(spell[0])
    out, err = process.communicate()

    # We need the out for getting the list of tests from an exec file
    # by sending it the --list_tests flag
    return test, filter(None, out.splitlines()), err, process.returncode
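As the comment above notes, the captured stdout yields the test list when the binary is run with --list_tests; a hedged usage sketch (the binary path is a placeholder):

# Hypothetical call, assuming spawn_test_process (above) and a test binary
# that prints one test name per line when passed --list_tests.
test_binary, test_names, err, code = spawn_test_process('./unit_tests', '--list_tests')
if code == 0:
    for name in test_names:
        print('discovered test:', name)
else:
    print('listing tests failed:', err)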
Example #6
def spawn_test_process(test, flags):
    spell = ["{0} {1}".format(test, flags)]

    process = subprocess.Popen(spell,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               shell=True
                               )

    multiprocessing.get_logger().info(spell[0])
    _, err = process.communicate()

    return test, err, process.returncode
Example #7
    def __call__(self, *args, **kwargs):
        try:
            result = self.__callable(*args, **kwargs)

        except Exception as e:
            # Here we add some debugging help. If multiprocessing's
            # debugging is on, it will arrange to log the traceback
            multiprocessing.get_logger().error(traceback.format_exc())
            # Re-raise the original exception so the Pool worker can
            # clean up
            raise

        # It was fine, give a normal answer
        return result
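This __call__ wrapper is the usual way to surface worker tracebacks through the multiprocessing logger when a Pool would otherwise swallow them; a self-contained sketch of the pattern (class and function names here are assumptions):

import multiprocessing
import traceback

class LogExceptions(object):
    """Wrap a callable so worker exceptions are logged before propagating."""
    def __init__(self, callable_):
        self.__callable = callable_

    def __call__(self, *args, **kwargs):
        try:
            return self.__callable(*args, **kwargs)
        except Exception:
            # Log the full traceback, then re-raise so the Pool worker can clean up.
            multiprocessing.get_logger().error(traceback.format_exc())
            raise

def risky(x):
    return 10 // x   # raises ZeroDivisionError for x == 0

if __name__ == '__main__':
    multiprocessing.log_to_stderr()
    pool = multiprocessing.Pool(2)
    print(pool.map(LogExceptions(risky), [1, 2, 5]))
    pool.close()
    pool.join()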
Example #8
	def loop(self):
		while True:
			multiprocessing.get_logger().info("%s server: Waiting for signal" % self.sessionID)
			self.event.wait()
			while not self.queue.empty():
				print("%s server: Got request signal", self.sessionID)
				self.event.clear()
				request = queues[x].get(False)
				if isinstance(request, basestring) and request.lower() == "terminate":
					# terminate signal received => save dataframe and exit event loop => process gets terminated
					self.dataFrameManager.saveDataFrame()
					break
				else:
					self.requestManager.processRequest(request)
Example #9
def run_cross_tests(server_match, client_match, jobs, skip_known_failures):
  logger = multiprocessing.get_logger()
  logger.debug('Collecting tests')
  with open(path_join(TEST_DIR, CONFIG_FILE), 'r') as fp:
    j = json.load(fp)
  tests = crossrunner.collect_cross_tests(j, server_match, client_match)
  if not tests:
    print('No test found that matches the criteria', file=sys.stderr)
    print('  servers: %s' % server_match, file=sys.stderr)
    print('  clients: %s' % client_match, file=sys.stderr)
    return False
  if skip_known_failures:
    logger.debug('Skipping known failures')
    known = crossrunner.load_known_failures(TEST_DIR)
    tests = list(filter(lambda t: crossrunner.test_name(**t) not in known, tests))

  dispatcher = crossrunner.TestDispatcher(TEST_DIR, TEST_DIR, jobs)
  logger.debug('Executing %d tests' % len(tests))
  try:
    for r in [dispatcher.dispatch(test) for test in tests]:
      r.wait()
    logger.debug('Waiting for completion')
    return dispatcher.wait()
  except (KeyboardInterrupt, SystemExit):
    logger.debug('Interrupted, shutting down')
    dispatcher.terminate()
    return False
Example #10
def download_wallpapers(r, task, image_dir, resolution, quantity):
    """Downloads a number of wallpapers based on the resolution and quantity
    and adds each new wallpaper to the Redis database. For any wallpapers that
    are 1080p a low resolution version is created and the pair of images
    is added to the list of background images.
    """
    logger = get_logger()
    wall = WallDownloader(image_dir)
    for wallpaper in wall.downloads(resolution, quantity):
        logger.info("Wallpaper: " + wallpaper + " downloaded!")
        r.incrbyfloat('job:' + task + ':progress', 75.0 / quantity)

        image_name = WallDownloader.get_filename(wallpaper)
        uuid = r.get('image:' + image_name + ':uuid')

        if not uuid:
            uuid = generate_uuid()
            r.set('image:' + image_name + ':uuid', uuid)

        # Add the image to the list for the current resolution
        r.set('image:' + uuid + ':' + resolution, wallpaper)
        r.sadd('image:' + resolution + ':uuids', uuid)

        # Add 1080p images to list of background images
        if resolution == '1920x1080':
            create_background(r, task, uuid, image_dir, image_name)
Example #11
def prepare_download(task, image_dir, output_dir):
    """Prepares a download request submitted by a user, fetches a random set
    of wallpapers based on the resolution and number of wallpapers specified and
    creates a compressed zip file for download.
    """
    # Connect to the local Redis database, get logger
    r = redis.StrictRedis(host='localhost')
    logger = get_logger()

    # Get the resolution and number of downloads requested
    resolution = r.get('job:' + task + ':resolution')
    quantity = r.get('job:' + task + ':quantity')

    # Exit if the job timed out (resolution and quantity requested no longer exist)
    # This should not happen unless the system is under heavy load, log the issue
    if not resolution or not quantity:
        logger.warning('Job: ' + task + ' No resolution or quantity, job expired?')
        sys.exit(1)
    quantity = int(quantity)

    # Download more wallpapers if there are not enough for the resolution
    if r.scard('image:' + resolution + ':uuids') < max_wallpapers:
        logger.info('Job: ' + task + ' Downloading: ' + resolution + ' wallpapers!')
        download_wallpapers(r, task, image_dir, resolution, quantity)

    # Get a random set of wallpapers and create a zip file
    wallpapers = []
    for uuid in r.srandmember('image:' + resolution + ':uuids', quantity):
        wallpapers.append(r.get('image:' + uuid + ':' + resolution))

    logger.info('Job: ' + task + ' Wallpapers: ' + ', '.join(wallpapers))

    # Create a zip file, set key in redis
    compress_wallpapers(r, task, output_dir, wallpapers)

    sys.exit(0)
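Because prepare_download reports its outcome via sys.exit, it is naturally run in a child process; a hedged sketch (the task id and directories are placeholders):

import multiprocessing

# Hypothetical dispatch of prepare_download (above) as a worker process.
job = multiprocessing.Process(target=prepare_download,
                              args=('3f2c9a', '/srv/wallpapers', '/srv/downloads'))
job.start()
job.join()
print('job finished with exit code', job.exitcode)   # 0 on success, 1 if the job expired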
Example #12
def create_background(r, task, uuid, image_dir, image_name):
    """Creates a low resolution version of a 1080p wallpaper provided and saves
    the matching pair of images to the list of background images that are
    randomly displayed on the website.
    """
    logger = get_logger()

    # Get the wallpaper file path
    uuid = r.get('image:' + image_name + ':uuid')
    file = r.get('image:' + uuid + ':1920x1080')

    new_img_dir = os.path.join(os.path.normpath(image_dir), '800x480')
    new_file = os.path.join(os.path.normpath(new_img_dir), image_name + '_800x480.jpg')

    if not os.path.exists(new_img_dir):
        os.makedirs(new_img_dir)

    # Resize the image as 800x480 at 50% quality, save as new image
    logger.info('Job: ' + task + ' UUID: ' + uuid + ', creating background: ' + new_file)
    with Image(filename=file) as img:
        with img.clone() as new_img:
            new_img.compression_quality = 50
            new_img.resize(800, 480)
            new_img.save(filename=new_file)

    # Add the new background to the list
    r.set('image:' + uuid + ':800x480', new_file)
    r.sadd('image:backgrounds:uuids', uuid)
Example #13
 def run_periodic_tasks(self):
     logger = get_logger()
     applied = default_periodic_status_backend.run_periodic_tasks()
     for task, task_id in applied:
         logger.debug(
             "PeriodicWorkController: Periodic task %s applied (%s)" % (
                 task.name, task_id))
Example #14
        def wrapped_function(*args, **kwargs):
            global return_value
            logger = multiprocessing.get_logger()

            # create a pipe to retrieve the return value
            parent_conn, child_conn = multiprocessing.Pipe()

            # create and start the process
            subproc = multiprocessing.Process(
                target=subprocess_func,
                name=" multiproc function call",
                args=(func, child_conn, mem_in_mb, cpu_time_in_s,
                      wall_time_in_s, num_processes) + args,
                kwargs=kwargs)
            logger.debug("Your function is called now.")

            return_value = None

            # start the process
            subproc.start()
            child_conn.close()

            try:
                # read the return value
                return_value = parent_conn.recv()
            except EOFError:  # Don't see that in the unit tests :(
                logger.debug("Your function call closed the pipe prematurely -> None will be returned")
                return_value = None
            finally:
                # don't leave zombies behind
                subproc.join()
            return return_value
Example #15
def invoke_cmd_worker(item):
    try:
        logger = multiprocessing.get_logger()
        pid = multiprocessing.current_process().pid

        plugin_dir, plugin, filepath, events_limit = item
        worker_fpath = os.path.abspath(__file__)
        cmd = 'gzip -d -c %s | python2.7 %s %s %s %s' % (
            filepath, worker_fpath, plugin_dir, plugin, events_limit
        )
        logger.info(
            '%d: Starting job: %s', pid, cmd
        )

        env = os.environ.copy()
        env['PYTHONPATH'] = os.pathsep.join(sys.path)

        process = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, shell=True,
            env=env
        )
        output = process.communicate()[0]
        return output
    except Exception:
        traceback.print_exc()
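invoke_cmd_worker expects a (plugin_dir, plugin, filepath, events_limit) tuple, which suggests fanning it out over a Pool; a hedged sketch with placeholder paths and plugin names:

import multiprocessing

# Hypothetical fan-out of invoke_cmd_worker (above); paths and plugin names are placeholders.
items = [
    ('./plugins', 'dau', '/data/alohalytics/20190101.gz', 0),
    ('./plugins', 'dau', '/data/alohalytics/20190102.gz', 0),
]
pool = multiprocessing.Pool(2)
for output in pool.map(invoke_cmd_worker, items):
    print(output)
pool.close()
pool.join()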
Example #16
def run_feature_tests(server_match, feature_match, jobs, skip_known_failures, only_known_failures, retry_count, regex):
    basedir = path_join(ROOT_DIR, FEATURE_DIR_RELATIVE)
    logger = multiprocessing.get_logger()
    logger.debug('Collecting tests')
    with open(path_join(TEST_DIR, CONFIG_FILE), 'r') as fp:
        j = json.load(fp)
    with open(path_join(basedir, CONFIG_FILE), 'r') as fp:
        j2 = json.load(fp)
    tests = crossrunner.collect_feature_tests(j, j2, server_match, feature_match, regex)
    if not tests:
        print('No test found that matches the criteria', file=sys.stderr)
        print('  servers: %s' % server_match, file=sys.stderr)
        print('  features: %s' % feature_match, file=sys.stderr)
        return False
    if only_known_failures:
        logger.debug('Only running known failures')
        known = crossrunner.load_known_failures(basedir)
        tests = list(filter(lambda t: crossrunner.test_name(**t) in known, tests))
    if skip_known_failures:
        logger.debug('Skipping known failures')
        known = crossrunner.load_known_failures(basedir)
        tests = list(filter(lambda t: crossrunner.test_name(**t) not in known, tests))

    dispatcher = crossrunner.TestDispatcher(TEST_DIR, ROOT_DIR, FEATURE_DIR_RELATIVE, jobs)
    logger.debug('Executing %d tests' % len(tests))
    try:
        for r in [dispatcher.dispatch(test, retry_count) for test in tests]:
            r.wait()
        logger.debug('Waiting for completion')
        return dispatcher.wait()
    except (KeyboardInterrupt, SystemExit):
        logger.debug('Interrupted, shutting down')
        dispatcher.terminate()
        return False
Example #17
    def __init__(self, cmd_param):
        '''
        Main process class; structured this way for Windows compatibility.
        '''
        super(Mtask, self).__init__()
        self.start_time = datetime.datetime.now()
        self.parent_timeout = 180
        self.parent_timeout_flag = 0
        self.child_timeout = 120
        self.child_num = 10
        self.slice_num = 20
        self.process_list = []
        self.result = []
        self.batch_id = 0
        self.print_flag = 1
        self.mult_debug_flag = 0
        self.cmd_param = cmd_param
       


        if self.mult_debug_flag:
            # configure the multiprocessing logger
            multiprocessing.log_to_stderr()
            logger = multiprocessing.get_logger()
            logger.setLevel(logging.INFO)
Example #18
def SumData(roi_start_list, roi_end_list, file_list, file_path, istart, iend, queue):
    logger = mp.get_logger()
    mp.log_to_stderr(logging.INFO)

    roi_sums = mproc.SHARED_ARRAY
    data_array = XYDataArray()
    num_rois = len(roi_start_list)
    logger.info("Reading files from %d to %d" % (istart, iend))
    if istart > 0:
        istart = istart - 1

    # Process each file in the list that falls in the range istart to iend
    for i in range(istart, iend):

        # Read in the information from the file and create numpy arrays from that information.
        data_array.CreateArrays(os.path.join(file_path, file_list[i]))

        # Sum the data in the arrays that lies between the roi values.  Do this
        # for each roi that was created.
        for j in range(num_rois):
#           logger.info("Summing roi %d from file %d" % (j, i))
            roi_sums[j][i] = roi_sums[j][i] + data_array.SumROIData(roi_start_list[j], roi_end_list[j])

        # Add a value of 1 to the queue so that the user interface can be updated
        # with the latest progress.
        queue.put(1)
    return roi_sums
Example #19
def initialize(online, version, use_cherrypy):
    # configure the logger
    global logger
    if use_cherrypy:
        import cherrypy
        logger = cherrypy.log
    else:
        import multiprocessing
        logger = multiprocessing.get_logger()

    # create a database session
    connectionString = ConnectionString()
    engine = None

    if online == 'True':
        engine = create_engine(connectionString.connectUrlonline)
    else:
        engine = create_engine(connectionString.connectUrloffline)

    Base.prepare(engine, reflect=True)
    session = scoped_session(sessionmaker(engine))

    # store the version id
    global databuilder
    databuilder = DataBuilder(session, version, logger)
Example #20
def run_cross_tests(server_match, client_match, jobs, skip_known_failures, retry_count, regex):
    logger = multiprocessing.get_logger()
    logger.debug("Collecting tests")
    with open(path_join(TEST_DIR, CONFIG_FILE), "r") as fp:
        j = json.load(fp)
    tests = crossrunner.collect_cross_tests(j, server_match, client_match, regex)
    if not tests:
        print("No test found that matches the criteria", file=sys.stderr)
        print("  servers: %s" % server_match, file=sys.stderr)
        print("  clients: %s" % client_match, file=sys.stderr)
        return False
    if skip_known_failures:
        logger.debug("Skipping known failures")
        known = crossrunner.load_known_failures(TEST_DIR)
        tests = list(filter(lambda t: crossrunner.test_name(**t) not in known, tests))

    dispatcher = crossrunner.TestDispatcher(TEST_DIR, ROOT_DIR, TEST_DIR_RELATIVE, jobs)
    logger.debug("Executing %d tests" % len(tests))
    try:
        for r in [dispatcher.dispatch(test, retry_count) for test in tests]:
            r.wait()
        logger.debug("Waiting for completion")
        return dispatcher.wait()
    except (KeyboardInterrupt, SystemExit):
        logger.debug("Interrupted, shutting down")
        dispatcher.terminate()
        return False
Example #21
    def __init__(self, host, pipe, port=80, channels=None):
        """
        Create a new client.

        host        : host to connect
        pipe        : pipe of paths
        port        : port to connect
        channels    : map of file descriptors 

        """
        asynchat.async_chat.__init__(self, map=channels)
        self._log = multiprocessing.get_logger()
        self._host = host
        self._pipe = pipe
        self._port = port
        self._time = 0
        self._htime = 0
        self._path = ""
        self._header = ""
        self._body = ""
        self._data = ""
        self._protocol = ""
        self._status = -1
        self._status_msg = ""
        self._close = False
        self._chunked = True
        self._content_length = -1

        self.set_terminator(HTTPAsyncClient.TERMINATOR)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect((self._host, self._port))
        self._log.debug(self.logmsg("HTTPAsyncClient connected to %s:%d",
            self._host, self._port))
        self.send_request()
Example #22
 def test_setup_logger_no_handlers_file(self):
     from multiprocessing import get_logger
     l = get_logger()
     l.handlers = []
     tempfile = mktemp(suffix="unittest", prefix="celery")
     l = setup_logger(logfile=tempfile, loglevel=0)
     self.assertTrue(isinstance(l.handlers[0], logging.FileHandler))
Example #23
def error(msg, *args):
  """Shortcut to multiprocessing's logger"""
  ############################################################# DEBUG
  import sys
  sys.stdout.flush()
  ############################################################# DEBUG
  return mp.get_logger().error(msg, *args)
Example #24
def setup_logger(settings):
    class ColorFilter(logging.Filter):
        def filter(self, record):
            if not hasattr(record, 'xcolor'):
                record.xcolor = ''
            return True    

    logger = multiprocessing.get_logger()
    logger.setLevel(logging.DEBUG)
    if settings['log_file_dir'] and settings['log_file_level']:
        pname = multiprocessing.current_process().name
        ctime = time.strftime('%Y-%m-%d_%H-%M-%S')
        filename = os.path.join(settings['log_file_dir'], 
            'log_%s_%s.txt' % (pname, ctime))
        filehandler = logging.FileHandler(filename, mode='w', encoding='utf8')
        filehandler.setLevel(settings['log_file_level'])
        fileformatter = logging.Formatter(
            '[%(asctime)s] %(levelname)-8s %(message)s')
        filehandler.setFormatter(fileformatter)
        logger.addHandler(filehandler)
    if settings['log_stderr_level']:
        streamhandler = logging.StreamHandler(stream=sys.stderr)
        streamhandler.setLevel(settings['log_stderr_level'])
        streamhandler.addFilter(ColorFilter())
        streamformatter = logging.Formatter(
            '[%(levelname)s/%(processName)s] %(xcolor)s%(message)s' +
            Colors.NORMAL)
        streamhandler.setFormatter(streamformatter)
        logger.addHandler(streamhandler)
    return logger
Example #25
def test_log():
    multiprocessing.log_to_stderr()
    logger = multiprocessing.get_logger()
    logger.setLevel(logging.INFO)
    t1 = time.time()
    print(time.time() - t1)
    logger.info("done")
Example #26
def smac_classpath():
    """
    Small function gathering all information to build the java class path.
    
    :returns: string representing the Java classpath for SMAC
    
    """
    import multiprocessing
    from pkg_resources import resource_filename
    
    logger = multiprocessing.get_logger()
    
    smac_folder = resource_filename("pysmac", 'smac/%s' % pysmac.remote_smac.SMAC_VERSION)
    
    smac_conf_folder = os.path.join(smac_folder, "conf")
    smac_patches_folder = os.path.join(smac_folder, "patches")
    smac_lib_folder = os.path.join(smac_folder, "lib")


    classpath = [fname for fname in os.listdir(smac_lib_folder) if fname.endswith(".jar")]
    classpath = [os.path.join(smac_lib_folder, fname) for fname in classpath]
    classpath = [os.path.abspath(fname) for fname in classpath]
    classpath.append(os.path.abspath(smac_conf_folder))
    classpath.append(os.path.abspath(smac_patches_folder))

    # For Windows compatibility
    classpath = (os.pathsep).join(classpath)

    logger.debug("SMAC classpath: %s", classpath)

    return classpath
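The os.pathsep join above produces exactly the separator convention that java -cp expects on each platform; a hedged sketch of consuming the classpath (the main class and JVM options are placeholders, not SMAC's real entry point):

import subprocess

# Hypothetical launch using the classpath built by smac_classpath() above;
# 'com.example.Main' is a placeholder main class.
cp = smac_classpath()
proc = subprocess.Popen(['java', '-Xmx1g', '-cp', cp, 'com.example.Main'],
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()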
Example #27
    def run(self):

        logger = multiprocessing.get_logger()

        GPIO.setmode(GPIO.BOARD)
        GPIO.setup(self.led_pin, GPIO.OUT)
        GPIO.output(self.led_pin, True)

        reader = MFRC522(self.spi_device)

        while True:

            reader.MFRC522_Request(reader.PICC_REQIDL)
            (status, data) = reader.MFRC522_Anticoll()

            if status == reader.MI_OK:
                vote = {
                    'tag_id': "%0.2x%0.2x%0.2x%0.2x" % (data[0], data[1], data[2], data[3]),
                    'timestamp': time.time(),
                    'voting_machine_id': getnode(),  # see RFC 4122
                    'spi_device': self.spi_device,
                }
                if self.submit_vote(vote):
                    logger.info('Vote submitted %s', vote)
                    self.led_blink()
                else:
                    logger.error('Could not submit vote %s', vote)
Example #28
def run(plugin_name, start_date, end_date, plugin_dir,
        data_dir='/mnt/disk1/alohalytics/by_date',
        results_dir='./stats',
        events_limit=0):
    """
    Pyaloha stats processing pipeline:
0. Load worker, aggregator, processor classes from a specified plugin (script)
1. Run workers (data preprocessors) on alohalytics files within specified range
2. Accumulate [and postprocess] worker results with an aggregator instance
3. Run stats processor and print results to stdout
    """

    aggregator = aggregate_raw_data(
        data_dir, results_dir, plugin_dir, plugin_name,
        start_date, end_date, events_limit
    )

    stats = load_plugin(
        plugin_name, plugin_dir=plugin_dir
    ).StatsProcessor(aggregator)

    logger = multiprocessing.get_logger()

    logger.info('Stats: processing')
    stats.process_stats()

    logger.info('Stats: outputting')
    stats.print_stats()

    logger.info('Stats: done')
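A hedged sketch of invoking the pipeline described in the docstring; the plugin name, date-range format, and directories are assumptions, not values from the project:

if __name__ == '__main__':
    # Hypothetical invocation of run() above with placeholder arguments.
    run(
        'dau_per_country',                       # plugin expected under plugin_dir
        '20190101', '20190131',                  # assumed date-range format
        plugin_dir='./plugins',
        data_dir='/mnt/disk1/alohalytics/by_date',
        results_dir='./stats',
        events_limit=100000,
    )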
Example #29
def SumPixels(file_list, file_path, istart, iend, queue):
    logger = mp.get_logger()
    mp.log_to_stderr(logging.INFO)

    roi_sums = mproc.SHARED_ARRAY
    data_array = TiffDatatArray()
    logger.info("Reading files from %d to %d" % (istart, iend))
    if istart > 0:
        istart = istart - 1

    # Process each file in the list that falls in the range istart to iend
    for i in range(istart, iend):

        # Read in the information from the file and create numpy arrays from that information.
        data_array.CreateArrays(os.path.join(file_path, file_list[i]))

        # Sum the data in the arrays that lies between the roi values.  Do this
        # for each roi that was created.
        new_sum = data_array.GetDataArray()
        roi_sums = numpy.add(roi_sums, new_sum)

        # Add a value of 1 to the queue so that the user interface can be updated
        # with the latest progress.
        queue.put(1)
    return roi_sums
Example #30
def setup_logger(logFormat, logFileName):
    logger = multiprocessing.get_logger()
    logHandler = logging.FileHandler(logFileName, mode = 'a')
    logHandler.setFormatter(logging.Formatter(logFormat))
    logger.addHandler(logHandler)
    logger.setLevel(logging.DEBUG)
    return logger
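A hedged usage sketch for setup_logger above; the format string and file name are placeholders:

# Hypothetical call; log format and file name are placeholders.
logger = setup_logger('%(asctime)s %(processName)s %(levelname)s: %(message)s',
                      '/tmp/pipeline.log')
logger.info('worker started')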
Example #31
import multiprocessing as mp
import ctypes
import argparse
import numpy as np
import scipy.linalg
import time
import logging

info = mp.get_logger().info

#================================================================== Standard data as arg


def Estep_serial(X, w, MuList, SigmaList):
    N = X.shape[0]
    K = w.shape[0]
    logResp = np.zeros((N, K))
    for k in xrange(K):
        logResp[:, k] = loggausspdf(X, MuList[k, :], SigmaList[k, :, :])
    logResp += np.log(w)
    return logResp


def distMahal(X, mu, Sigma):
    ''' Calc Mahalanobis distance: (x-mu)^T Sigma^{-1} (x-mu)
        for each row of matrix X
    '''
    Xdiff = X - mu
    cholSigma = scipy.linalg.cholesky(Sigma, lower=True)
    Q = np.linalg.solve(cholSigma, Xdiff.T)
    distPerRow = np.sum(Q**2, axis=0)
Example #32
import logging
from typing import List
import sys

import sqlite3
import multiprocessing
from multiprocessing import Process
from allennlp.common.file_utils import cached_path

logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
MULTIPROCESSING_LOGGER = multiprocessing.get_logger()


class SqlExecutor:
    """
    This class evaluates SQL queries by connecting to a SQLite database. Because SQLite is disk-based
    we just need to provide one file with the location. We execute the predicted SQL query and the labeled
    queries against the database and check if they execute to the same table.
    """
    def __init__(self, database_file: str) -> None:
        # Initialize a cursor to our sqlite database, so we can execute SQL queries for denotation accuracy.
        self._database_file = cached_path(database_file)
        self._connection = sqlite3.connect(self._database_file)
        self._cursor = self._connection.cursor()

    def evaluate_sql_query(self, predicted_sql_query: str,
                           sql_query_labels: List[str]) -> int:
        # We set the logging level for the subprocesses to warning, otherwise, it will
        # log every time a process starts and stops.
        MULTIPROCESSING_LOGGER.setLevel(logging.WARNING)
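A hedged sketch of using SqlExecutor; the database path and queries are placeholders, and the return value is assumed to be the denotation-match indicator the docstring describes:

# Hypothetical usage of SqlExecutor above with placeholder inputs.
executor = SqlExecutor('/path/to/atis.db')
match = executor.evaluate_sql_query(
    'SELECT city_name FROM city ;',
    ['SELECT city_name FROM city ;'])
print('denotation match:', match)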
Example #33
 def wrapper(*largs, **kwargs):
     try:
         res = f(*largs, **kwargs)
     except Exception as e:
         multiprocessing.get_logger().exception(e)
         raise
Example #34
 def __init__(self):
     self._log = multiprocessing.get_logger()
     self._lock = multiprocessing.Lock()
     self._ports = set()
     self._dom_ports = set()
     self._last_alloc = 0
Example #35
def main():
    # send it all to stderr
    mp.log_to_stderr()
    # get access to a logger and set its logging level to INFO
    logger = mp.get_logger()
    logger.setLevel(logging.INFO)

    dataset = read_data(DATASET_PATH)
    # global train_x, test_x, train_y, test_y

    train_x, test_x, train_y, test_y = split_dataset(dataset, 0.25)

    # print("--- Testing Sequence DE ---")
    # start_time_seq = time.time()
    # result_seq = list(de_sequence(fobj, bounds=[(-100, 100)] * 6))
    # print(result_seq[-1])
    # print("")
    # print("--- %s seconds ---" % (time.time() - start_time_seq))
    #
    # sleep(5)

    print("--- Tuning Random Forest with Parallel DE ---")
    start_time_rf_tuning_para = time_RF.time()

    # result_para = list(de_parallel(fobj, bounds=[(-100, 100)] * 6))
    # print(result_para[-1])

    # initialization
    bounds = [(10, 150), (1, 20), (2, 20), (2, 50), (0.01, 1), (1, 10)]
    mut = 0.8
    crossp = 0.7
    popsize = 60
    its = 100

    dimensions = len(bounds)
    pop = np.random.rand(popsize, dimensions)

    # pdb.set_trace()
    min_b, max_b = np.asarray(bounds).T
    diff = np.fabs(min_b - max_b)
    pop_denorm = min_b + pop * diff

    # convert from float to integer
    pop_denorm_convert = pop_denorm.tolist()

    result_list = []
    temp_list = []

    for index in pop_denorm_convert:
        temp_list.append(np.int_(np.round_(index[0])))
        temp_list.append(np.int_(np.round_(index[1])))
        temp_list.append(np.int_(np.round_(index[2])))
        temp_list.append(np.int_(np.round_(index[3])))
        temp_list.append(float('%.2f' % index[4]))
        temp_list.append(np.int_(np.round_(index[5])))
        result_list.append(temp_list)
        temp_list = []

    fitness = np.asarray([
        rf_tuning(index[0], index[1], index[2], index[3], index[4], index[5],
                  train_x, test_x, train_y, test_y) for index in result_list
    ])

    best_idx = np.argmax(fitness)
    best = pop_denorm[best_idx]

    print("Dimension:", dimensions)
    print("pop:", pop)
    print("min_b:", min_b)
    print("max_b:", max_b)
    print("diff:", diff)
    print("pop_denorm:", pop_denorm)
    print("fitness:", fitness)
    print("best_idx:", best_idx)
    print("best:", best)

    lock = mp.Lock()
    # execute loops in each process
    processes = []
    for x in range(mp.cpu_count()):
        processes.append(
            mp.Process(target=de_innerloop,
                       args=(output, its, popsize, pop, mut, dimensions,
                             crossp, min_b, diff, lock, fitness, best_idx,
                             best, train_x, test_x, train_y, test_y)))

    # Run processes
    for p in processes:
        p.start()

    # Exit the completed processes
    # Without join() function call, process will remain idle and won’t terminate
    for p in processes:
        p.join()

    # Get process results from the output queue
    results = [output.get() for p in processes]
    print(results)

    print("")
    print("--- %s seconds ---" % (time_RF.time() - start_time_rf_tuning_para))
    print("")
Example #36
from bs4 import BeautifulSoup as BS
from urllib2 import urlopen
from urlparse import urlparse
from multiprocessing import Pool, get_logger
from functools import partial
import os.path as op
import os
import logging.handlers
import sys

console_handler = logging.StreamHandler(sys.stdout)
console_formatter = logging.Formatter(
    "[%(asctime)s] %(levelname)s %(message)s", "%Y-%m-%d %H:%M:%S")
console_handler.setFormatter(console_formatter)
get_logger().addHandler(console_handler)
get_logger().setLevel(logging.INFO)

TUMBLR_NAME = sys.argv[1]
BASE_URL = 'http://{}.tumblr.com'.format(TUMBLR_NAME)


def download_photoset(i, photoset_paths, page):
    try:
        len_posts = len(photoset_paths)
        post_path = photoset_paths[i]
        u = urlopen(BASE_URL + post_path)
        bs = BS(u, 'html.parser')
        post_imgs = [
            a.attrs['href'] for a in bs.find_all('a') if a.attrs['href']
        ]
Example #37
 def __init_log(self):
     # multiprocessing.log_to_stderr(logging.DEBUG)
     multiprocessing.log_to_stderr()
     self.logger = multiprocessing.get_logger()
     self.logger.setLevel(logging.CRITICAL)
Example #38
import time
import math
import multiprocessing
import logging
import pandas as pd
from multiprocessing import Process, current_process
from multiprocessing import log_to_stderr, get_logger

import argparse

if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    #logging info
    log_to_stderr()
    logger = get_logger()
    logger.setLevel(logging.INFO)

    #optional arguments
    parser.add_argument('--pout',
                        help='Output file directory for candidate h5',
                        type=str,
                        default='/home/guest/suyash/default_cand/')
    parser.add_argument('--pin',
                        help='input csv file for candmaker',
                        type=str,
                        default='/home/guest/suyash/fetch/cands.csv')
    parser.add_argument('--clean',
                        help='deletes contents of default',
                        default=0)
    args = parser.parse_args()
Example #39
def main():

    parser = argparse.ArgumentParser(prog="sir")
    parser.add_argument("-d", "--debug", action="store_true")
    parser.add_argument("--sqltimings", action="store_true")
    subparsers = parser.add_subparsers()

    reindex_parser = subparsers.add_parser("reindex",
                                           help="Reindexes all or a single "
                                           "entity type")
    reindex_parser.set_defaults(func=reindex)
    reindex_parser.add_argument('--entity-type',
                                action='append',
                                help="Which entity types to index.",
                                choices=SCHEMA.keys())

    generate_trigger_parser = subparsers.add_parser("triggers",
                                                    help="Generate triggers")
    generate_trigger_parser.set_defaults(func=generate_func)
    generate_trigger_parser.add_argument('-t',
                                         '--trigger-file',
                                         action="store",
                                         default="sql/CreateTriggers.sql",
                                         help="The filename to save the "
                                         "triggers into")
    generate_trigger_parser.add_argument('-f',
                                         '--function-file',
                                         action="store",
                                         default="sql/CreateFunctions.sql",
                                         help="The filename to save the "
                                         "functions into")
    generate_trigger_parser.add_argument('-bid',
                                         '--broker-id',
                                         action="store",
                                         default="1",
                                         help="ID of the AMQP broker row "
                                         "in the database.")

    generate_extension_parser = subparsers.add_parser(
        "extension", help="Generate extension")
    generate_extension_parser.set_defaults(func=generate_extension)
    generate_extension_parser.add_argument('-e',
                                           '--extension-file',
                                           action="store",
                                           default="sql/CreateExtension.sql",
                                           help="The filename to save the "
                                           "extension into")

    amqp_setup_parser = subparsers.add_parser("amqp_setup",
                                              help="Set up AMQP exchanges and "
                                              "queues")
    amqp_setup_parser.set_defaults(func=setup_rabbitmq)

    amqp_watch_parser = subparsers.add_parser("amqp_watch",
                                              help="Watch AMQP queues for "
                                              "changes")
    amqp_watch_parser.set_defaults(func=watch)

    args = parser.parse_args()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    loghandler = logging.StreamHandler()
    if args.debug:
        formatter = logging.Formatter(fmt="%(processName)s %(asctime)s  "
                                      "%(levelname)s: %(message)s")
    else:
        formatter = logging.Formatter(fmt="%(asctime)s: %(message)s")
    loghandler.setFormatter(formatter)
    logger.addHandler(loghandler)

    mplogger = multiprocessing.get_logger()
    mplogger.setLevel(logging.ERROR)
    mplogger.addHandler(loghandler)

    if args.sqltimings:
        from sqlalchemy import event
        from sqlalchemy.engine import Engine
        import time

        sqltimelogger = logging.getLogger("sqltimer")
        sqltimelogger.setLevel(logging.DEBUG)
        sqltimelogger.addHandler(loghandler)

        @event.listens_for(Engine, "before_cursor_execute")
        def before_cursor_execute(conn, cursor, statement, parameters, context,
                                  executemany):
            conn.info.setdefault('query_start_time', []).append(time.time())
            sqltimelogger.debug("Start Query: %s", statement)
            sqltimelogger.debug("With Parameters: %s", parameters)

        @event.listens_for(Engine, "after_cursor_execute")
        def after_cursor_execute(conn, cursor, statement, parameters, context,
                                 executemany):
            total = time.time() - conn.info['query_start_time'].pop(-1)
            sqltimelogger.debug("Query Complete!")
            sqltimelogger.debug("Total Time: %f", total)

    config.read_config()
    try:
        init_raven_client(config.CFG.get("sentry", "dsn"))
    except ConfigParser.Error as e:
        logger.info(
            "Skipping Raven client initialization. Configuration issue: %s", e)
    func = args.func
    args = vars(args)
    func(args)
Example #40
def init(loglevel=logging.INFO):
    multiprocessing.log_to_stderr()
    multiprocessing.get_logger().setLevel(loglevel)
    results = multiprocessing.Queue()
    return results
Example #41
def synchronous_pull_with_lease_management(project_id, subscription_name):
    """Pulling messages synchronously with lease management"""
    # [START pubsub_subscriber_sync_pull_with_lease]
    import logging
    import multiprocessing
    import random
    import time

    from google.cloud import pubsub_v1

    # TODO project_id = "Your Google Cloud Project ID"
    # TODO subscription_name = "Your Pub/Sub subscription name"

    subscriber = pubsub_v1.SubscriberClient()
    subscription_path = subscriber.subscription_path(project_id,
                                                     subscription_name)

    NUM_MESSAGES = 2
    ACK_DEADLINE = 30
    SLEEP_TIME = 10

    # The subscriber pulls a specific number of messages.
    response = subscriber.pull(subscription_path, max_messages=NUM_MESSAGES)

    multiprocessing.log_to_stderr()
    logger = multiprocessing.get_logger()
    logger.setLevel(logging.INFO)

    def worker(msg):
        """Simulates a long-running process."""
        RUN_TIME = random.randint(1, 60)
        logger.info("{}: Running {} for {}s".format(
            time.strftime("%X", time.gmtime()), msg.message.data, RUN_TIME))
        time.sleep(RUN_TIME)

    # `processes` stores process as key and ack id and message as values.
    processes = dict()
    for message in response.received_messages:
        process = multiprocessing.Process(target=worker, args=(message, ))
        processes[process] = (message.ack_id, message.message.data)
        process.start()

    while processes:
        for process in list(processes):
            ack_id, msg_data = processes[process]
            # If the process is still running, reset the ack deadline as
            # specified by ACK_DEADLINE once every while as specified
            # by SLEEP_TIME.
            if process.is_alive():
                # `ack_deadline_seconds` must be between 10 and 600.
                subscriber.modify_ack_deadline(
                    subscription_path,
                    [ack_id],
                    ack_deadline_seconds=ACK_DEADLINE,
                )
                logger.info("{}: Reset ack deadline for {} for {}s".format(
                    time.strftime("%X", time.gmtime()),
                    msg_data,
                    ACK_DEADLINE,
                ))

            # If the process is finished, acknowledge the message using `ack_id`.
            else:
                subscriber.acknowledge(subscription_path, [ack_id])
                logger.info("{}: Acknowledged {}".format(
                    time.strftime("%X", time.gmtime()), msg_data))
                processes.pop(process)

        # If there are still processes running, sleeps the thread.
        if processes:
            time.sleep(SLEEP_TIME)

    print("Received and acknowledged {} messages. Done.".format(
        len(response.received_messages)))

    subscriber.close()
Example #42
class MegaMerger(multiprocessing.Process):
    log = multiprocessing.get_logger()

    def __init__(self, input_file_queue, output_file, ninputs):
        super(MegaMerger, self).__init__()
        self.input = input_file_queue
        self.output = output_file
        self.first_merge = True
        self.ninputs = ninputs
        self.processed = 0
        self.pbar = ProgressBar(
            widgets=[
                FormatLabel('Processed %(value)i/' + str(ninputs) + ' files. '),
                ETA(),
                Bar('>'),
            ],
            maxval=ninputs).start()
        self.pbar.update(0)
        self.files_to_clean = None

    def merge_into_output(self, files):
        self.files_to_clean = list(files)
        self.log.info("Merging %i into output %s", len(files), self.output)
        # Merge into a temporary output file
        output_file_hash = hashlib.md5()
        to_merge = []
        for file in files:
            to_merge.append(file)
            output_file_hash.update(file)

        output_file_name = os.path.join(tempfile.gettempdir(),
                                        output_file_hash.hexdigest() + '.root')

        # If we are doing a later merge, we need to include the
        # "merged-so-far"
        if self.first_merge:
            self.first_merge = False
        else:
            to_merge.append(self.output)

        merger = ROOT.TFileMerger()
        merger.OutputFile(output_file_name)
        for file in to_merge:
            merger.AddFile(file, False)
        result = merger.Merge()

        self.log.info("Merge completed with result: %s" % result)
        self.log.info("Output file is: %s, moving to %s", output_file_name,
                      self.output)
        shutil.move(output_file_name, self.output)

        # Cleanup.  We don't need to cleanup the temporary output, since it
        # is moved.
        while self.files_to_clean:
            os.remove(self.files_to_clean.pop())
        return result

    def run(self):
        # ignore SIGINT and let the parent take care of shutdown
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        while True:
            # Check if we are done merging
            done = False
            inputs_to_merge = []
            # Accumulate some files to merge
            while True:
                try:
                    self.log.debug("trying to get")
                    to_merge = self.input.get(timeout=1)
                    self.log.debug("got %s", to_merge)
                    # Check for poison pill
                    if to_merge is None:
                        self.log.info("Got poison pill - shutting down")
                        done = True
                        break
                    inputs_to_merge.append(to_merge)
                    self.processed += to_merge[0]
                    self.pbar.update(self.processed)
                    # Make it merge the files if the queue is stacking up.
                    if len(inputs_to_merge) > 15:
                        break
                except Empty:
                    self.log.debug("empty to get")
                    # Nothing to merge right now
                    break
                except IOError as e:
                    if e.errno == errno.EINTR:
                        self.log.debug(
                            "Interrupted by IOError, probably because user's system setting changed in a weird way"
                        )
                        break
                    else:
                        raise
                    # Reset loop and hope the problem has passed
            if inputs_to_merge:
                files_to_merge = []
                for entries, file in inputs_to_merge:
                    files_to_merge.append(file)
                self.merge_into_output(files_to_merge)
            if done:
                return
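A hedged sketch of driving MegaMerger: items on the queue are (entry_count, file_path) tuples and None acts as the poison pill; file names here are placeholders, and ROOT plus progressbar must be importable for the class itself.

import multiprocessing

if __name__ == '__main__':
    # Hypothetical driver for MegaMerger above.
    q = multiprocessing.Queue()
    merger = MegaMerger(q, 'merged.root', ninputs=2)
    merger.start()
    q.put((100, 'part1.root'))   # (entries, file) as consumed in run()
    q.put((250, 'part2.root'))
    q.put(None)                  # poison pill: merge what is queued, then shut down
    merger.join()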
Example #43
import subprocess
import tempfile
import minion.sensing.exceptions
import minion.sensing.postprocessors
import minion.core.components.exceptions
import multiprocessing

logger = multiprocessing.get_logger()


class PocketsphinxSpeechToText(minion.sensing.postprocessors.BasePostprocessor):
    configuration = {
        'delete_audio_file': True  # Set this to false to debug by keeping audio file after request
    }

    def __init__(self, name, configuration={}):
        super(PocketsphinxSpeechToText, self).__init__(name, configuration)

        options = self.get_configuration('options', {})
        self.options = ' '.join([
            '-{} {}'.format(key, value) for key, value in options.iteritems()
        ])

    def process(self, data):
        with tempfile.NamedTemporaryFile(
                delete=self.get_configuration('delete_audio_file')) as f:
            f.write(data)
            logger.debug('Writing to temporary file %s', f.name)
            if not self.get_configuration('delete_audio_file'):
Example #44
def synchronous_pull_with_lease_management(project_id, subscription_id):
    """Pulling messages synchronously with lease management"""
    # [START pubsub_subscriber_sync_pull_with_lease]
    import logging
    import multiprocessing
    import sys
    import time

    from google.api_core import retry
    from google.cloud import pubsub_v1

    multiprocessing.log_to_stderr()
    logger = multiprocessing.get_logger()
    logger.setLevel(logging.INFO)
    processes = dict()

    # TODO(developer)
    # project_id = "your-project-id"
    # subscription_id = "your-subscription-id"

    subscriber = pubsub_v1.SubscriberClient()
    subscription_path = subscriber.subscription_path(project_id,
                                                     subscription_id)

    response = subscriber.pull(
        request={
            "subscription": subscription_path,
            "max_messages": 3
        },
        retry=retry.Retry(deadline=300),
    )

    # Start a process for each message based on its size modulo 10.
    for message in response.received_messages:
        process = multiprocessing.Process(target=time.sleep,
                                          args=(sys.getsizeof(message) % 10, ))
        processes[process] = (message.ack_id, message.message.data)
        process.start()

    while processes:
        # Take a break every second.
        if processes:
            time.sleep(1)

        for process in list(processes):
            ack_id, msg_data = processes[process]
            # If the process is running, reset the ack deadline.
            if process.is_alive():
                subscriber.modify_ack_deadline(
                    request={
                        "subscription": subscription_path,
                        "ack_ids": [ack_id],
                        # Must be between 10 and 600.
                        "ack_deadline_seconds": 15,
                    })
                logger.info(f"Reset ack deadline for {msg_data}.")

            # If the process is complete, acknowledge the message.
            else:
                subscriber.acknowledge(request={
                    "subscription": subscription_path,
                    "ack_ids": [ack_id]
                })
                logger.info(f"Acknowledged {msg_data}.")
                processes.pop(process)
    print(
        f"Received and acknowledged {len(response.received_messages)} messages from {subscription_path}."
    )

    # Close the underlying gRPC channel. Alternatively, wrap subscriber in
    # a 'with' block to automatically call close() when done.
    subscriber.close()
Example #45
def error(*args):
    """Error function"""
    return multiprocessing.get_logger().error(*args)
Example #46
"""
Module with functions for dispatching subprocess and managing multiprocess pools
"""
import multiprocessing as mp
import subprocess
import os
import json
import logging
import argparse
import datetime

# grab logger from multiprocessing package
logger = mp.get_logger()

# setup logging levels
LOGGING_LEVELS = dict(
    debug=logging.DEBUG,
    info=logging.INFO,
    warning=logging.WARNING,
    error=logging.ERROR,
)


def subprocess_command(command,
                       path=None,
                       shell=False,
                       env=None,
                       pipe=False,
                       timeout=None):
    """
    Execute command in subprocess.
Example #47
    def handle(self, *args, **options):
        multiprocessing.log_to_stderr()
        logger = multiprocessing.get_logger()
        logger.setLevel(logging.INFO)

        if PARALLEL:
            pool = multiprocessing.Pool()

        corpus_results = {}
        results = []
        total_l1 = []
        total_l2 = []
        filenames = []

        for collection_root in COLLECTION_ROOTS:  #iterate over phonbank and CHILDES
            for collection_name in next(os.walk(collection_root))[1]:

                print("*** ", collection_name)
                corpus_roots = []

                for root, dirnames, filenames in os.walk(
                        os.path.join(collection_root, collection_name)):
                    for filename in fnmatch.filter(filenames,
                                                   '*.zip_placeholder'):
                        corpus_roots.append(
                            os.path.join(
                                collection_root, collection_name, root,
                                filename.replace('.zip_placeholder', '')))

                corpus_roots = list(set(corpus_roots))
                print('Corpora to process: ' + str(len(corpus_roots)))

                for corpus_root in corpus_roots:
                    if PARALLEL:
                        results.append(
                            pool.apply_async(count_corpora,
                                             args=(corpus_root, )))
                    else:
                        results.append(count_corpora(corpus_root, ))

        if PARALLEL:
            pool.close()

        for result in results:
            try:
                t1, t2, missing, filename = result.get()
                total_l1.extend(t1)
                total_l2.extend(t2)  # would need a map for special subsetting
                filenames.extend(filename)
            except:
                traceback.print_exc()

        counts = numpy.asarray([total_l1, total_l2])
        numpy.savetxt("freq.csv",
                      counts.transpose(),
                      delimiter=",",
                      fmt='%.3e')

        print('Number of transcripts in correlation:')
        print(len(total_l1))
        coeff = numpy.corrcoef(total_l1, total_l2)[0, 1]
        print("Correlation coefficient (Pearson's r)")
        print(coeff)
        os.system("echo %s > word_frequency_correlation.txt" % coeff)

        import pdb
        pdb.set_trace()
Example #48
def getLogger(loggerName,
              fileName='',
              fileMode='a',
              fileLevel='DEBUG',
              printLevel='WARN',
              colours={},
              logger_type='logging',
              capture_warnings=True):
    """Utility that returns a logger object

    Parameters
    ----------
    loggerName: str
        Name for the logger, usually just provide __name__
    fileName: str or bool, optional
        Name for the log file, if False no log file is generated
    fileMode: str, optional, default: 'a'
        Mode for opening log file (e.g. 'w' for write, 'a' for append).
    fileLevel: {'DEBUG','INFO','WARN'}, optional, default: 'DEBUG'
        Log level for FileHandler
    printLevel: {'DEBUG','INFO','WARN'}, optional, default: 'WARN'
        Log level for StreamHandler
    colours: dict, optional, default: {}
        Override default colours for ColourStreamHandler (see .logging.colourstreamhandler.ColourStreamHandler for more info)

    Returns
    -------
    logger: logging.Logger
        A logger object with FileHandler and StreamHandler as configured. Note the logger is a singleton.
    """
    logger_levels = {
        'DEBUG': logging.DEBUG,
        'INFO': logging.INFO,
        'WARN': logging.WARN
    }

    if logger_type == 'logging':
        logger = logging.getLogger(loggerName)
    elif logger_type == 'multiprocessing':
        import multiprocessing
        logger = multiprocessing.get_logger()
    else:
        raise ValueError(
            f'invalid logger_type {logger_type}. Must be "logging" or "multiprocessing"'
        )

    logging.captureWarnings(capture_warnings)

    if logger.hasHandlers():
        logger.handlers[:] = []

    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s/%(processName)s - %(module)s/%(funcName)s - %(levelname)s - %(message)s'
    )

    if fileName:
        if fileLevel not in logger_levels:
            raise ValueError(
                f"Provided invalid fileLevel '{fileLevel}'. Valid options: {logger_levels.keys()}"
            )
        fileHandler = logging.FileHandler(fileName, mode=fileMode)
        fileHandler.setLevel(logger_levels[fileLevel])
        fileHandler.setFormatter(formatter)
        logger.addHandler(fileHandler)

    if printLevel not in logger_levels:
        raise ValueError(
            f"Provided invalid printLevel '{printLevel}'. Valid options: {logger_levels.keys()}"
        )
    try:
        import colorama
    except ImportError:
        streamHandler = logging.StreamHandler()
        colour_support = False
    else:
        from .colourstreamhandler import ColourStreamHandler
        colorama.init(autoreset=True)
        streamHandler = ColourStreamHandler(colours)
        colour_support = True
    streamHandler.setLevel(printLevel)
    logger.addHandler(streamHandler)

    if colour_support:
        logger.info('Logger initialized with colour support.')
    else:
        logger.info(
            'Logger initialized without colour support. Failed to import colorama.'
        )

    return logger
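A hedged usage sketch for getLogger above; the file name and levels are placeholders:

# Hypothetical call; file name and levels are placeholders.
log = getLogger(__name__,
                fileName='analysis.log',
                fileLevel='INFO',
                printLevel='WARN',
                logger_type='multiprocessing')
log.warning('this goes to the console and to analysis.log')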
Example #49
def Attack(packetList, e):
    logger = multiprocessing.get_logger()
    attackSocket = socket.socket(
        socket.AF_INET,
        socket.SOCK_RAW,
        socket.IPPROTO_TCP
    )

    attackSocket.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)

    while True:
        e.wait()

        while packetList:
            packet = packetList.pop(0)
            ipPacket = IPv4Packet()
            ihl = 5
            version = 4
            ipPacket.SetIpVersionIhl((version << 4) + ihl)
            ipPacket.SetIpLen(40)
            ipPacket.SetIpId(random.randint(0, 32767))
            ipPacket.SetIpTTL(255)
            ipPacket.SetIpProtocol(socket.IPPROTO_TCP)
            ipPacket.SetIpSrc(packet["ipDst"])
            ipPacket.SetIpDst(packet["ipSrc"])
            ipHeader = ipPacket.Encode()
            ipPacket.SetIpSum(ipHeader)

            tcpPacket = TCPPacket()

            if options["action"] == "RESET":
                tcpPacket.SetTcpDestPort(packet["tcpSrcPort"])
                tcpPacket.SetTcpSrcPort(packet["tcpDestPort"])
                tcpPacket.SetTcpSeq(packet["tcpAckSeq"])
                tcpPacket.SetTcpDoffReserved((5 << 4) + 0)
                tcpPacket.SetTcpRstBit(1)
                tcpPacket.SetTcpFlags()

            logger.info("Attacking {}:{} and {}:{} with {}.".format(
                packet["ipSrc"], packet['tcpSrcPort'],
                packet["ipDst"], packet['tcpDestPort'], options["action"]))
            tcpHeader = tcpPacket.Encode()
            ipSrc = socket.inet_aton(str(packet["ipDst"]))
            ipDst = socket.inet_aton(str(packet["ipSrc"]))
            placeholder = 0
            protocol = socket.IPPROTO_TCP
            tcpLength = len(tcpHeader)

            pseudoHeader = pack('!4s4sBBH', ipSrc, ipDst, placeholder, protocol, tcpLength)
            pseudoHeader += tcpHeader

            tcpPacket.SetTcpCheck(pseudoHeader)

            tcpHeader = tcpPacket.Encode()

            result = ipHeader + tcpHeader

            attackSocket.sendto(result, (str(packet["ipSrc"]), 0))

        e.clear()

    attackSocket.close()  # note: never reached, since the loop above runs forever
Example #50
0
def lat_opt_unpack(args):
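    # Helper for driving lat_opt through a multiprocessing pool: args is a
    # 4-tuple of (the two positional lat_opt arguments, a kwargs dict, a label);
    # the label is recorded in kwargs['ihnf'] and returned alongside the result
    # so results can be matched back to their inputs.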
    import multiprocessing
    logger = multiprocessing.get_logger()
    args[2]['ihnf'] = args[3]
    return args[3], lat_opt(args[0], args[1], **(args[2]))
Example #51
0
def error(msg, *args):
    return multiprocessing.get_logger().error(msg, *args)
Example #52
0
def modify_insert_type_read(cigar,read,del_parts,pos,blocks,del_blocks,chr,shift_start,shift_end,HG):
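	# Rebuild a read according to its CIGAR string: split the read into M/I/D/S
	# segments, splice the supplied deletion sequences (del_parts) back into the
	# D segments, drop insertions whose left-flanking match block does not end at
	# pos within [shift_start, shift_end], and replace soft-clipped ends longer
	# than 10 bp with reference sequence fetched from HG. Returns the new read
	# and its adjusted start/end coordinates.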
	logger=multiprocessing.get_logger()
	read_parts=list()
	del_part=list()
	D_index=list()
	I_index=list()
	M_index=list()
	S_index=list()
	cigar_pat=re.compile(r'[0-9]+D|[0-9]+M|[0-9]+S|[0-9]+I')
	D_M_array=cigar_pat.findall(cigar)
	DM_len=list()
	for item in D_M_array:
		DM_len.append(int(item[:-1]))
	sum=copy.deepcopy(DM_len)
	sum[0]=DM_len[0]
	for i in range(1,len(DM_len)):
		if("D" in D_M_array[i]):
			sum[i]=sum[i-1]
		else:
			sum[i]=sum[i-1]+DM_len[i]
	for i in range(len(D_M_array)):
		if "D" in D_M_array[i]:
			D_index.append(i)
		elif("I" in D_M_array[i]):
			I_index.append(i)
		elif("M" in D_M_array[i]):
			M_index.append(i)
		elif("S" in D_M_array[i]):
			S_index.append(i)
	cursor=0
	for i in range(len(D_M_array)):
		read_parts.append(read[cursor:sum[i]])
		cursor=sum[i]
	for i in range(len(D_M_array)):
		if("D" in D_M_array[i]):
			read_parts[i]=del_parts[0]
			if(len(del_parts[1:])>0):
				del_parts=del_parts[1:]
			else:
				break
	Insert_l_r_blocks=dict()
	M_match_blocks=dict()
	M_block_index=0
	for i in range(len(D_M_array)):
		if("M" in D_M_array[i]):
			M_match_blocks[i]=blocks[M_block_index]
			M_block_index+=1
	for i in range(len(D_M_array)):
		if("I" in D_M_array[i]):
			if(i>0 and i<(len(D_M_array)-1)):
				# debug output (disabled):
				#   print('D_M_array=', D_M_array)
				#   print('i=', i)
				#   print('len(M_match_blocks)=', len(M_match_blocks))
				#   print('cigar=', cigar)
				Insert_l_r_blocks[i]=[M_match_blocks[i-1],M_match_blocks[i+1]]
	Insert_delete_bool=dict()
	for i in Insert_l_r_blocks:
		pos_=Insert_l_r_blocks[i][0][1]
		if (pos_>=shift_start and pos_<=shift_end):
			if pos_!=pos:
				read_parts[i]=""
		else:
			read_parts[i]=""
	new_read_start,new_read_end=blocks[0][0],blocks[-1][1]
	if(M_index[0]>0):
		for i in range(M_index[0])[::-1]:
			if("S" in D_M_array[i]):
				if(len(read_parts[i])>10):
					S_len=DM_len[i]
					S_add_seq=get_ref(chr,new_read_start-S_len,new_read_start,HG)
					read_parts[i]=S_add_seq
				new_read_start-=len(read_parts[i])
	if(M_index[-1]!=len(D_M_array)-1):
		for i in range(M_index[-1]+1,len(D_M_array)):
			if("S" in D_M_array[i]):
				if(len(read_parts[i])>10):
					S_len=DM_len[i]
					S_add_seq=get_ref(chr,new_read_end,new_read_end+S_len,HG)
					read_parts[i]=S_add_seq
				new_read_end+=len(read_parts[i])
	new_read="".join(read_parts)
	return new_read,new_read_start,new_read_end
Example #53
0
        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
"""

import logging, multiprocessing

__all__ = ['FastTripsLogger', 'setupLogging']

#: This is the instance of :py:class:`Logger` that gets used for all dta logging needs!
FastTripsLogger = multiprocessing.get_logger()


def setupLogging(infoLogFilename,
                 debugLogFilename,
                 logToConsole=True,
                 append=False):
    """
    Sets up the logger.

    :param infoLogFilename: info log file, will receive log messages of level INFO or more important.
       Pass None for no info log file.
    :param debugLogFilename: debug log file, will receive log messages of level DEBUG or more important.
       Pass None for no debug log file.
    :param logToConsole: if true, INFO and above will also go to the console.
    :param append: if true, append to any existing log files rather than overwriting them.
    """
Example #54
0
def error(msg, *args):
    """ Error function"""
    return multiprocessing.get_logger().error(msg, *args)  # note: get_logger() takes no name argument
Example #55
0
 def func_wrapper(test, flags):
     logger = multiprocessing.get_logger()
     logger.info("start: >{0} {1}".format(test, flags))
     result = fn(test, flags)
     logger.info("end: >{0} {1}".format(test, flags))
     return result
Example #56
0
import pylator as pyl
import multiprocessing as mp
import numpy as np
import time
from collections import deque

crit = mp.get_logger().critical
info = mp.get_logger().info
debug = mp.get_logger().debug


def module_configuration():
    simData = {
        # Define Module Outputs
        "/outputs/signal/dtype": "double",
        "/outputs/signal/shape": [200],
        "/outputs/signal/default": 0,

        # Define module inputs with assigned defaults
        "/inputs/signal": 0
    }
    return simData


class Module(pyl.Model):
    def initialise(self, simData):
        # Inputs using default values, set as constants (Generated)
        # User data
        self.buffer = deque()
        for i in range(simData["/outputs/signal/shape"][0] * 2):
            self.buffer.append(0)
Example #57
0
"""

import os, sys
import time
import json
import logging
import traceback

from optparse import OptionParser
from logging.handlers import RotatingFileHandler
from multiprocessing import get_logger, log_to_stderr

from ircbot import SingleServerIRCBot
from irclib import nm_to_n, all_events

log = get_logger()
_ourPath = os.getcwd()
_ourName = os.path.splitext(os.path.basename(sys.argv[0]))[0]


def relativeDelta(td):
    s = ''
    if td.days < 0:
        t = "%s ago"
    else:
        t = "in %s"

    days = abs(td.days)
    seconds = abs(td.seconds)
    minutes = seconds / 60
    hours = minutes / 60
Example #58
0
import multiprocessing as mp
from functools import partial

log = mp.get_logger()


def log_to_stderr(log_level='INFO', force=False):
    """
    Shortcut allowing to display logs from workers.

    :param log_level: Set the logging level of this logger.
    :param force: Add handler even there are other handlers already.
    """
    if not log.handlers or force:
        mp.log_to_stderr()
    log.setLevel(log_level)


def delayed(func):
    """
    Decorator used to capture the arguments of a function.
    Analogue of joblib's delayed.

    :param func: Function to be captured.
    """
    def wrapper(*args, **kwargs):
        return partial(func, *args, **kwargs)

    return wrapper
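
# Hedged usage sketch for the two helpers above; `work` and `_run` are
# hypothetical names introduced here for illustration, not part of the module.
# With the fork start method the workers inherit the stderr handler set up by
# log_to_stderr(); with spawn the configuration would need to be repeated in
# each worker.
def work(x, scale=1):
    log.info('processing %s', x)
    return x * scale

def _run(task):
    # Execute a call previously captured by delayed().
    return task()

if __name__ == '__main__':
    log_to_stderr('INFO')                       # show worker log records on stderr
    tasks = [delayed(work)(i, scale=10) for i in range(4)]
    with mp.Pool(2) as pool:
        print(pool.map(_run, tasks))            # -> [0, 10, 20, 30]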

Example #59
0
 def __init__(self):
     self._log = multiprocessing.get_logger()
     self._lock = multiprocessing.Lock()
Example #60
0
def asyncPoolEC(func, args, kwargs, NP=1, ldebug=False, ltrialnerror=True):
    '''
    Execute func once for each argument tuple in args (len(args) calls) on NP processors;
    args must be a list of argument tuples, and kwargs are keyword arguments to func that do
    not change between calls.
    func is assumed to take a keyword argument lparallel to indicate parallel execution, and to
    return a common exit status (0 = no error, > 0 for an error code).
    This function returns the number of failures as its exit code.
    '''
    # input checking
    if not isinstance(func, types.FunctionType): raise TypeError
    if not isinstance(args, list): raise TypeError
    if not isinstance(kwargs, dict): raise TypeError
    if NP is not None and not isinstance(NP, int): raise TypeError
    if not isinstance(ldebug, (bool, np.bool_)): raise TypeError
    if not isinstance(ltrialnerror, (bool, np.bool_)): raise TypeError

    # figure out if running parallel
    if NP is not None and NP == 1: lparallel = False
    else: lparallel = True
    kwargs['ldebug'] = ldebug
    kwargs['lparallel'] = lparallel

    # logging level
    if ldebug: loglevel = logging.DEBUG
    else: loglevel = logging.INFO
    # set up parallel logging (multiprocessing)
    if lparallel:
        multiprocessing.log_to_stderr()
        mplogger = multiprocessing.get_logger()
        #if ldebug: mplogger.setLevel(logging.DEBUG)
        if ldebug: mplogger.setLevel(logging.INFO)
        else: mplogger.setLevel(logging.ERROR)
    # set up general logging
    logger = logging.getLogger('multiprocess.asyncPoolEC')  # standard logger
    logger.setLevel(loglevel)
    ch = logging.StreamHandler(sys.stdout)  # stdout, not stderr
    ch.setLevel(loglevel)
    ch.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(ch)
    kwargs['logger'] = logger.name
    #   # process sub logger
    #   sublogger = logging.getLogger('multiprocess.asyncPoolEC.func') # standard logger
    #   sublogger.propagate = False # only print message in this logger
    #   sch = logging.StreamHandler(sys.stdout) # stdout, not stderr
    #   sch.setLevel(loglevel)
    #   if lparallel: fmt = logging.Formatter('[%(processName)s] %(message)s')
    #   else: fmt = logging.Formatter('%(message)s')
    #   sch.setFormatter(fmt)
    #   sublogger.addHandler(sch)
    #   kwargs['logger'] = sublogger.name

    # apply decorator
    if ltrialnerror: func = TrialNError(func)

    # print first logging message
    logger.info(datetime.today())
    logger.info('\nTHREADS: {0:s}, DEBUG: {1:s}\n'.format(
        str(NP), str(ldebug)))
    exitcodes = []  # list of results

    def callbackEC(result):
        # custom callback function that appends the results to the list
        exitcodes.append(result)

    ## loop over and process all job sets
    if lparallel:
        # create pool of workers
        pool = multiprocessing.Pool(
            processes=NP)  # NP=None uses all available CPUs
        # distribute tasks to workers
        for arguments in args:
            #exitcodes.append(pool.apply_async(func, arguments, kwargs))
            #print arguments
            pool.apply_async(func, arguments, kwargs, callback=callbackEC)
            # N.B.: we do not record result objects, since we have callback, which just extracts the exitcodes
        # wait until pool and queue finish
        pool.close()
        pool.join()
        logger.debug('\n   ***   all processes joined   ***   \n')
    else:
        # don't parallelize, if there is only one process: just loop over files
        for arguments in args:
            exitcodes.append(func(*arguments, **kwargs))

    # evaluate exit codes
    exitcode = 0
    for ec in exitcodes:
        #if lparallel: ec = ec.get() # not necessary, if callback is used
        if ec < 0: raise ValueError('Exit codes have to be zero or positive!')
        elif ec > 0: ec = 1
        # else ec = 0, i.e. no errors
        exitcode += ec
    # N.B.: a None return value is not a valid exit code and will trip the check above
    nop = len(args) - exitcode

    # print summary (to log)
    if exitcode == 0:
        logger.info(
            '\n   >>>   All {:d} operations completed successfully!!!   <<<   \n'
            .format(nop))
    else:
        logger.info(
            '\n   ===   {:2d} operations completed successfully!    ===   \n'.
            format(nop) +
            '\n   ###   {:2d} operations did not complete/failed!   ###   \n'.
            format(exitcode))
    logger.info(datetime.today())
    # return with exit code
    return exitcode
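
# Hedged usage sketch for asyncPoolEC; `process_file` is a hypothetical worker
# shown only to illustrate the calling convention: it must accept the
# lparallel/ldebug/logger keyword arguments injected above and return 0 on success.
def process_file(filename, lparallel=False, ldebug=False, logger=None):
    log = logging.getLogger(logger)
    log.info('processing %s (parallel=%s)', filename, lparallel)
    return 0  # 0 = success, > 0 = error code

if __name__ == '__main__':
    failures = asyncPoolEC(process_file, [('a.nc',), ('b.nc',), ('c.nc',)], dict(), NP=2)
    sys.exit(failures)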