Example #1
def main():
    """Main function for invoking the bootstrap process

	:raises Exception: When the invoking user is not root and --dry-run isn't specified
	"""
    # Get the commandline arguments
    opts = get_opts()
    # Require root privileges, except when doing a dry-run where they aren't needed
    import os
    if os.geteuid() != 0 and not opts['--dry-run']:
        raise Exception('This program requires root privileges.')

    import log
    # Log to file unless --log is a single dash
    if opts['--log'] != '-':
        # Setup logging
        if not os.path.exists(opts['--log']):
            os.makedirs(opts['--log'])
        log_filename = log.get_log_filename(opts['MANIFEST'])
        logfile = os.path.join(opts['--log'], log_filename)
    else:
        logfile = None
    log.setup_logger(logfile=logfile, debug=opts['--debug'])

    # Everything has been set up, begin the bootstrapping process
    run(opts)
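The bootstrap examples on this page (#1, #2, #9, #10, #43) call a project-local log module whose setup_logger(logfile=..., debug=...) is not shown here. Below is a minimal sketch of what such a helper might look like using only the standard logging package; the function name and keyword arguments mirror the calls above, everything else is an assumption.

import logging


def setup_logger(logfile=None, debug=False):
    # Hypothetical sketch of log.setup_logger(logfile=..., debug=...);
    # not the actual module used by these examples.
    root = logging.getLogger()
    root.setLevel(logging.DEBUG if debug else logging.INFO)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s')
    console = logging.StreamHandler()
    console.setFormatter(formatter)
    root.addHandler(console)
    if logfile is not None:
        # also write to the file computed from --log / MANIFEST above
        filehandler = logging.FileHandler(logfile)
        filehandler.setFormatter(formatter)
        root.addHandler(filehandler)
    return root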
Example #2
def main():
	"""Main function for invoking the bootstrap process

	Raises:
		Exception
	"""
	# Get the commandline arguments
	opts = get_opts()
	# Require root privileges, except when doing a dry-run where they aren't needed
	import os
	if os.geteuid() != 0 and not opts['--dry-run']:
		raise Exception('This program requires root privileges.')

	import log
	# Log to file unless --log is a single dash
	if opts['--log'] != '-':
		# Setup logging
		if not os.path.exists(opts['--log']):
			os.makedirs(opts['--log'])
		log_filename = log.get_log_filename(opts['MANIFEST'])
		logfile = os.path.join(opts['--log'], log_filename)
	else:
		logfile = None
	log.setup_logger(logfile=logfile, debug=opts['--debug'])

	# Everything has been set up, begin the bootstrapping process
	run(opts)
Example #3
def main():
    """Main entry point for recipes"""
    log_file = '{}.log'.format(datetime.now().strftime('%Y%m%d_%H%M%S'))
    setup_logger(log_path=os.path.join(SRC_ROOT, 'log', log_file))
    recipe_root_dir = '/home/jakob/personal/bryggans/Departement/Bryggeri/recept_arkiv'
    recipe_file = 'kalaslager.xml'
    recipe = Recipe()
    recipe.recipe_from_bxml(os.path.join(recipe_root_dir, recipe_file))
    logger.info(sum_list(recipe.fermentables_list))
Example #4
def main(args=None):
    args = parse_configs(args)
    setup_logger(args.log_level)
    mongo_client = CpsMongoClient(args.db_address)
    api_client = CpsApiClient()
    test_client = TestCaseClient(mongo_client, api_client)
    #test_client.load(f"{args.command}-{args.sample}")
    logger.info(f"Available Test Cases are {test_client.available_test_cases}")
    test_client.set_context(args.command, load=False)
    generate_receipts(test_client, args.command)
Example #5
def just_doit(instance_name):
    """ Do the dirty work, or let ansible do it. """

    logger = log.setup_logger('%s - ansible.out' % instance_name)
    logger.debug('Attempting ansible tasks on instance-name: %s', instance_name)
    vars = "instance_name=%s" % instance_name
    cmd = '/usr/bin/ansible-playbook -vvv --extra-vars '\
          '\"%s\" %s/stackit.yml' % (vars, cfg.Ansible.ansible_dir)

    logger.debug('Running ansible stackit command: %s', cmd)
    ansible_proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    output = ansible_proc.communicate()[0]
    logger.debug('Response from ansible: %s', output)

    #do not teardown the error instances
    if output.find('Error') != -1:
        return output

    vars = "instance_name=%s" % instance_name
    cmd = '/usr/bin/ansible-playbook --extra-vars '\
          '\"%s\" %s/teardown.yml' % (vars, cfg.Ansible.ansible_dir)

    logger.debug('Running ansible teardown command: %s', cmd)
    ansible_proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    output += ansible_proc.communicate()[0]

    return output
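Example #6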
  def __init__(self, chipOwner):
    self.cache = CacheL2()

    self._chipOwner = chipOwner

    LOG_FILENAME = 'logs/system'
    self._logging = setup_logger(LOG_FILENAME, "{}.log".format(LOG_FILENAME))
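The __init__ snippet above calls setup_logger(LOG_FILENAME, "{}.log".format(LOG_FILENAME)), i.e. a (name, logfile) signature; the helper itself is not included on this page. A minimal sketch under that assumption:

import logging
import os


def setup_logger(name, logfile):
    # Hypothetical (name, logfile) variant; the real helper is not shown here.
    logdir = os.path.dirname(logfile)
    if logdir:
        os.makedirs(logdir, exist_ok=True)  # e.g. creates 'logs/'
    logger = logging.getLogger(name)
    if not logger.handlers:  # avoid stacking handlers on repeated instantiation
        handler = logging.FileHandler(logfile)
        handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
    return logger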
Example #7
def main():
    logger = log.setup_logger(__name__)
    print_log(logger, "Start main")

    parser = argparse.ArgumentParser()
    parser.add_argument("--demo",
                        type=int,
                        help="determine the demo to be executed (1|2|3)")
    parser.add_argument("--verbosity",
                        help="increase output verbosity",
                        action="store_true")
    args = parser.parse_args()

    if args.verbosity:
        settings.VERBOSE = True
        print_log(logger, "verbosity turned on")
    if args.demo:
        demo_id = args.demo
    else:
        parser.error("You have to decide one of the available demo (1|2|3)")
        return -1

    print_log(logger, "Selected demo: %d" % demo_id)

    demos.demo_manager.exec_demo(demo_id)

    print_log(logger, "Demo %s done." % demo_id)
    return 0
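Example #8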
    def __init__(self):
        self._mem = []

        LOG_FILENAME = 'logs/system'
        self._logging = setup_logger(LOG_FILENAME,
                                     "{}.log".format(LOG_FILENAME))

        for _ in range(16):
            self._mem.append(MemoryLine())
Example #9
def main():
	"""Main function for invoking the bootstrap process

	Raises:
		Exception
	"""
	# Get the commandline arguments
	import os
	args = get_args()
	# Require root privileges, except when doing a dry-run where they aren't needed
	if os.geteuid() != 0 and not args.dry_run:
		raise Exception('This program requires root privileges.')
	# Setup logging
	import log
	logfile = log.get_logfile_path(args.manifest)
	log.setup_logger(logfile=logfile, debug=args.debug)
	# Everything has been set up, begin the bootstrapping process
	run(args)
Example #10
def main():
    """Main function for invoking the bootstrap process

	Raises:
		Exception
	"""
    # Get the commandline arguments
    import os
    args = get_args()
    # Require root privileges, except when doing a dry-run where they aren't needed
    if os.geteuid() != 0 and not args.dry_run:
        raise Exception('This program requires root privileges.')
    # Setup logging
    import log
    logfile = log.get_logfile_path(args.manifest)
    log.setup_logger(logfile=logfile, debug=args.debug)
    # Everything has been set up, begin the bootstrapping process
    run(args)
Example #11
def validate_config():
    ''' Validates currently stored config-values'''
    # Don't do this at module level as we cannot guarantee that all config options used by setup_logger are already setup
    logger = log.setup_logger(__name__)

    if bind_port <= 0:
        raise ValueError("Port must be a positive, non-zero integer")
    elif bind_port < 1024:
        logger.warning(
            "Port should not be in the range of well-known ports (0-1023)")
Example #12
def _get_ec2_userdata():
    ''' Checks if ec2 metadata contain userdata related to dynalize and use them to startup '''

    try:
        r = requests.get("http://localhost/latest/user-data")

        if r.status_code == 200:
            return r.json()['dynalize_parameter']
    except:
        logger = log.setup_logger(__name__)
        logger.error("Failed to get ec2 userdata")

    return ""
Example #13
def main():
    """Parse the command line config and start listening on associated ports.
    """

    # parse the args
    args = parse_args(sys.argv)
    # setup the logger
    print(args.log)
    logger = log.setup_logger(args.log)
    # make a solenoid
    solenoid = Solenoid(args.solenoid, args.openduration)
    # make an rfid reader
    reader = Reader(solenoid, args.port, args.baudrate)
    # start the reader
    reader.start()
Example #14
def main():
    """Parse the command line config and start listening on associated ports.
    """

    # parse the args
    args = parse_args(sys.argv)
    # setup the logger
    print args.log
    logger = log.setup_logger(args.log)
    # make a solenoid
    solenoid = Solenoid(solenoid_pin, args.openduration)
    # make an rfid reader
    reader = Reader(solenoid, args.port, baud_rate)
    # start the reader
    reader.start()
Example #15
    def __init__(self, name, chipNumber, storageOut, storageIn, mainwin,
                 guiQueue):
        threading.Thread.__init__(self)

        self._name = name
        self._chipNumber = chipNumber
        self._instructions = ["READ", "CALC", "WRITE"]
        self._storageOut = storageOut
        self._storageIn = storageIn

        self._mainwin = mainwin
        self._guiQueue = guiQueue

        LOG_FILENAME = 'logs/system'
        self._logging = setup_logger(LOG_FILENAME,
                                     "{}.log".format(LOG_FILENAME))
Example #16
def update_debug_mode():
    if not debug:
        return

    # Don't do this at module level as we cannot guarantee that all config options used by setup_logger are already setup
    logger = log.setup_logger(__name__)

    # Check debug setting and change any already existing logger to DEBUG level.
    # Any logger created afterwards will be set to DEBUG already.
    created_loggers = logging.getLogger().manager.loggerDict

    for temp_logger_name, temp_logger in created_loggers.items():
        logger.debug("Changing loglevel of '%s' to DEBUG", temp_logger_name)
        try:
            temp_logger.setLevel(logging.DEBUG)
        except AttributeError:
            pass
Example #17
    def __init__(self, city, keyword, logfile=""):
        self.session = requests.session()
        self.session.keep_alive = False
        self.detail_queue = Queue()
        self.query = {
            "city": city,
            "province": "",
            "keyword": keyword
        }
        self.pipeline = RedisPipeline()
        self.filter = DummyFilter(self.sid)
        self.statistics = {
            'cur_page': 0,
            'finished': 0,
            'failed': 0,
            'total': 0,
        }
        if not logfile:
            logfile = os.path.join(os.path.dirname(__file__), "../log/%s.log" % self.sid)

        self.logger = setup_logger(logfile)
Example #18
import argparse
import sys
import samtools
import re
import shutil
import os.path
from ngs_mapper.bam import sortbam, indexbam

import log
logger = log.setup_logger('tagreads',log.get_config())

# Exception for when headers exist
class HeaderExists(Exception): pass

# The next 3 tuples have to be the same length and each index in each is related the same index in each tuple
# AKA zip( IDS, PLATFORMS, ID_MAP ) should work as expected
# Read group ID list
IDS = ('Roche454', 'IonTorrent', 'MiSeq', 'Sanger')
# Valid platforms for read groups
PLATFORMS = ('L454', 'IONTORRENT', 'ILLUMINA', 'CAPILLARY')
# Read name map to ID name
ID_MAP = (
    re.compile( '[0-9A-Z]{14}' ),
    re.compile( '[A-Z0-9]{5}:\d{1,}:\d{1,}' ),
    re.compile( 'M[0-9]{5}:\d+:[\w\d-]+:\d:\d{4}:\d{4,5}:\d{4,5}' ),
    re.compile( '.*' )
)
# Read Group Template
RG_TEMPLATE = {
    'SM': None,
    'ID': None,
Example #19
from pprint import pprint
from urllib.parse import urlparse

from config import HEADER, PAYLOAD_CHARS, PAYLOAD_CHARS_, BREAKER_THRESHOLD
from core.parser import HtmlParser
from core.requester import requester
import log
from utils import get_query_dict, get_url, gen_scout_str, gen_check_str, get_valid_paths
'''
1. Find the parameters that need replacing
2. Generate random strings to replace the parameters and send the request; derive the context (attribute, js, css) from the response
3. Based on that context, try the symbols usable in it for reaching JS execution and score their effectiveness
4.
'''

logger = log.setup_logger(__name__)


def analyse(url, GET, data=None, PATH=False, header=HEADER):
    param_msg = {}  # collects the context/position information for each parameter
    # inputs: GET, PATH, url, data
    # determine the data parameters to request and the url to request
    if GET:
        if PATH:  # take the components to replace from the url path
            data = get_valid_paths(url)
        else:  # take the components to replace from the url query parameters
            url_parse_result = urlparse(
                url
            )  # ParseResult(scheme='http', netloc='192.168.1.46', path='/dvwa/vulnerabilities/xss_r/', params='', query='name=hi', fragment='')
            query = url_parse_result.query
            if query == "":
Example #20
from functools import partial

from Config import Config
from dtos.sensors.DistanceSensorDTO import DistanceSensorType
from services.ComponentService import ComponentService
from services.EventService import EventService
from flask_script import Server
from log import setup_logger
import requests

setup_logger('root')


class FlaskServer(Server):
    def __call__(self, app, *args, **kwargs):
        self.init()
        return Server.__call__(self, app, *args, **kwargs)

    @staticmethod
    def register_listeners():
        ComponentService().left_distance_sensor.triggered = partial(
            EventService.send_distance_sensor_event, DistanceSensorType.LEFT)
        ComponentService().right_distance_sensor.triggered = partial(
            EventService.send_distance_sensor_event, DistanceSensorType.RIGHT)

    @staticmethod
    def init():
        FlaskServer.register_listeners()
        r = requests.post(
            f'{Config.externalServerUrl}/numbers/{Config.roomNumber}/register',
            headers={'API-Key': Config.apiKey})
Example #21
import sys
import logging
import pprint
import MySQLdb
import re
import pytz
import datetime
import traceback
sys.path.append('pronto')

from app import Config
Config.load_app_config()

import periodic_db_updater_conf

from pronto import Pronto, View, ProntoHttp404

from log import setup_logger
setup_logger()
logger = logging.getLogger('pronto_logger')
pp = pprint.PrettyPrinter(indent=4)


g_conn = MySQLdb.connect( host        = Config.database['host'],
                          user        = Config.database['user'],
                          passwd      = Config.database['passwd'],
                          db          = Config.database['schema'],
                          charset     = 'utf8',
                          use_unicode = True)

g_conn.autocommit(False)
g_cur = g_conn.cursor()

def db_get_single_value( sql, params ):
Example #22
def just_doit(patchset_ref, results_dir):
    """ Do the dirty work, or let ansible do it. """

    ref_name = patchset_ref.replace('/', '-')
    logger = log.setup_logger(results_dir + '/ansible.out')
    logger.debug('Attempting ansible tasks on ref-name: %s', ref_name)
    vars = "instance_name=%s" % (ref_name)
    vars += " patchset_ref=%s" % patchset_ref
    vars += " results_dir=%s" % results_dir
    cmd = '/usr/local/bin/ansible-playbook --extra-vars '\
          '\"%s\" %s/run_ci.yml' % (vars, cfg['Ansible']['ansible_dir'])

    logger.debug('Running ansible run_ci command: %s', cmd)
    ansible_proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    output = ansible_proc.communicate()[0]
    logger.debug('Response from ansible: %s', output)

    vars = "ref_name=%s" % (ref_name)
    vars += " results_dir=%s" % results_dir
    cmd = '/usr/local/bin/ansible-playbook --extra-vars '\
          '\"%s\" %s/publish.yml' % (vars, cfg['Ansible']['ansible_dir'])
    logger.debug('Running ansible publish command: %s', cmd)

    # This output is actually the ansible output
    # should fix this up and have it just return the status
    # and the tempest log that we xfrd over
    ansible_proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    output += ansible_proc.communicate()[0]
    logger.debug('Response from ansible: %s', output)

    success = False
    hash_id = None
    console_log = results_dir + '/' + 'console.log.out'
    logger.debug('Looking for console log at: %s', console_log)
    if os.path.isfile(console_log):
        logger.debug('Found the console log...')
        if 'Failed: 0' in open(console_log).read():
            logger.debug('Evaluated run as successful')
            success = True

        logger.info('Status from console logs: %s', success)
        # We grab the abbreviated sha from the first line of the
        # console.out file
        with open(console_log) as f:
            first_line = f.readline()
        print "Attempting to parse: %s" % first_line
        hash_id = first_line.split()[1]

    # Finally, delete the instance regardless of pass/fail
    # NOTE it's moved out of tasks here otherwise it won't
    # run if preceded by a failure
    vars = "instance_name=%s" % (ref_name)
    vars += " patchset_ref=%s" % patchset_ref
    cmd = '/usr/local/bin/ansible-playbook --extra-vars '\
          '\"%s\" %s/teardown.yml' % (vars, cfg['Ansible']['ansible_dir'])

    logger.debug('Running ansible teardown command: %s', cmd)
    ansible_proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    output += ansible_proc.communicate()[0]

    return (hash_id, success, output)
Example #23
    def KAFKA(a):
        return config.get('KAFKA', a)

    def ORION(a):
        return config.get('ORION', a)

    def CLASSFCTN(a):
        return config.get('CLASSIFICATION', a)

    def LOG(a):
        return config.get('LOG', a)

    # set the logging level
    if args.debug:
        setup_logger(logfile=LOG('LOGFILE'), loglevel=10)
    else:
        setup_logger(logfile=LOG('LOGFILE'))
    LOGGER = logger(__name__)

    # Initialize the source and sink queue with locks
    sourceQueue = Queue.Queue()
    sourceLock = threading.Lock()
    sinkQueue = Queue.Queue()
    sinkLock = threading.Lock()

    if args.tensorflow:
        FIRECLF = TensorflowClassifier(CLASSFCTN('IMAGES_DIR'),
                                       CLASSFCTN('LABELS'),
                                       CLASSFCTN('FROZEN_GRAPH'), sourceQueue,
                                       sourceLock)
Example #24
import argparse
import sys
import samtools
import re
import shutil
import os.path
from ngs_mapper.bam import sortbam, indexbam

import log
logger = log.setup_logger('tagreads', log.get_config())


# Exception for when headers exist
class HeaderExists(Exception):
    pass


# The next 3 tuples have to be the same length and each index in each is related the same index in each tuple
# AKA zip( IDS, PLATFORMS, ID_MAP ) should work as expected
# Read group ID list
IDS = ('Roche454', 'IonTorrent', 'MiSeq', 'Sanger')
# Valid platforms for read groups
PLATFORMS = ('L454', 'IONTORRENT', 'ILLUMINA', 'CAPILLARY')
# Read name map to ID name
ID_MAP = (re.compile('[0-9A-Z]{14}'), re.compile('[A-Z0-9]{5}:\d{1,}:\d{1,}'),
          re.compile('M[0-9]{5}:\d+:[\w\d-]+:\d:\d{4}:\d{4,5}:\d{4,5}'),
          re.compile('.*'))
# Read Group Template
RG_TEMPLATE = {'SM': None, 'ID': None, 'PL': None, 'CN': None}
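Several of the ngs_mapper snippets on this page (#18, #24, and the runsample/graphsample examples further down) pair log.get_config() with log.setup_logger(name, config). That log module ships with ngs_mapper and is not reproduced here; what follows is a rough dictConfig-based sketch of the assumed shape, not the project's actual code.

import logging
import logging.config


def get_config(logfile='ngs_mapper.log'):
    # Assumed shape only: a standard dictConfig dictionary keyed on a log file path.
    return {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'default': {'format': '%(asctime)s %(levelname)s %(name)s: %(message)s'},
        },
        'handlers': {
            'file': {
                'class': 'logging.FileHandler',
                'filename': logfile,
                'formatter': 'default',
            },
        },
        'root': {'handlers': ['file'], 'level': 'INFO'},
    }


def setup_logger(name, config):
    # Apply the config once and hand back a named logger.
    logging.config.dictConfig(config)
    return logging.getLogger(name)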

Example #25
    # C:/Users/mteng/Desktop/small case/
    # ./Inputs/P=10
    # ./Inputs/case1

    # ### run single
    from log import setup_logger

    # for i in range(6, 7):
    _result_output_path = 'C:/Users/mteng/Desktop/P123/P3/output/result'
    data_path = 'C:/Users/mteng/Desktop/P123/P3'
    log_output_path = 'C:/Users/mteng/Desktop/P123/P3/output/log'

    if not exists(log_output_path):
        makedirs(log_output_path)

    _logger = setup_logger('20170613', '%s/my.log' % log_output_path)
    (objVal, cost, gap) = heuristic_delta_weight(data_path, converge_count=20, tolerance=0.5, d1=10, d2=0.1)
    print(objVal, cost, gap)

    # write intermediate result to file
    _weight_dataset.to_csv('%s/log-weight.csv' % log_output_path, index=False)
    _pa_dataset.to_csv('%s/log-pa.csv' % log_output_path, index=False)
    _pr_dataset.to_csv('%s/log-pr.csv' % log_output_path, index=False)
    _single_project_objective_dataset.to_csv('%s/log-single-project-objective.csv' % log_output_path, index=False)
    _tardiness_objective_dataset.to_csv('%s/log-tardiness-objective.csv' % log_output_path, index=False)
    _pa_max_dataset.to_csv('%s/log-pa-max.csv' % log_output_path, index=False)

    # clear existing dataset
    _weight_dataset.drop(_weight_dataset.index, inplace=True)
    _pa_dataset.drop(_pa_dataset.index, inplace=True)
    _pr_dataset.drop(_pr_dataset.index, inplace=True)
Example #26
def main():
    args,qsubargs = parse_args()
    # Qsub job?
    if qsubargs:
        runsampleargs, _ = split_args(' '.join(sys.argv[1:]))
        print pbs_job(runsampleargs, qsubargs)
        sys.exit(1)
    # So we can set the global logger
    global logger
    # Setup analysis directory
    if os.path.isdir( args.outdir ):
        if os.listdir( args.outdir ):
            raise AlreadyExists( "{0} already exists and is not empty".format(args.outdir) )
    else:
        os.makedirs(args.outdir)

    # tempdir root will be TMPDIR environ variable if it exists
    # unless outdir is set
    # allows user to specify TMPDIR somewhere else if they want such as
    # /dev/shm
    tmpdir = args.outdir
    # Directory analysis is run in will be inside of tmpdir
    tdir = tempfile.mkdtemp('runsample', args.prefix, dir=tmpdir)
    os.environ['TMPDIR'] = tdir

    bamfile = os.path.join( tdir, args.prefix + '.bam' )
    flagstats = os.path.join( tdir, 'flagstats.txt' )
    consensus = bamfile+'.consensus.fasta'
    vcf = bamfile+'.vcf'
    bwalog = os.path.join( tdir, 'bwa.log' )
    stdlog = os.path.join( tdir, args.prefix + '.std.log' )
    logfile = os.path.join( tdir, args.prefix + '.log' )
    CN = args.CN

    # Set the global logger
    config = log.get_config( logfile )
    logger = log.setup_logger( 'runsample', config )

    #make_project_repo( tdir )

    logger.info( "--- Starting {0} --- ".format(args.prefix) )
    if args.config:
        logger.info( "--- Using custom config from {0} ---".format(args.config) )
    # Write all stdout/stderr to a logfile from the various commands
    with open(stdlog,'wb') as lfile:
        cmd_args = {
            'samplename': args.prefix,
            'tdir': tdir,
            'readsdir': args.readsdir,
            'reference': os.path.join(tdir, os.path.basename(args.reference)),
            'bamfile': bamfile,
            'flagstats': flagstats,
            'consensus': consensus,
            'vcf': vcf,
            'CN': CN,
            'trim_qual': args.trim_qual,
            'trim_outdir': os.path.join(tdir,'trimmed_reads'),
            'filtered_dir' : os.path.join(tdir, 'filtered'),
            'head_crop': args.head_crop,
            'minth': args.minth,
            'config': args.config,
            'platforms': args.platforms,
            'drop_ns': args.drop_ns,
            'index_min': args.index_min,
            'primer_info' : (args.primer_file, args.primer_seed, args.palindrom_clip, args.simple_clip)
        }

        # Best not to run across multiple cpu/core/threads on any of the pipeline steps
        # as multiple samples may be running concurrently already

        logger.debug( "Copying reference file {0} to {1}".format(args.reference,cmd_args['reference']) )
        shutil.copy( args.reference, cmd_args['reference'] )

        # Return code list
        rets = []
        logger.debug(cmd_args)
        #Filter
        def select_keys(d, keys):
            return dict( ((k, v) for k, v in d.items() if k in keys))

        #convert sffs to fastq

        print sh.convert_formats(cmd_args['readsdir'], _out=sys.stdout, _err=sys.stderr)
        #print sh.sff_to_fastq(cmd_args['readsdir'], _out=sys.stdout, _err=sys.stderr)
        try:
            if cmd_args['config']:
                __result = sh.ngs_filter(cmd_args['readsdir'], config=cmd_args['config'], outdir=cmd_args['filtered_dir'])
            else:
                filter_args = select_keys(cmd_args, ["drop_ns", "platforms", "index_min"])
                __result = sh.ngs_filter(cmd_args['readsdir'], outdir=cmd_args['filtered_dir'], **filter_args)
            logger.debug( 'ngs_filter: %s' % __result )
        except sh.ErrorReturnCode, e:
                logger.error(e.stderr)
                sys.exit(1)

        #Trim reads
        cmd = 'trim_reads {filtered_dir} -q {trim_qual} -o {trim_outdir} --head-crop {head_crop}'
        if cmd_args['config']:
            cmd += ' -c {config}'
        primer_info = cmd_args['primer_info']
        if primer_info[0]:
            cmd += " --primer-file %s --primer-seed %s --palindrome-clip %s --simple-clip %s " % primer_info
        p = run_cmd( cmd.format(**cmd_args), stdout=lfile, stderr=subprocess.STDOUT )
        rets.append( p.wait() )
        if rets[-1] != 0:
            logger.critical( "{0} did not exit sucessfully".format(cmd.format(**cmd_args)) )

        # Filter on index quality and Ns

        # Mapping
        with open(bwalog, 'wb') as blog:
            cmd = 'run_bwa_on_samplename {trim_outdir} {reference} -o {bamfile}'
            if cmd_args['config']:
                cmd += ' -c {config}'
            p = run_cmd( cmd.format(**cmd_args), stdout=blog, stderr=subprocess.STDOUT )
            # Wait for the sample to map
            rets.append( p.wait() )
            # Everything else is dependent on bwa finishing so might as well die here
            if rets[-1] != 0:
                cmd = cmd.format(**cmd_args)
                logger.critical( "{0} failed to complete sucessfully. Please check the log file {1} for more details".format(cmd,bwalog) )
                sys.exit(1)

        # Tag Reads
        cmd = 'tagreads {bamfile} -CN {CN}'
        if cmd_args['config']:
            cmd += ' -c {config}'
        p = run_cmd( cmd.format(**cmd_args), stdout=lfile, stderr=subprocess.STDOUT )
        r = p.wait()
        if r != 0:
            logger.critical( "{0} did not exit sucessfully".format(cmd.format(**cmd_args)) )
        rets.append( r )

        # Variant Calling
        cmd = 'base_caller {bamfile} {reference} {vcf} -minth {minth}'
        if cmd_args['config']:
            cmd += ' -c {config}'
        p = run_cmd( cmd.format(**cmd_args), stdout=lfile, stderr=subprocess.STDOUT )
        r = p.wait()
        if r != 0:
            logger.critical( "{0} did not exit sucessfully".format(cmd.format(**cmd_args)) )
        rets.append( r )
        if rets[-1] != 0:
            cmd = cmd.format(**cmd_args)
            logger.critical( '{0} failed to complete successfully'.format(cmd.format(**cmd_args)) )

        # Flagstats
        with open(flagstats,'wb') as flagstats:
            cmd = 'samtools flagstat {bamfile}'
            p = run_cmd( cmd.format(**cmd_args), stdout=flagstats, stderr=lfile, script_dir='' )
            r = p.wait()
            if r != 0:
                logger.critical( "{0} did not exit sucessfully".format(cmd.format(**cmd_args)) )
            rets.append( r )

        # Graphics
        cmd = 'graphsample {bamfile} -od {tdir}'
        p = run_cmd( cmd.format(**cmd_args), stdout=lfile, stderr=subprocess.STDOUT )
        r = p.wait()
        if r != 0:
            logger.critical( "{0} did not exit sucessfully".format(cmd.format(**cmd_args)) )
        rets.append( r )

        # Read Graphics
        fastqs = ' '.join( glob.glob( os.path.join( cmd_args['trim_outdir'], '*.fastq' ) ) )
        cmd = 'fqstats -o {0}.reads.png {1}'.format(cmd_args['bamfile'].replace('.bam',''),fastqs)
        p = run_cmd( cmd, stdout=lfile, stderr=subprocess.STDOUT )
        r = p.wait()
        if r != 0:
            logger.critical( "{0} did not exit sucessfully".format(cmd) )
        rets.append( r )

        # Consensus
        cmd = 'vcf_consensus {vcf} -i {samplename} -o {consensus}'
        p = run_cmd( cmd.format(**cmd_args), stdout=lfile, stderr=subprocess.STDOUT )
        r = p.wait()
        if r != 0:
            logger.critical( "{0} did not exit sucessfully".format(cmd.format(**cmd_args)) )
        rets.append( r )

        # If sum is > 0 then one of the commands failed
        if sum(rets) != 0:
            logger.critical( "!!! There was an error running part of the pipeline !!!" )
            logger.critical( "Please check the logfile {0}".format(logfile) )
            sys.exit( 1 )
        logger.info( "--- Finished {0} ---".format(args.prefix) )

        #subprocess.call( 'git add -A', cwd=tdir, shell=True, stdout=lfile, stderr=subprocess.STDOUT )
        #subprocess.call( 'git commit -am \'runsample\'', cwd=tdir, shell=True, stdout=lfile, stderr=subprocess.STDOUT )

        logger.debug( "Moving {0} to {1}".format( tdir, args.outdir ) )
        # Cannot log any more below this line as the log file will be moved in the following code
        if not os.path.isdir( args.outdir ):
            shutil.move( tdir, args.outdir )
        else:
            file_list = [os.path.join(tdir,m) for m in os.listdir(tdir)]
            for f in file_list:
                shutil.move( f, args.outdir )
Example #27
import time
import string
import traceback # from debugging
import threading
import subprocess as sp
import os
import log

my_file_path = os.path.dirname(os.path.realpath(__file__))
acl_file = os.path.join(my_file_path, 'acl')
update_script = os.path.join(my_file_path, 'updateACL')

buttonPin = "CSID5"
solenoid_pin = "CSID1"
baud_rate = 9600

logger = log.setup_logger('reader.log')

def throws(error_message):
    raise RuntimeError(error_message)

class Authenticator(object):
    '''
    classdocs
    '''
    @staticmethod
    def query(rfidkey):
        if os.path.exists(acl_file):
            with open(acl_file) as text:
                for line in text:
                    member = line.split('|')
                    id = member[0]
Example #28
    #. Symlink all original .ab1 files into this directory
    #. Convert all .ab1 to .fastq 
#. Parse the sanger filename and create ReadsBySample/samplename directory
#. Symlink all .fastq and .ab1 files for that samplename from ReadData into Samplename directory

"""
import shutil
from os.path import *
import os
from glob import glob
from Bio import SeqIO
import re
import sys

import log
logger = log.setup_logger( basename(__file__), log.get_config() )

# For invalid formatted filenames
class InvalidFormat(Exception): pass

def sync_sanger( runpath, ngsdata ):
    rund = basename( runpath )
    rawd = join( ngsdata, 'RawData', 'Sanger', rund )
    readd = join( ngsdata, 'ReadData', 'Sanger', rund )

    sync_run( runpath, ngsdata )
    sync_readdata( rawd, ngsdata )
    link_reads( readd, ngsdata )

def sync_run( runpath, ngsdata ):
    '''
Example #29
import logging
import signal
import time

# NB: pool should be initialized before importing
# any of cocaine-framework-python modules to avoid
# tornado ioloop dispatcher issues
import monitor_pool

from cocaine.asio.exceptions import LocatorResolveError
from cocaine.worker import Worker
import elliptics

import log

try:
    log.setup_logger('mm_cache_logging')
    logger = logging.getLogger('mm.init')
except LocatorResolveError:
    log.setup_logger()
    logger = logging.getLogger('mm.init')
    logger.warn('mm_cache_logging is not set up properly in '
        'cocaine.conf, fallback to default logging service')

from config import config
import storage
import cache
from db.mongo.pool import MongoReplicaSetClient
import infrastructure
import infrastructure_cache
import jobs
import node_info_updater
Example #30
from hardware import *
from so import *
import log

##
##  MAIN
##
if __name__ == '__main__':
    log.setup_logger()
    log.logger.info('Starting emulator')

    ## create 3 programs
    ###################
    prg1 = Program(
        "prg1.exe",
        [ASM.CPU(2), ASM.IO(),
         ASM.CPU(3), ASM.IO(),
         ASM.CPU(2)])
    prg2 = Program("prg2.exe", [ASM.CPU(4), ASM.IO(), ASM.CPU(1)])
    prg3 = Program("prg3.exe", [ASM.CPU(3)])

    ## setup our hardware and set memory size to 25 "cells"
    HARDWARE.setup(25)
    # add programs to hardware hard disk
    HARDWARE.addProgram(prg1)
    HARDWARE.addProgram(prg2)
    HARDWARE.addProgram(prg3)

    ## now create the Operating System Kernel
    kernel = Kernel()
Example #31
# -*- coding: utf-8 -*-
# @Time    : 2020/6/28 9:56
# @Author  : SwordLight
# @File    : run.py
import argparse
import json

import config
import log
from config import HEADER
from core.analyse_by_parser import analyse
from core.colors import red, white, end
from utils import extractHeaders, prompt

logger = log.setup_logger()

print('''%s
\tXSSLearner %sv1.0.0
%s''' % (red, white, end))
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--url", required=True, help="Target url")
parser.add_argument("--path", help="Params in url", action='store_true')
# double quotes in parser arguments must be escaped with \
parser.add_argument("--data",
                    help="Use post method to send the data(dict)",
                    type=str)  #{\"searchFor\":\"hi\",\"goButton\":\"go\"}
parser.add_argument('--header',
                    help='Add header',
                    dest='add_header',
                    nargs='?',
                    const=True)
Example #32
def main():
    args, qsubargs = parse_args()
    # Qsub job?
    if qsubargs:
        runsampleargs, _ = split_args(' '.join(sys.argv[1:]))
        print pbs_job(runsampleargs, qsubargs)
        sys.exit(1)
    # So we can set the global logger
    global logger
    # Setup analysis directory
    if os.path.isdir(args.outdir):
        if os.listdir(args.outdir):
            raise AlreadyExists("{0} already exists and is not empty".format(
                args.outdir))
    else:
        os.makedirs(args.outdir)

    # tempdir root will be TMPDIR environ variable if it exists
    # unless outdir is set
    # allows user to specify TMPDIR somewhere else if they want such as
    # /dev/shm
    tmpdir = args.outdir
    # Directory analysis is run in will be inside of tmpdir
    tdir = tempfile.mkdtemp('runsample', args.prefix, dir=tmpdir)
    os.environ['TMPDIR'] = tdir

    bamfile = os.path.join(tdir, args.prefix + '.bam')
    flagstats = os.path.join(tdir, 'flagstats.txt')
    consensus = bamfile + '.consensus.fasta'
    vcf = bamfile + '.vcf'
    bwalog = os.path.join(tdir, 'bwa.log')
    stdlog = os.path.join(tdir, args.prefix + '.std.log')
    logfile = os.path.join(tdir, args.prefix + '.log')
    CN = args.CN

    # Set the global logger
    config = log.get_config(logfile)
    logger = log.setup_logger('runsample', config)

    #make_project_repo( tdir )

    logger.info("--- Starting {0} --- ".format(args.prefix))
    if args.config:
        logger.info("--- Using custom config from {0} ---".format(args.config))
    # Write all stdout/stderr to a logfile from the various commands
    with open(stdlog, 'wb') as lfile:
        cmd_args = {
            'samplename':
            args.prefix,
            'tdir':
            tdir,
            'readsdir':
            args.readsdir,
            'reference':
            os.path.join(tdir, os.path.basename(args.reference)),
            'bamfile':
            bamfile,
            'flagstats':
            flagstats,
            'consensus':
            consensus,
            'vcf':
            vcf,
            'CN':
            CN,
            'trim_qual':
            args.trim_qual,
            'trim_outdir':
            os.path.join(tdir, 'trimmed_reads'),
            'filtered_dir':
            os.path.join(tdir, 'filtered'),
            'head_crop':
            args.head_crop,
            'minth':
            args.minth,
            'config':
            args.config,
            'platforms':
            args.platforms,
            'drop_ns':
            args.drop_ns,
            'index_min':
            args.index_min,
            'primer_info': (args.primer_file, args.primer_seed,
                            args.palindrom_clip, args.simple_clip)
        }

        # Best not to run across multiple cpu/core/threads on any of the pipeline steps
        # as multiple samples may be running concurrently already

        logger.debug("Copying reference file {0} to {1}".format(
            args.reference, cmd_args['reference']))
        shutil.copy(args.reference, cmd_args['reference'])

        # Return code list
        rets = []
        logger.debug(cmd_args)

        #Filter
        def select_keys(d, keys):
            return dict(((k, v) for k, v in d.items() if k in keys))

        #convert sffs to fastq

        print sh.convert_formats(cmd_args['readsdir'],
                                 _out=sys.stdout,
                                 _err=sys.stderr)
        #print sh.sff_to_fastq(cmd_args['readsdir'], _out=sys.stdout, _err=sys.stderr)
        try:
            if cmd_args['config']:
                __result = sh.ngs_filter(cmd_args['readsdir'],
                                         config=cmd_args['config'],
                                         outdir=cmd_args['filtered_dir'])
            else:
                filter_args = select_keys(
                    cmd_args, ["drop_ns", "platforms", "index_min"])
                __result = sh.ngs_filter(cmd_args['readsdir'],
                                         outdir=cmd_args['filtered_dir'],
                                         **filter_args)
            logger.debug('ngs_filter: %s' % __result)
        except sh.ErrorReturnCode, e:
            logger.error(e.stderr)
            sys.exit(1)

        #Trim reads
        cmd = 'trim_reads {filtered_dir} -q {trim_qual} -o {trim_outdir} --head-crop {head_crop}'
        if cmd_args['config']:
            cmd += ' -c {config}'
        primer_info = cmd_args['primer_info']
        if primer_info[0]:
            cmd += " --primer-file %s --primer-seed %s --palindrome-clip %s --simple-clip %s " % primer_info
        p = run_cmd(cmd.format(**cmd_args),
                    stdout=lfile,
                    stderr=subprocess.STDOUT)
        rets.append(p.wait())
        if rets[-1] != 0:
            logger.critical("{0} did not exit sucessfully".format(
                cmd.format(**cmd_args)))

        # Filter on index quality and Ns

        # Mapping
        with open(bwalog, 'wb') as blog:
            cmd = 'run_bwa_on_samplename {trim_outdir} {reference} -o {bamfile}'
            if cmd_args['config']:
                cmd += ' -c {config}'
            p = run_cmd(cmd.format(**cmd_args),
                        stdout=blog,
                        stderr=subprocess.STDOUT)
            # Wait for the sample to map
            rets.append(p.wait())
            # Everything else is dependent on bwa finishing so might as well die here
            if rets[-1] != 0:
                cmd = cmd.format(**cmd_args)
                logger.critical(
                    "{0} failed to complete sucessfully. Please check the log file {1} for more details"
                    .format(cmd, bwalog))
                sys.exit(1)

        # Tag Reads
        cmd = 'tagreads {bamfile} -CN {CN}'
        if cmd_args['config']:
            cmd += ' -c {config}'
        p = run_cmd(cmd.format(**cmd_args),
                    stdout=lfile,
                    stderr=subprocess.STDOUT)
        r = p.wait()
        if r != 0:
            logger.critical("{0} did not exit sucessfully".format(
                cmd.format(**cmd_args)))
        rets.append(r)

        # Variant Calling
        cmd = 'base_caller {bamfile} {reference} {vcf} -minth {minth}'
        if cmd_args['config']:
            cmd += ' -c {config}'
        p = run_cmd(cmd.format(**cmd_args),
                    stdout=lfile,
                    stderr=subprocess.STDOUT)
        r = p.wait()
        if r != 0:
            logger.critical("{0} did not exit sucessfully".format(
                cmd.format(**cmd_args)))
        rets.append(r)
        if rets[-1] != 0:
            cmd = cmd.format(**cmd_args)
            logger.critical('{0} failed to complete successfully'.format(
                cmd.format(**cmd_args)))

        # Flagstats
        with open(flagstats, 'wb') as flagstats:
            cmd = 'samtools flagstat {bamfile}'
            p = run_cmd(cmd.format(**cmd_args),
                        stdout=flagstats,
                        stderr=lfile,
                        script_dir='')
            r = p.wait()
            if r != 0:
                logger.critical("{0} did not exit sucessfully".format(
                    cmd.format(**cmd_args)))
            rets.append(r)

        # Graphics
        cmd = 'graphsample {bamfile} -od {tdir}'
        p = run_cmd(cmd.format(**cmd_args),
                    stdout=lfile,
                    stderr=subprocess.STDOUT)
        r = p.wait()
        if r != 0:
            logger.critical("{0} did not exit sucessfully".format(
                cmd.format(**cmd_args)))
        rets.append(r)

        # Read Graphics
        fastqs = ' '.join(
            glob.glob(os.path.join(cmd_args['trim_outdir'], '*.fastq')))
        cmd = 'fqstats -o {0}.reads.png {1}'.format(
            cmd_args['bamfile'].replace('.bam', ''), fastqs)
        p = run_cmd(cmd, stdout=lfile, stderr=subprocess.STDOUT)
        r = p.wait()
        if r != 0:
            logger.critical("{0} did not exit sucessfully".format(cmd))
        rets.append(r)

        # Consensus
        cmd = 'vcf_consensus {vcf} -i {samplename} -o {consensus}'
        p = run_cmd(cmd.format(**cmd_args),
                    stdout=lfile,
                    stderr=subprocess.STDOUT)
        r = p.wait()
        if r != 0:
            logger.critical("{0} did not exit sucessfully".format(
                cmd.format(**cmd_args)))
        rets.append(r)

        # If sum is > 0 then one of the commands failed
        if sum(rets) != 0:
            logger.critical(
                "!!! There was an error running part of the pipeline !!!")
            logger.critical("Please check the logfile {0}".format(logfile))
            sys.exit(1)
        logger.info("--- Finished {0} ---".format(args.prefix))

        #subprocess.call( 'git add -A', cwd=tdir, shell=True, stdout=lfile, stderr=subprocess.STDOUT )
        #subprocess.call( 'git commit -am \'runsample\'', cwd=tdir, shell=True, stdout=lfile, stderr=subprocess.STDOUT )

        logger.debug("Moving {0} to {1}".format(tdir, args.outdir))
        # Cannot log any more below this line as the log file will be moved in the following code
        if not os.path.isdir(args.outdir):
            shutil.move(tdir, args.outdir)
        else:
            file_list = [os.path.join(tdir, m) for m in os.listdir(tdir)]
            for f in file_list:
                shutil.move(f, args.outdir)
Example #33
#!/usr/bin/env python

from subprocess import Popen, PIPE
import sys
import json

from collections import namedtuple
from itertools import izip

from matplotlib.lines import Line2D

import log
import samtools
logger = log.setup_logger(__name__, log.get_config())

# Alias our region strings
G = 'Gap'
N = 'Normal'
LC = 'LowCoverage'
LQ = 'LowQuality'
LCQ = 'LowCovQual'

# As a list
REGIONTYPES = [
    G, N, LC, LQ, LCQ
]

def parse_pileup( pileup ):
    '''
    Parses the raw pileup output from samtools mpileup and returns a dictionary
    with stats for every reference in the pileup
Example #34
def exec_demo(demo_params):
    """
    Execute grid search over the param_grid defined in demo_params,
    using the data from the crowd-sourced annotations.
    :param demo_params:
    :return:
    """
    logger = log.setup_logger(__name__)

    #ignore this line... It's a long story.
    feature_type = "ver1"

    # extract base parameters
    demo_id, name, train_file, dev_file, test_file, output_folder = utils.extract_base_demo_params(
        demo_params)
    print_log(
        logger,
        "\n".join([str((key, demo_params[key])) for key in list(demo_params)]))

    train_file_extra_points = demo_params["train_file_extra_points"]
    param_grid = demo_params["param_grid"]

    # define the scoring function for the grid search
    my_scorer = sklearn.metrics.make_scorer(metrics.my_scorer.get_evaluation)

    # track some result from the grid search used for tuning the hyperparameter delta
    fscores = {}
    epsilons_list = {}
    max_iterations_list = {}
    best_eval = {"F-score": 0}

    # pre-processing the data (remove tags and other stuff)
    print_log(logger, "Making datasets...")
    task1_train_data = data_parsers.make_dataset.parse_file(open(train_file))
    dev_data = data_parsers.make_dataset.parse_file(open(dev_file))
    test_data = data_parsers.make_dataset.parse_file(open(test_file))
    extra_points_train_data = data_parsers.make_dataset.parse_file(
        open(train_file_extra_points))

    train_data = task1_train_data + extra_points_train_data

    print_log(logger, "train data size: %s" % len(train_data))
    print_log(logger, "development data size: %s" % len(dev_data))
    print_log(logger, "test data size: %s" % len(test_data))

    # compute the maximum delta possible (from the length of the longest word
    # in the train and development set)
    max_delta = max(utils.find_max_len(train_data),
                    utils.find_max_len(dev_data))
    if max_delta > settings.MAX_ALLOWABLE_DELTA:
        max_delta = settings.MAX_ALLOWABLE_DELTA
    print_log(logger, "max delta: %s" % max_delta)

    # repeat the grid search for each possible value of delta
    for delta in range(1, max_delta + 1):
        os.makedirs(output_folder + "/%02d" % delta, exist_ok=True)

        print_log(logger, "Training with delta=%s" % delta)
        X_train, y_train = features.extract_features.get_features_and_labels(
            train_data, delta, feature_type)
        X_dev, y_dev = features.extract_features.get_features_and_labels(
            dev_data, delta, feature_type)
        X_test, y_test = features.extract_features.get_features_and_labels(
            test_data, delta, feature_type)

        model = utils.run_grid_search(X_train, y_train, X_dev, y_dev,
                                      param_grid, my_scorer)

        best_cv_epsilon = model.best_params_["epsilon"]
        best_cv_max_iterations = model.best_params_["max_iterations"]

        # the best score will be considered in order to pick the best model
        fscores[delta] = model.best_score_
        epsilons_list[delta] = best_cv_epsilon
        max_iterations_list[delta] = best_cv_max_iterations

        print_log(
            logger,
            "Best params for delta %02d: max_iterations=%d\tepsilon=%.2E" %
            (delta, best_cv_max_iterations, best_cv_epsilon))
        print_log(logger, "Best CV score: " + str(model.best_score_))

        # test the model on the test set. NOTICE: the result will not be considered for the choice
        # of the hyperparameter delta!
        print_log(logger, "***Predict test with the grid search model:***")

        y_test_pred = model.predict(X_test)
        test_eval = metrics.evaluation.get_evaluation(feature_type, y_test,
                                                      y_test_pred)
        print_log(
            logger, "F-score on test (grid search with delta=%s): %s" %
            (delta, test_eval["F-score"]))

        # save some result from the grid search
        curpath = output_folder + "/%02d" % delta + "/" + name + "_" + "%02d" % delta
        utils.write_model(model, open(curpath + "_gridsearch.model", "wb+"))
        utils.write_predictions(feature_type, open(test_file), y_test_pred,
                                open(curpath + ".pred", "w+"))
        utils.write_evaluation(test_eval, open(curpath + ".eval", "w+"))
        utils.write_fails(open(test_file), y_test, y_test_pred,
                          open(curpath + ".fails", "w+"), feature_type)
        details.print_gridsearch_details(model,
                                         file=open(
                                             curpath + "_gridsearch.details",
                                             "w+"))

        print_log(logger, "#" * 50)

    print_log(logger, "-" * 50)
    max_fscore = max(fscores.values())
    max_fscore_delta = [i for i in fscores.keys()
                        if fscores[i] == max_fscore][0]
    best_model_num = max_fscore_delta
    best_epsilon = epsilons_list[best_model_num]
    best_max_iterations = max_iterations_list[best_model_num]

    freport = open(output_folder + "/report.txt", "w+")
    print_log(
        logger,
        "The best model found is the one with delta: %s" % best_model_num)
    print_log(
        logger, "With best parameters: max_iterations=%s, epsilon=%s" %
        (best_max_iterations, best_epsilon))
    print_log(logger, "CV F-score: %s" % max_fscore)
    print("The best model found is the one with delta: %s" % best_model_num,
          file=freport)
    print("With best parameters: max_iterations=%s, epsilon=%s" %
          (best_max_iterations, best_epsilon),
          file=freport)
    print("CV F-score: %s" % max_fscore, file=freport)

    best_model_path = output_folder + "/%02d" % best_model_num + "/" + name + "_" + "%02d" % best_model_num + "_gridsearch.model"
    best_model = pickle.load(open(best_model_path, "rb"))
    X_test, y_test = features.extract_features.get_features_and_labels(
        test_data, best_model_num, feature_type)

    y_pred = best_model.predict(X_test)
    delta_evaluation = metrics.evaluation.get_evaluation(
        feature_type, y_test, y_pred)
    print_log(
        logger, "delta: %s\tF-score, : %s" %
        (best_model_num, delta_evaluation["F-score"]))
    print("F-score on test set: %s" % delta_evaluation["F-score"],
          file=freport)
Example #35
import sys
import logging

# NB: pool should be initialized before importing
# any of cocaine-framework-python modules to avoid
# tornado ioloop dispatcher issues
import monitor_pool

from cocaine.worker import Worker
from cocaine.futures import chain

sys.path.append('/usr/lib')

import msgpack

import elliptics

import log
log.setup_logger()
logger = logging.getLogger('mm.init')

# storage should be imported before balancer
# TODO: remove this dependency
import storage
import balancer
from db.mongo.pool import MongoReplicaSetClient
import helpers
import history
import infrastructure
import jobs
import couple_records
import minions
import node_info_updater
from planner import Planner
Example #36
from glob import glob
from os.path import *
import os
import sys
import re
import log
from Bio import SeqIO
import gzip

logger = log.setup_logger(__name__, log.get_config())

ROCHE_FILE = '\S+?(?:__[0-9]){0,1}__(?:TI|RL)\d+__\d{4}_\d{2}_\d{2}__\w+.(sff|fastq)'
'''
Matches roche sff or fastq files

sample__region__barcode__year_month_day__type.filetype
'''
ROCHE_ID = '[A-Z0-9]{14}'
'''
Matches Roche accessions which are just 14 Alpha Numeric uppercase characters

@AAAAAAAAAAAAAA
'''
IONTORRENT_FILE = '\S+?__[0-9]__IX\d{3}__\d{4}_\d{2}_\d{2}__\w+.(sff|fastq)'
'''
Matches IonTorrent file names(essentially same as roche)

sample__region__barcode__year__month__day__type.filetype
'''
IONTORRENT_ID = '[A-Z]{5}:[0-9]+:[0-9]+'
'''
Example #37
import sys
import os
from os.path import *
import subprocess
import argparse

import bqd, graph_qualdepth as qd
import samtools
from bam_to_qualdepth import set_unmapped_mapped_reads
import json
import log

logc = log.get_config( 'graphsample.log' )
logger = log.setup_logger( 'graphsample', logc )

def main():
    args = parse_args()
    args = handle_args( args )
    if not args.qualdepth:
        jfile = make_json( args.bamfile, args.outpath )
    else:
        jfile = args.qualdepth
    pngfile = make_image( jfile, args.outpath )

def make_json( bamfile, outpathprefix ):
    pileup = samtools.nogap_mpileup(bamfile)
    stats = bqd.parse_pileup( pileup )
    set_unmapped_mapped_reads( bamfile, stats )
    outfile = outpathprefix + '.qualdepth.json'
    with open( outfile, 'w' ) as fh:
        json.dump( stats, fh )
Example #38
import os
import configparser
import logging
from multiprocessing import Manager, Lock, Process
from threading import Thread
from qlf_models import QLFModels

qlf_root = os.getenv('QLF_ROOT')
cfg = configparser.ConfigParser()

cfg.read('%s/framework/config/qlf.cfg' % qlf_root)
qlconfig = cfg.get('main', 'qlconfig')
logmain = cfg.get('main', 'logfile')
logpipeline = cfg.get('main', 'logpipeline')
desi_spectro_redux = cfg.get('namespace', 'desi_spectro_redux')

logger = logging.getLogger("main_logger")
pipe_logger = setup_logger('logpipeline', logpipeline)


class QLFProcess(object):
    """ Class responsible for managing Quick Look pipeline process. """
    def __init__(self, data):
        self.pipeline_name = 'Quick Look'
        self.data = data
        self.models = QLFModels()

        output_dir = os.path.join('exposures', self.data.get('night'),
                                  self.data.get('zfill'))

        output_full_dir = os.path.join(desi_spectro_redux, output_dir)

        # Remove old dir
Example #39
from glob import glob
from os.path import *
import numpy as np
import matplotlib.pyplot as plt

from nose.tools import ok_, eq_

from datetime import datetime
import log

logc = log.get_config()
logger = log.setup_logger( 'graph_times', logc )

def main():
    ss = start_stop( 'Projects' )
    logger.info( "Plotting all projects inside of {0}".format('Projects') )
    x,y = [],[]
    samplenames = sorted(ss.keys())
    for sn in samplenames:
        x.append( sn )
        y.append( ss[sn] )
    fig = plt.figure()
    fig.set_size_inches( 20.0, 8.0 )
    fig.suptitle( 'Pipeline Time per Sample' )
    ax = plt.gca()
    ax.plot( range(len(x)), y )
    ax.set_xlim([0,len(x)-1])
    ax.set_ylim([0,max(y)])
    ax.set_xticks( range(0,len(x)) )
    ax.set_xticklabels( x, rotation='vertical' )
    ax.set_ylabel( 'Seconds' )
Example #40
import os

import numpy as np

import RunningStats
from collections import deque

import fft
import hardware_adapter
import services.Service
import configuration_manager
from log import setup_logger

cm = configuration_manager.Configuration()

GPIO_LEN = 6

config_path = os.path.dirname(
    os.path.realpath(__file__)) + '/../config/mopidy.conf'
logger = setup_logger("Light Show Service")
decay = np.zeros(GPIO_LEN, dtype='float32')
fft_calc = fft.FFT(cm.light_show.chunk_size, cm.light_show.sample_rate,
                   GPIO_LEN, cm.light_show.min_frequency,
                   cm.light_show.max_frequency,
                   cm.light_show.custom_channel_mapping,
                   cm.light_show.custom_channel_frequencies, 1)


class LightShowService(services.Service.Service):
    def __init__(self):
        super().__init__()
        self.requires_gpio = True
        self.process = None

    def run(self):
Example #41
def main():
    args = docopt(__doc__, version='zenfeed ' + VERSION)

    log_arg, log_level = args['--log'].rsplit(':', 1)
    if log_arg not in ('stderr', 'syslog'):
        setup_logger(type='file',
                     filename=path(log_arg).abspath(),
                     level=log_level)
    else:
        setup_logger(type=log_arg, level=log_level)

    logger.info('Zenfeed %s booting...', VERSION)

    if args['genstatic']:
        return genstatic(args['PATH'])

    port = int(args['--port'])

    cache_disabled = args['--no-cache']

    path_prefix = args['--prefix']
    if path_prefix.endswith('/'):
        path_prefix = path_prefix[:-1]
    if path_prefix and not path_prefix.startswith('/'):
        path_prefix = '/' + path_prefix

    fixed_language = args['--lang']
    if fixed_language == 'browser':
        fixed_language = None
    else:
        logger.info('Language fixed to "%s"', fixed_language)
        if (fixed_language not in LANGUAGES
                and fixed_language.split('_', 1)[0] not in LANGUAGES):
            return logger.critical('Fixed language not supported !')

    fixed_timezone = args['--tz']
    logger.info('Timezone fixed to "%s"', fixed_timezone)
    if fixed_timezone not in all_timezones:
        return logger.critical('Fixed timezone not supported !')

    db_uri = args['--database']
    if db_uri == ':memory:':
        db_uri = 'sqlite://'
    elif not "://" in db_uri:
        db_uri = 'sqlite:///%s' % path(db_uri).abspath()

    import app as app_module
    app = app_module.create_flask_app(prefix=path_prefix)
    app.config.update(
        DEBUG=args['--debug'],
        SQL_DEBUG=False,
        SECRET_KEY=urandom(32),
        SQLALCHEMY_DATABASE_URI=db_uri,
        FAVICON_DIR=path(args['--favicons']).abspath(),
        FIXED_LANGUAGE=fixed_language,
        FIXED_TIMEZONE=fixed_timezone,
        CACHE_ENABLED=not cache_disabled,
        PATH_PREFIX=path_prefix,
    )
    Cache(app)

    from models import setup_tables, Feed
    patch_socket()
    patch_ssl()
    setup_tables()

    from deadline_manager import deadlineManager
    import views
    from werkzeug.contrib.fixers import ProxyFix
    app.wsgi_app = ProxyFix(app.wsgi_app)

    feeds = Feed.query.all()
    deadlineManager.favicon_dir = path(args['--favicons']).abspath()
    deadlineManager.launch_deadline_workers(feeds)
    deadlineManager.start()

    logger.info("Server started at port %d (prefix: %s/)", port, path_prefix)
    if args['--debug']:
        logger.warning("DEBUG mode activated")
        app.run(host='0.0.0.0', port=port, debug=True)
    else:
        from gevent.wsgi import WSGIServer
        http_server = WSGIServer(('0.0.0.0', port), app)
        try:
            http_server.serve_forever()
        except KeyboardInterrupt:
            pass
Пример #42
0
                    index="osm",
                    doc_type="diff",
                    id=config.diffs_sequence,
                    body=action)
        except Exception as e:
            logger.error('could not store diff in ES')
            logger.debug(e.message)


if __name__ == '__main__':

    #arg parsing
    arguments = docopt(__doc__, version=__version__)

    #set up logging
    log.setup_logger(debug=arguments['--debug'])
    logger = log.get_logger()

    # instantiate a changesets retriever
    changesets_retriever = ChangesetsMetaRetrieverDaemon(
        os.path.join(
            config.tempdir,
            '{}.pid'.format('changesets_{}'.format(__name__))))

    # and a changesets retriever
    diff_retriever = diffRetrieverDaemon(
        os.path.join(
            config.tempdir,
            '{}.pid'.format('diff_{}'.format(__name__))))

    # handle debugging mode
Пример #43
0
def main():
	import log
	args = get_args()
	logfile = log.get_logfile_path(args.manifest)
	log.setup_logger(logfile=logfile, debug=args.debug)
	run(args)
Пример #44
0
Optimizations from work by Scott Driscoll:
http://www.instructables.com/id/Raspberry-Pi-Spectrum-Analyzer-with-RGB-LED-Strip-/

Third party dependencies:

numpy: for array support - http://www.numpy.org/
rpi-audio-levels - https://bitbucket.org/tom_slick/rpi-audio-levels (modified for lightshowpi)
"""

from numpy import *
import math
from log import setup_logger

from rpi_audio_levels import AudioLevels

logging = setup_logger('FFT')


class FFT(object):
    def __init__(self,
                 chunk_size,
                 sample_rate,
                 num_bins,
                 min_frequency,
                 max_frequency,
                 custom_channel_mapping,
                 custom_channel_frequencies,
                 input_channels=2):
        """
        :param chunk_size: chunk size of audio data
        :type chunk_size: int
Пример #45
0
import subprocess
import os
import argparse
import sys
from os.path import basename, join, isdir, dirname, expandvars
from glob import glob
import tempfile
import reads
import shlex
import data
from ngs_mapper import compat

import log
lconfig = log.get_config()
logger = log.setup_logger( 'trim_reads', lconfig )

def main():
    args = parse_args()
    trim_reads_in_dir(
        args.readsdir,
        args.q,
        args.outputdir,
        head_crop=args.headcrop,
        platforms=args.platforms,
        primer_info=[args.primer_file, args.primer_seed, args.palindrom_clip, args.simple_clip]
    )

def trim_reads_in_dir( *args, **kwargs ):
    '''
        Trims all read files in a given directory and places the resulting files into out_path directory
Пример #46
0
import math
import Platform
from log import setup_logger
from time import sleep

PINS = [0, 2, 3, 21, 22, 23]
RED_PIN = PINS[2]
GREEN_PIN = PINS[1]
BLUE_PIN = PINS[0]

is_a_raspberryPi = Platform.platform_detect() == 1
is_gpio_enabled = False
logger = setup_logger("Hardware Adapter")

if is_a_raspberryPi:
    import wiringpi
else:
    # if this is not an RPi you can't run wiringpi, so let's load
    # something in its place
    import wiring_pi as wiringpi
    logger.info("Detected: Not running on a Raspberry Pi")

wiringpi.wiringPiSetup()


def enable_gpio():
    """Attempts to take hold of the gpio from wiring pi."""
    global is_gpio_enabled
    for pin in PINS:
        wiringpi.softPwmCreate(pin, 0, 100)
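    # (Remainder of the function is truncated in this excerpt; presumably the
    # flag is set once every pin has been claimed -- stated as an assumption.)
    is_gpio_enabled = True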
Пример #47
0
    def __init__(
            self, jobname, geometry, kspace=KSpaceRectangular(),
            resolution=defaults.default_resolution,
            mesh_size=defaults.default_mesh_size,
            numbands=defaults.default_numbands,
            initcode=defaults.default_initcode,
            runcode=defaults.default_runcode,
            postcode=defaults.default_postcode,
            modes=None,
            work_in_subfolder=True, clear_subfolder=True,
            logger=True, quiet=defaults.isQuiet):
        """Create a simulation object with all parameters describing the
        simulation, including a unique jobname (all generated filenames
        will include this name), the geometry (pyMPB Geometry object),
        the kspace (a pyMPB KSpace object), the resolution, mesh_size
        (see MPB docs), number of bands to calculate and some optional
        strings with Scheme code which will be added to the MPB .ctl
        file as initialization code (initcode), as run commands
        (runcode) and as code executed after the simulation (postcode).

        If modes is None (default), automatically detect the simulated
        modes from runcode, otherwise specify a list of modes (e.g. 'te',
        'zeven' etc.). Note that modes are used to extract (grep) the
        frequencies and other data from MPB output or assign exported
        files. Sometimes data in the output might need to be
        additionally marked with a prefix, e.g. "sim-tefreqs:" for
        te-mode frequencies which should solely be exported while normal
        "tefreqs:" output should be ignored. In this case the mode
        should be called "sim-te" where the "sim-" part acts as an
        optional prefix. Only if no "sim-te<somedata>:" lines are found
        in the output, "te<somedata>:" lines will also be searched and
        the data exported. The prefix will always be omitted in
        filenames of exported data.

        If work_in_subfolder is True (default), all simulation and log
        output will be placed in a separate subdirectory under the
        current working directory called like the jobname.
        work_in_subfolder can also be a custom subfolder name,
        including any path, if not to be placed in current working
        directory.

        Set clear_subfolder to True (default) if you want this
        subfolder to be emptied (will make backup if there is an old
        folder with the same name). clear_subfolder should be False if
        you want to do postprocessing on existing simulation data.

        If logger is True (default), a jobname.log file will be created
        with all pyMPB output and errors. This output will also go to
        stdout if quiet is False. Alternatively, set logger to a
        customized logger (any object with log(level, msg, *args,
        **kwargs) method).

        """

        self.jobname = jobname
        self.geometry = geometry
        self.kspace = kspace
        self.initcode = initcode
        self.postcode = postcode
        self.resolution = resolution
        self.meshsize = mesh_size
        self.numbands = numbands
        self.quiet = quiet
        self.runcode = runcode

        self.work_in_subfolder = work_in_subfolder
        self.clear_subfolder = clear_subfolder
        if isinstance(work_in_subfolder, bool):
            if work_in_subfolder:
                # create default subfolder from jobname:
                self.workingdir = path.abspath(
                    path.join(path.curdir, jobname))
            else:
                # work here, no subfolder:
                self.workingdir = path.abspath(path.curdir)
        else:
            # hopefully a string
            self.workingdir = path.abspath(
                    path.join(path.curdir, work_in_subfolder))

        # the .ctl file that MPB will use:
        self.ctl_file = jobname + '.ctl'
        # a date & time stamp added to log and output filenames:
        dtstamp = ('_{0.tm_year}-{0.tm_mon:02}-{0.tm_mday:02}'
                   '_{0.tm_hour:02}-{0.tm_min:02}-'
                   '{0.tm_sec:02}').format(time.localtime())
        # the output file, where all MPB output will go:
        self.out_file = path.join(self.workingdir, jobname + dtstamp + '.out')
        # a log file, where information from pyMPB will go:
        self.log_file = path.join(self.workingdir, jobname + dtstamp + '.log')
        # the file where MPB usually saves the dielectric:
        self.eps_file = path.join(self.workingdir, 'epsilon.h5')

        # logger is not setup yet, because the log file might be placed in a
        # subfolder that still needs to be created. But, I want to log that
        # I created a new directory. So make a simple log buffer:
        to_log = []

        to_log.append('Working in directory ' + self.workingdir)
        if self.work_in_subfolder:
            if path.exists(self.workingdir):
                to_log.append('directory exists already: ' + self.workingdir)
                if self.clear_subfolder:
                    # directory exists, make backup
                    backupdir = self.workingdir + '_bak'
                    if path.exists(backupdir):
                        # previous backup exists already, remove old
                        # backup, but keep .log and .out files (they have
                        # unique names):
                        keepers = (glob1(self.workingdir + '_bak', '*.log') +
                                glob1(self.workingdir + '_bak', '*.out'))
                        to_log.append(
                            ('removing existing backup {0}, but keeping {1}'
                            ' old log and output files').format(
                                backupdir, len(keepers)))
                        for f in keepers:
                            rename(path.join(backupdir, f),
                                path.join(self.workingdir, f))
                        rmtree(backupdir)
                        to_log.append(backupdir + ' removed')
                    # rename current (old) dir to backup:
                    rename(self.workingdir, backupdir)
                    to_log.append('existing ' + self.workingdir +
                                  ' renamed to ' + backupdir)
                    # make new empty working directory:
                    mkdir(self.workingdir)
                    to_log.append(
                        'created directory ' + self.workingdir + '\n')
                else:
                    to_log.append('working in existing directory.')
            else:
                # make new empty working directory:
                mkdir(self.workingdir)
                to_log.append('created directory ' + self.workingdir + '\n')

        if logger:
            if hasattr(logger, 'log') and callable(logger.log):
                # a custom logger was given as parameter, use it:
                log.logger = logger
            else:
                # Create the logger. Afterwards, we can also use
                # log.info() etc. in other modules. All status, logging
                # and stderr output will go through this logger (except
                # MPB's output during simulation):
                log.setup_logger(
                    'root.' + self.jobname, self.log_file, self.quiet,
                    redirect_stderr=True)

        # now we can log the stuff from before:
        if to_log:
            log.info('\n' + '\n'.join(to_log))
        del to_log

        # get modes from runcode:
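        # e.g. a runcode of "(run-te)\n(run-zeven)" yields modes ['te', 'zeven']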
        if modes is None:
            self.modes = re.findall(
                r"\(run[-]?(.*?)[\s\)]", runcode, re.MULTILINE)
        else:
            self.modes = modes
        # make them unique (some might occur multiple times in runcode):
        self.modes = list(set(self.modes))

        self.number_of_tiles_to_output = defaults.number_of_tiles_to_output

        # In 3D, there are no pure tm or te modes. MPB renames them
        # automatically to zodd and zeven, respectively. Do the same:
        if self.geometry.is3D:
            for i, mode in enumerate(self.modes):
                if mode == 'te':
                    log.info('In 3D, there is no pure TE mode. '
                             'I will change it to zeven.')
                    self.modes[i] = 'zeven'
                if mode == 'tm':
                    log.info('In 3D, there is no pure TM mode. '
                             'I will change it to zodd.')
                    self.modes[i] = 'zodd'

        log.info("working with modes: " + str(self.modes) + '\n')

        new_environ_dict = {
            'GUILE_WARN_DEPRECATED': 'no',
            # load scheme files also from pyMPB directory (e.g. dos.scm):
            'GUILE_LOAD_PATH' : path.dirname(path.abspath(graphics.__file__))}
        environ.update(new_environ_dict)
        log.info('added to environment:' +
                 ''.join(['\n  {0}={1}'.format(key, environ[key]) for key in
                         new_environ_dict.keys()]))

        log.info(
            'pyMPB Simulation created with following properties:' +
            ''.join(['\npyMPBprop: {0}={1!r}'.format(key, val) for key, val in
                self.__dict__.items()]) + '\n\n')
Пример #48
0
# -*- coding: utf-8 -*-
# @Time    : 2020/6/19 16:12
# @Author  : SwordLight
# @File    : utils.py
import os
import random
import re
import tempfile
from urllib import parse

from config import defaultEditor
from core.colors import white, yellow
from log import setup_logger

logger = setup_logger(__name__)


def gen_scout_str(length=6):
    """Generate a scout (probe) string of the given length."""
    chars = "QWERTYUIOPASDFGHJKLZXCVBNMqwertyuiopasdfghjklzxcvbnm0123456789"
    scout = ''
    for i in range(length):
        scout += random.choice(chars)
    return scout
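# Illustrative usage (output is random): gen_scout_str() -> e.g. 'Qw3RtY'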


def get_query_dict(query_str='name=hi&age=20'):
    # name=hi&age=20  TODO: later the parameters should also handle \ and \\ style escapes
    parts = query_str.split('&')
    query_dict = {}
    for part in parts:
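        # (Loop body truncated in this excerpt; the completion below is an
        # assumption: split each pair on the first '=' and store it.)
        key, _, value = part.partition('=')
        query_dict[key] = value
    return query_dict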
Пример #49
0
#!/usr/bin/env python
import logging
import signal
import sys

from cocaine.asio.exceptions import LocatorResolveError
from cocaine.worker import Worker

import log

try:
    log.setup_logger("mm_inventory_logging")
    logger = logging.getLogger("mm.init")
except LocatorResolveError:
    log.setup_logger()
    logger = logging.getLogger("mm.init")
    logger.warn("mm_inventory_logging is not set up properly in " "cocaine.conf, fallback to default logging service")

from config import config

# TODO: rename inv module to 'inventory' when switched to using inventory worker
import inv as inventory
import helpers


def init_inventory_worker(worker):
    helpers.register_handle_wne(worker, inventory.Inventory.get_dc_by_host)


DEFAULT_DISOWN_TIMEOUT = 2
Пример #50
0
def main():
    setup_logger()
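    # cv2 flag 0 == cv2.IMREAD_GRAYSCALE, so the image is loaded single-channel.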
    img = cv2.imread(DEFAULT_IMAGE_PATH, 0)
    resized_img = image_correction(img)
    generate_ascii(resized_img)
Пример #52
0
import json

from django.http import JsonResponse
from django.utils import timezone
from rest_framework.decorators import api_view, permission_classes
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.generics import ListAPIView
from rest_framework.pagination import PageNumberPagination
from rest_framework.permissions import IsAuthenticated

from log import setup_logger
from .serializers import RestaurantSerializer, MealSerializer, OrderSerializer
from ..models import Restaurant, Meal, Order, OrderDetails

logger = setup_logger()


class ApiGetRestaurantsList(ListAPIView):
    queryset = Restaurant.objects.all()
    serializer_class = RestaurantSerializer
    authentication_classes = ()
    permission_classes = ()
    pagination_class = PageNumberPagination
    filter_backends = (SearchFilter, OrderingFilter)
    search_fields = ('title', 'address', 'author__username')


class ApiGetMealsList(ListAPIView):
    queryset = Meal.objects.all()
    serializer_class = MealSerializer
    authentication_classes = ()
Пример #54
0
import os
from collections import deque

from yaml import load

import executor
import log

fdir = os.path.dirname(os.path.realpath(__file__))
conf_dir = os.path.dirname(fdir)
with open(conf_dir + '/sos-ci.yaml') as stream:
    cfg = load(stream)

# Misc settings
DATA_DIR =\
    os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/data'
if cfg['Data']['data_dir']:
    DATA_DIR = cfg['Data']['data_dir']

logger = log.setup_logger(DATA_DIR + '/os-ci.log')
event_queue = deque()
pipeline = deque()


class InstanceBuildException(Exception):
    def __init__(self, message):
        Exception.__init__(self, message)


def _is_my_ci_recheck(event):
    if (event.get('type', 'nill') == 'comment-added' and
            cfg['AccountInfo']['recheck_string'] in event['comment'] and
            cfg['AccountInfo']['project_name'] == event['change']['project'] and
            event['change']['branch'] == 'master'):
        logger.info('Detected recheck request for event: %s', event)
Пример #55
0
def tdiff(t1, t2):
    """Return the absolute difference between t1 and t2, in seconds.

    t1 and t2 are datetime.datetime objects; their difference is a timedelta.
    """
    diff = t1 - t2
    return abs(diff.total_seconds())
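
# Illustrative only:
#   tdiff(datetime(2020, 1, 1, 0, 0, 30), datetime(2020, 1, 1)) -> 30.0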

if __name__ == '__main__':
    # When the script is executed directly or invoked via python -m, parse
    # the command-line arguments and set up the logger. args/log _must_
    # exist at module level when the script is not invoked from the
    # command line.
    this.app_args = parse_arguments(defaults)
    check_args(app_args)
    this.log = setup_logger(app_args)

    stats_q = Queue() # Statistics Queue - might not be necessary
    form_q = Queue() # Queue for sending forms to the FormCrawler
    this.stop = multiprocessing.Event() # Event to handle kill signalling
    items = stats_q, form_q, stop  # Application control items

    try:
        app = PokeyCrawl(*items)
        app.init_stats(Stats())
        app.prep_workers()
        color_log('[!] Preparations complete, crawl commencing', Color.MSG, 'info')
        app.execute(True)
        color_log('[!] Application Executed', Color.MSG, 'info')
        meta['execution_time'] = tdiff(ts(), this.start)
        meta['interrupted'] = False