class Backuptool(object):

    def __init__(self, config):
        """
        :type config: lib.config.VocConfigParser
        """
        self.config = config

        # initialize mainloop
        self.log = logging.getLogger('Main')
        self.log.debug('creating GObject-MainLoop')
        self.mainloop = GObject.MainLoop()

        # initialize subsystem
        self.log.debug('creating Audio-Pipeline')
        self.statusServer = StatusServer(config)
        self.pipeline = Pipeline(config, self.statusServer)

    def run(self):
        self.log.info('starting Pipeline')
        self.pipeline.start()

        try:
            self.log.info('running GObject-MainLoop')
            self.mainloop.run()
        except KeyboardInterrupt:
            self.log.info('Terminated via Ctrl-C')

    def quit(self):
        self.log.info('quitting GObject-MainLoop')
        self.mainloop.quit()
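# A minimal usage sketch for the class above, assuming a ConfigParser-style
# VocConfigParser (module path taken from the docstring); the 'config.ini'
# filename is a hypothetical example.
if __name__ == '__main__':
    import logging
    from lib.config import VocConfigParser  # assumed, per the docstring

    logging.basicConfig(level=logging.DEBUG)

    config = VocConfigParser()
    config.read('config.ini')  # hypothetical config file

    tool = Backuptool(config)
    tool.run()  # blocks inside the GObject main loop until Ctrl-C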
def __init__(self):
    self.log = logging.getLogger('SyncStream')

    # initialize subsystem
    self.log.debug('creating A/V-Pipeline')
    self.pipeline = Pipeline()
    self.source = TCPSource(9999)
    self.netclock = NetClock(self.pipeline, None, 10000)
    self.do_run = True
def __init__(self, config):
    """
    :type config: lib.config.VocConfigParser
    """
    self.config = config

    # initialize mainloop
    self.log = logging.getLogger('Main')
    self.log.debug('creating GObject-MainLoop')
    self.mainloop = GObject.MainLoop()

    # initialize subsystem
    self.log.debug('creating Audio-Pipeline')
    self.statusServer = StatusServer(config)
    self.pipeline = Pipeline(config, self.statusServer)
def __init__(self):
    self.log = logging.getLogger('Voctocore')

    self.log.debug('creating GObject-MainLoop')
    self.mainloop = GObject.MainLoop()

    # initialize subsystem
    self.log.debug('creating A/V-Pipeline')
    self.pipeline = Pipeline()

    self.log.debug('creating ControlServer')
    self.controlserver = ControlServer(self.pipeline)
class LoudnessMonitor(object):

    def __init__(self):
        self.log = logging.getLogger('LoudnessMonitor')

        # initialize subsystem
        self.log.debug('creating A/V-Pipeline')
        self.pipeline = Pipeline()

    def run(self):
        self.pipeline.configure()
        self.pipeline.start()

        try:
            self.log.info('running GObject-MainLoop')
            MainLoop.run()
        except KeyboardInterrupt:
            self.log.info('Terminated via Ctrl-C')

    def quit(self):
        self.log.info('quitting GObject-MainLoop')
        MainLoop.quit()
def main():
    global conf, macMapping, hostAddress, t, ready, ncdict, pipeline

    # configure logging
    docolor = (Args.color == 'always') or (Args.color == 'auto' and
                                           sys.stderr.isatty())

    handler = LogHandler(docolor)
    logging.root.addHandler(handler)

    if Args.verbose >= 2:
        level = logging.DEBUG
    elif Args.verbose == 1:
        level = logging.INFO
    else:
        level = logging.WARNING

    logging.root.setLevel(level)

    # load config
    conf = loadconfig("config.json")
    macMapping = conf["macMapping"]
    hostAddress = conf["hostAddress"]

    # start server
    t = threading.Thread(target=run_server)
    t.start()

    if clearOnStart:
        try:
            shutil.rmtree("./config")
        except FileNotFoundError:
            pass

    # show registered clients every 2 seconds until the operator hits Ctrl-C
    while not ready:
        try:
            time.sleep(2)
            print("\x1b[2J\x1b[H")
            monitorManager.load()
            print('syncstream ready')
            print('- registered clients -')
            for mon in monitorManager.monitors:
                print('{}: {} ({})'.format(mon.index, mon.ip, mon.mac))
            print('press ctrl+c to start')
        except KeyboardInterrupt:
            print('Starting!')
            break

    # make killable by ctrl-c
    logging.debug('setting SIGINT handler')
    signal.signal(signal.SIGINT, signal.SIG_DFL)

    # init main-class and main-loop
    logging.debug('Creating Pipeline')
    pipeline = Pipeline()
    source = TCPSource(9999)
    netclock = NetClock(pipeline, None, 10000)
    pipeline.configure()
    pipeline.start()
    netclock.start()
    ncdict = netclock.netprov
    ready = True

    logging.info('running GObject MainLoop')
    MainLoop.run()
def __init__(self):
    # import modules which use the config or the logging system locally;
    # this is required so that we can configure logging before reading the config
    from lib.pipeline import Pipeline
    from lib.controlserver import ControlServer

    self.log = logging.getLogger('Voctocore')
    self.log.debug('creating GObject-MainLoop')
    self.mainloop = GObject.MainLoop()

    # initialize subsystem
    self.log.debug('creating A/V-Pipeline')
    self.pipeline = Pipeline()
    self.log.debug('creating ControlServer')
    self.controlserver = ControlServer(self.pipeline)
import time

from lib.pipeline import Pipeline

keywords = [
    "ipad3", '"ipad 3"', '"new ipad"',
    "ipad2s", '"ipad 2s"',
    "ipad2", '"ipad 2"',
    "ipadmini", '"ipad mini"', '"ipad hd"',
]
locations = "-78,36,-70,43"

p = Pipeline(keywords=None, locations=locations,
             savefile="nycdata2014", savedelta=30)


def strip_useless_info(input_queue=None, output_queue=None,
                       keep_monitoring=None):
    # ... because less is more
    # also because it's faster to check out the results in the console
    # you can get the original tweet back using
    # http://api.twitter.com/1/statuses/show/tweet_id.json
    keep = ["coordinate_prob", "coordinates", "created_at", "id", "text",
            "geo", "from_user_id"]
    while keep_monitoring():
        while input_queue.qsize():
            tweet = input_queue.get()
            new_tweet = {key: tweet[key] for key in keep if key in tweet}
            new_tweet["text"] = new_tweet["text"].lower()
            output_queue.put(new_tweet)
        time.sleep(1)
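# A sketch of how strip_useless_info() might be wired up between two queues
# with a worker thread. The queue names and the threading.Event used as the
# keep_monitoring flag are illustrative; the real Pipeline class may manage
# its own worker plumbing.
import queue
import threading

raw_tweets = queue.Queue()
slim_tweets = queue.Queue()
monitoring = threading.Event()
monitoring.set()

worker = threading.Thread(
    target=strip_useless_info,
    kwargs={
        'input_queue': raw_tweets,
        'output_queue': slim_tweets,
        'keep_monitoring': monitoring.is_set,  # callable polled by the worker loop
    },
)
worker.start()

raw_tweets.put({'id': 1, 'text': 'New iPad!', 'geo': None, 'retweeted': True})
print(slim_tweets.get())   # 'retweeted' is stripped, text is lowercased
monitoring.clear()         # ask the worker loop to stop
worker.join()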
def main(args_input=sys.argv[1:]):
    parser = define_parser()
    args = parser.parse_args(args_input)

    if "." in args.sample_name:
        sys.exit("Sample name cannot contain '.'")

    if args.fasta_size % 2 != 0:
        sys.exit("The fasta size needs to be an even number")

    if args.iedb_retries > 100:
        sys.exit("The number of IEDB retries must be less than or equal to 100")

    if args.downstream_sequence_length == 'full':
        downstream_sequence_length = None
    elif args.downstream_sequence_length.isdigit():
        downstream_sequence_length = int(args.downstream_sequence_length)
    else:
        sys.exit("The downstream sequence length needs to be a positive integer or 'full'")

    # if args.iedb_install_directory:
    #     lib.call_iedb.setup_iedb_conda_env()

    input_file_type = 'vcf'
    base_output_dir = os.path.abspath(args.output_dir)

    class_i_prediction_algorithms = []
    class_ii_prediction_algorithms = []
    for prediction_algorithm in sorted(args.prediction_algorithms):
        prediction_class = globals()[prediction_algorithm]
        prediction_class_object = prediction_class()
        if isinstance(prediction_class_object, MHCI):
            class_i_prediction_algorithms.append(prediction_algorithm)
        elif isinstance(prediction_class_object, MHCII):
            class_ii_prediction_algorithms.append(prediction_algorithm)

    class_i_alleles = []
    class_ii_alleles = []
    for allele in sorted(set(args.allele)):
        valid = False
        if allele in MHCI.all_valid_allele_names():
            class_i_alleles.append(allele)
            valid = True
        if allele in MHCII.all_valid_allele_names():
            class_ii_alleles.append(allele)
            valid = True
        if not valid:
            print("Allele %s not valid. Skipping." % allele)

    shared_arguments = {
        'input_file': args.input_file,
        'input_file_type': input_file_type,
        'sample_name': args.sample_name,
        'top_score_metric': args.top_score_metric,
        'binding_threshold': args.binding_threshold,
        'allele_specific_cutoffs': args.allele_specific_binding_thresholds,
        'minimum_fold_change': args.minimum_fold_change,
        'net_chop_method': args.net_chop_method,
        'net_chop_threshold': args.net_chop_threshold,
        'additional_report_columns': args.additional_report_columns,
        'fasta_size': args.fasta_size,
        'iedb_retries': args.iedb_retries,
        'downstream_sequence_length': downstream_sequence_length,
        'keep_tmp_files': args.keep_tmp_files,
        'pass_only': args.pass_only,
        'normal_sample_name': args.normal_sample_name,
        'phased_proximal_variants_vcf': args.phased_proximal_variants_vcf,
        'n_threads': args.n_threads,
        'maximum_transcript_support_level': args.maximum_transcript_support_level,
    }

    if len(class_i_prediction_algorithms) > 0 and len(class_i_alleles) > 0:
        if args.epitope_length is None:
            sys.exit("Epitope length is required for class I binding predictions")

        if args.iedb_install_directory:
            iedb_mhc_i_executable = os.path.join(
                args.iedb_install_directory, 'mhc_i', 'src', 'predict_binding.py')
            if not os.path.exists(iedb_mhc_i_executable):
                sys.exit("IEDB MHC I executable path doesn't exist %s"
                         % iedb_mhc_i_executable)
        else:
            iedb_mhc_i_executable = None

        print("Executing MHC Class I predictions")
        output_dir = os.path.join(base_output_dir, 'MHC_Class_I')
        os.makedirs(output_dir, exist_ok=True)

        class_i_arguments = shared_arguments.copy()
        class_i_arguments['alleles'] = class_i_alleles
        class_i_arguments['peptide_sequence_length'] = args.peptide_sequence_length
        class_i_arguments['iedb_executable'] = iedb_mhc_i_executable
        class_i_arguments['epitope_lengths'] = args.epitope_length
        class_i_arguments['prediction_algorithms'] = class_i_prediction_algorithms
        class_i_arguments['output_dir'] = output_dir
        class_i_arguments['netmhc_stab'] = args.netmhc_stab

        pipeline = Pipeline(**class_i_arguments)
        pipeline.execute()
    elif len(class_i_prediction_algorithms) == 0:
        print("No MHC class I prediction algorithms chosen. "
              "Skipping MHC class I predictions.")
    elif len(class_i_alleles) == 0:
        print("No MHC class I alleles chosen. Skipping MHC class I predictions.")

    if len(class_ii_prediction_algorithms) > 0 and len(class_ii_alleles) > 0:
        if args.iedb_install_directory:
            iedb_mhc_ii_executable = os.path.join(
                args.iedb_install_directory, 'mhc_ii', 'mhc_II_binding.py')
            if not os.path.exists(iedb_mhc_ii_executable):
                sys.exit("IEDB MHC II executable path doesn't exist %s"
                         % iedb_mhc_ii_executable)
        else:
            iedb_mhc_ii_executable = None

        print("Executing MHC Class II predictions")
        output_dir = os.path.join(base_output_dir, 'MHC_Class_II')
        os.makedirs(output_dir, exist_ok=True)

        class_ii_arguments = shared_arguments.copy()
        class_ii_arguments['alleles'] = class_ii_alleles
        class_ii_arguments['prediction_algorithms'] = class_ii_prediction_algorithms
        class_ii_arguments['peptide_sequence_length'] = 31
        class_ii_arguments['iedb_executable'] = iedb_mhc_ii_executable
        class_ii_arguments['epitope_lengths'] = [15]
        class_ii_arguments['output_dir'] = output_dir
        class_ii_arguments['netmhc_stab'] = False

        pipeline = Pipeline(**class_ii_arguments)
        pipeline.execute()
    elif len(class_ii_prediction_algorithms) == 0:
        print("No MHC class II prediction algorithms chosen. "
              "Skipping MHC class II predictions.")
    elif len(class_ii_alleles) == 0:
        print("No MHC class II alleles chosen. Skipping MHC class II predictions.")

    if (len(class_i_prediction_algorithms) > 0 and len(class_i_alleles) > 0
            and len(class_ii_prediction_algorithms) > 0
            and len(class_ii_alleles) > 0):
        print("Creating combined reports")
        create_combined_reports(base_output_dir, args)
class SyncStream(object):

    def __init__(self):
        self.log = logging.getLogger('SyncStream')

        # initialize subsystem
        self.log.debug('creating A/V-Pipeline')
        self.pipeline = Pipeline()
        self.source = TCPSource(9999)
        self.netclock = NetClock(self.pipeline, None, 10000)
        self.do_run = True

    def run(self):
        self.pipeline.configure()
        self.pipeline.start()
        self.netclock.start()
        self.do_run = True
        self.log.info('running GObject-MainLoop')
        MainLoop.run()

    def quit(self):
        self.do_run = False
        self.log.info('stopping Pipeline')
        self.pipeline.stop()
        self.log.info('quitting GObject-MainLoop')
        MainLoop.quit()

    def reload(self):
        self.log.info('reloading pipeline')
        self.pipeline.stop()
        self.pipeline.configure()
        self.pipeline.start()
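# A possible entry point for the class above. The module-level MainLoop used
# by run()/quit() is assumed to be a GObject/GLib main loop, as the calls
# suggest; the logging setup is illustrative.
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.INFO)

    stream = SyncStream()
    try:
        stream.run()    # blocks inside MainLoop.run()
    except KeyboardInterrupt:
        stream.quit()   # stops the pipeline and the main loop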
import logging
import time

from lib.pipeline import Pipeline

logging.basicConfig(
    filename='execution.log',
    format='%(asctime)-6s: %(name)s - %(levelname)s - %(message)s',
    level=logging.DEBUG)
logger = logging.getLogger('ipad.py')
logger.debug('Starting the monitor')

keywords = [
    'ipad3', '"ipad 3"', '"new ipad"',
    'ipad2s', '"ipad 2s"',
    'ipad2', '"ipad 2"',
    'ipadmini', '"ipad mini"', '"ipad hd"'
]
locations = "-78,36,-70,43"

p = Pipeline(keywords=None, locations=locations,
             savefile='nycdata2014', savedelta=30)


def strip_useless_info(input_queue=None, output_queue=None,
                       keep_monitoring=None):
    # ... because less is more
    # also because it's faster to check out the results in the console
    # you can get the original tweet back using
    # http://api.twitter.com/1/statuses/show/tweet_id.json
    keep = ['coordinate_prob', 'coordinates', 'created_at', 'id', 'text',
            'geo', 'from_user_id']
    while keep_monitoring():
        while input_queue.qsize():
            tweet = input_queue.get()
            new_tweet = {key: tweet[key] for key in keep if key in tweet}
            new_tweet['text'] = new_tweet['text'].lower()
            output_queue.put(new_tweet)
        time.sleep(1)
def approve(event, context):
    pipeline = Pipeline(os.environ['PIPELINE_NAME'])
    payload = json.loads(parse_qs(event['body'])['payload'][0])
    status, body = pipeline.approve(payload["user"]["name"],
                                    payload["actions"][0]["value"])
    return {
        "statusCode": status,
        "body": body
    }
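# Illustrative shape of the Slack interactive-message callback that approve()
# parses: Slack posts a form-encoded body with a single JSON 'payload' field.
# The user name and action value below are hypothetical.
import json
from urllib.parse import urlencode

sample_payload = {
    "user": {"name": "alice"},           # hypothetical approver
    "actions": [{"value": "approve"}],   # button value sent by Slack
}
sample_event = {"body": urlencode({"payload": json.dumps(sample_payload)})}
# approve(sample_event, None) would then record the approval on the pipeline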
import os
import json
from urllib.parse import parse_qs

from lib.pipeline import Pipeline
from lib.slack import Slack

slack = Slack(os.environ["APP_NAME"], os.environ["FAVICON_URL"])
pipeline = Pipeline(os.environ['PIPELINE_NAME'])
webhook_url = os.environ["SLACK_WEBHOOK_URL"]


def is_relevant(detail):
    return (detail["state"] == "FAILED"
            or detail["stage"] in os.environ["FILTER_STAGES"].split(','))


def listen(event, context):
    detail = event["detail"]
    if is_relevant(detail):
        message = slack.build_message(pipeline.revision(detail["execution-id"]),
                                      os.environ['PIPELINE_NAME'], detail)
        body = slack.send(webhook_url, message)
    else:
        body = "Irrelevant event received"
    return {
        "statusCode": 200,
        "body": body
    }


def review(event, context):
    detail = event["detail"]
    message = slack.build_prompt(pipeline.revision(detail["execution-id"]),
                                 os.environ['PIPELINE_NAME'], detail)
    body = slack.send(webhook_url, message)
    return {
        "statusCode": 200,
        "body": body
    }


def approve(event, context):
    pipeline = Pipeline(os.environ['PIPELINE_NAME'])
    payload = json.loads(parse_qs(event['body'])['payload'][0])
    status, body = pipeline.approve(payload["user"]["name"],
                                    payload["actions"][0]["value"])
    return {
        "statusCode": status,
        "body": body
    }
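# A hedged sketch of the EventBridge event shape listen() consumes above;
# the detail keys match what the handler reads, but all values here are
# illustrative, and the module expects its environment variables to be set.
if __name__ == '__main__':
    sample_event = {
        "detail": {
            "pipeline": "my-pipeline",  # hypothetical pipeline name
            "execution-id": "01234567-89ab-cdef-0123-456789abcdef",
            "stage": "Deploy",
            "state": "FAILED",  # is_relevant() also matches FILTER_STAGES stages
        }
    }
    print(listen(sample_event, None))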
def __init__(self):
    self.log = logging.getLogger('LoudnessMonitor')

    # initialize subsystem
    self.log.debug('creating A/V-Pipeline')
    self.pipeline = Pipeline()
import sys

from lib.pipeline import Pipeline  # assumed module path, matching the imports below
from lib.utils import decode
from lib.event import Event

if len(sys.argv) != 3:
    print()
    print("\tUsage: python %s <input-file> <queue1,queue2..>" % sys.argv[0])
    print()
    print("\tExample: python %s /tmp/events.txt events-queue" % sys.argv[0])
    print()
    sys.exit()

file = open(sys.argv[1], 'r')
queues = sys.argv[2].split(',')
pipeline = Pipeline(None, queues)

for line in file:
    line = decode(line)
    try:
        event = Event.from_unicode(line)
    except Exception:
        print("ERROR - lines from file need to follow Event format")
        continue

    # normalize keys: replace spaces with underscores
    for key in list(event.keys()):
        value = event.value(key)
        event.clear(key)
        key = key.replace(' ', '_')
        event.add(key, value)