def test_two_asynch_running_retry_locks(self):
    #---- get lock & hold it
    cmd1 = '''%s/run_cletus_job_once.py \
               --lock-wait 0       \
               --post-lock-sleep 2 \
           ''' % test_path
    print(cmd1)
    self.c = envoy.connect(cmd1)

    #---- ensure cmd1 locks file before cmd2 starts!
    time.sleep(0.5)

    #---- try to get lock, wait for it, get it
    cmd2 = '''%s/run_cletus_job_once.py \
               --lock-wait 3       \
               --post-lock-sleep 0 \
           ''' % test_path
    print(cmd2)
    self.c2 = envoy.connect(cmd2)

    #---- finish cmd2, then finish cmd1
    self.c2.block()
    self.c.block()

    assert self.c2.status_code == 0   # locked
    assert self.c.status_code == 0    # locked
def setUp(self):
    global _httpbin
    if not _httpbin:
        self.httpbin = envoy.connect('gunicorn httpbin:app --bind=0.0.0.0:%s' % (PORT))
        _httpbin = True
        time.sleep(1)
def test_two_asynch_running(self):
    #---- get lock & hold it
    cmd1 = '''%s/run_cletus_job_once.py \
               --lock-wait 0       \
               --post-lock-sleep 3 \
           ''' % test_path
    self.c = envoy.connect(cmd1)
    cmd1_start_time = time.time()

    #---- ensure cmd1 locks file before cmd2 starts!
    time.sleep(0.5)
    print('sleep for 0.5 seconds')

    #---- try to get lock, fail, quit fast
    cmd2 = '''%s/run_cletus_job_once.py \
               --lock-wait 0       \
               --post-lock-sleep 0 \
           ''' % test_path
    self.c2 = envoy.connect(cmd2)
    cmd2_start_time = time.time()

    #---- finish cmd2, then finish cmd1
    self.c2.block()
    cmd2_dur = time.time() - cmd2_start_time
    self.c.block()
    cmd1_dur = time.time() - cmd1_start_time

    assert self.c.status_code == 0    # locked
    assert self.c2.status_code != 0   # not-locked
    assert cmd2_dur < 2.0

    print('\nfirst process info:')
    print(cmd1)
    print(self.c.std_out)
    print(self.c.std_err)
    print('cmd1 dur: %f' % cmd1_dur)
    print('cmd1 lock status: %s' % self.c.status_code)

    print('second process info:')
    print(cmd2)
    print(self.c2.std_out)
    print(self.c2.std_err)
    print('cmd2 dur: %f' % cmd2_dur)
    print('cmd2 lock status: %s' % self.c2.status_code)
def setUp(self):
    global _httpbin
    if not _httpbin:
        c = envoy.connect('gunicorn httpbin:app --bind=0.0.0.0:%s' % (PORT))
        self.httpbin = c
        _httpbin = True
def setUp(self):
    global _httpbin
    if not _httpbin:
        c = envoy.connect('gunicorn httpbin:app --bind=0.0.0.0:%s' % (PORT))
        self.httpbin = c
        _httpbin = True
        time.sleep(.1)
def setUp(self):
    global _httpbin
    if not _httpbin:
        c = envoy.connect("gunicorn httpbin:app --bind=0.0.0.0:%s" % (PORT))
        self.httpbin = c
        _httpbin = True
        time.sleep(1)
def test_single_asynch_process(self):   # turned off for temp testing
    """ Objective is to confirm that this method of running the job
        will work correctly.
    """
    cmd1 = '''%s/run_cletus_job_once.py \
               --lock-wait 1       \
               --post-lock-sleep 1 \
           ''' % test_path

    #--------------------------------------------
    # note that due to a bug in envoy, you must
    # block before you can check the status_code
    #--------------------------------------------
    self.c = envoy.connect(cmd1)
    self.c.block()
    assert self.c.status_code == 0    # locked
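# A minimal, self-contained sketch of the block-before-status_code pattern
# the comment above describes, combining the behavior exercised by the
# test_status_code_none and test_status_code_success tests below. The
# "sleep 1" command is illustrative, not part of the test suite.
import envoy

c = envoy.connect('sleep 1')
assert c.status_code is None   # still running; no exit status yet
c.block()                      # wait for the child to finish
assert c.status_code == 0      # exit status is populated only after block()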
def install(package, connect=False):

    # Create temp git directory.
    git_path = os.path.join(tempfile.mkdtemp(), 'repo')
    os.makedirs(git_path)
    os.chdir(git_path)

    # Initialize the repo.
    envoy.run('git init')

    with open('requirements.txt', 'w') as f:
        f.write(package)

    envoy.run('git add requirements.txt')
    envoy.run('git commit -m \'init\'')

    # Create a new app.
    print 'creating'
    h_app = heroku.apps.add(stack='cedar')
    print h_app.__dict__

    print 'pushing'
    cmd = 'git push https://{u}:{p}@code.heroku.com/{app}.git master'.format(
        u=urllib.quote(HEROKU_EMAIL),
        p=urllib.quote(HEROKU_PASS),
        app=h_app.name
    )

    if not connect:
        r = envoy.run(cmd)
        h_app.destroy()
    else:
        r = envoy.connect(cmd)

    return r
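# Hypothetical usage of install() above, assuming HEROKU_EMAIL, HEROKU_PASS,
# and an authenticated `heroku` client object are configured; the package
# pin is made up for illustration.

# Blocking push: the temporary app is destroyed after the push completes.
r = install('requests==1.2.3')
print r.status_code

# Non-blocking push: keep the app and wait on the connected handle.
c = install('requests==1.2.3', connect=True)
c.block()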
def test_status_code_none(self):
    c = envoy.connect("sleep 5")
    self.assertEqual(c.status_code, None)
def test_status_code(self):
    c = envoy.connect("sleep 5")
    self.assertEqual(c.status_code, None)
def test_input(self):
    test_string = 'asdfQWER'
    r = envoy.connect("cat | tr [:lower:] [:upper:]")
    r.send(test_string)
    self.assertEqual(r.std_out, test_string.upper())
    self.assertEqual(r.status_code, 0)
def test_status_code_failure(self):
    # "sleeep" is deliberately misspelled so the command is not found
    # and the shell exits with status 127.
    c = envoy.connect("sleeep 1")
    c.block()
    self.assertEqual(c.status_code, 127)
def test_status_code_success(self):
    c = envoy.connect("sleep 1")
    c.block()
    self.assertEqual(c.status_code, 0)
#!/usr/bin/env python

from os import popen
from envoy import connect, run

bin = "/Applications/Sublime Text 2.app/Contents/SharedSupport/bin/subl"
d = "/Users/nordmenss/git/GISTS/4039460"

#connect(bin.replace(" ", "\\ "))
#connect(bin.replace(" ", "\\ ") + " -n " + d)
#run(bin.replace(" ", "\\ "), " -n " + d)
#popen("/Applications/Sublime\\ Text\\ 2.app/Contents/SharedSupport/bin/subl -n /Users/nordmenss/git/GISTS/4039460")

print connect("say hi")
def test_status_code_success(self):
    c = envoy.connect("sleep 1")
    time.sleep(2)
    self.assertEqual(c.status_code, 0)
from time import sleep

import envoy

run = envoy.connect('make run')
print 'Running downloader...'

while True:
    print 'Waiting 200 seconds...'
    sleep(200)
    print 'Uploading...'
    c = envoy.run('make upload')
    print 'In sync!'
    # print c.std_out
    # print
def test_status_code_failure(self):
    # A failing command (misspelled, so not found -> 127) is needed here;
    # "sleep 1" would exit 0, and status_code is None until block().
    c = envoy.connect("sleeep 1")
    c.block()
    self.assertEqual(c.status_code, 127)
def __aggregator_process_archiveitem(
        aggregator_archive_item, scheduler, tmpdir, context):
    import envoy
    from django.template.loader import render_to_string
    from webui.cnmain.utils import get_virtuoso

    virtuoso_simple = get_virtuoso()
    virtuoso_master = get_virtuoso('master')
    loggy = local.logger
    aggregator = aggregator_archive_item.aggregator
    archive_item = aggregator_archive_item.archiveitem

    #
    # PART 1: generate XML file
    #
    loggy.debug("Processing " + unicode(archive_item))
    output_filename = None

    if not aggregator.silk_rule:
        loggy.warning('No silk rule found, skipping')
        scheduler.status = Scheduler.INCOMPLETE
    else:
        output_filename = os.path.join(tmpdir, archive_item.file_hash + '.nt')
        conf_filename = os.path.join(tmpdir, archive_item.file_hash + '_conf.xml')

        silk_conf_xml = render_to_string(
            'controller/aggregator/silk_rules.xml',
            dict(context,
                 archive_item=archive_item,
                 output_filename=output_filename))

        with open(conf_filename, 'w') as fconf:
            fconf.write(silk_conf_xml)

        #
        # PART 2: execute SILK
        #
        loggy.info("Executing SILK on %s", unicode(archive_item))
        result = envoy.connect(
            'java -Xmx{} -DconfigFile={} -Dthreads={} '
            '-cp "{}:{}/*" de.fuberlin.wiwiss.silk.Silk'.format(
                settings.SILK_SINGLE_MACHINE_HEAP,
                conf_filename,
                settings.SILK_SINGLE_MACHINE_THREADS,
                SILK_JAR_PATH,
                SILK_LIB_PATH,
            ))

        level = None
        status = 0
        titan_log_cnt = 0
        # pylint: disable=W0212
        while result._process.poll() is None:
            line = result._process.stderr.readline()\
                .strip().replace('%', '%%')
            if not line:
                continue

            tmplevel = line.split(":", 1)[0]
            if tmplevel in LEVEL_LIST:
                level = tmplevel
            if line.startswith("Exception in thread"):
                level = "EXCEPTION"

            if level == "EXCEPTION":
                status = 2
                loggy.error("S> " + line)
            elif level in LEVEL_OUT:
                status = 1
                loggy.warn("S> " + line)
            elif re.search(r"Finished writing \d+ entities", line) or \
                    re.search(r"Got \d+ vertices", line) or \
                    re.search(r"Wrote \d+ links", line):
                loggy.info("S> " + line)
            elif re.search(r"Getting data for vertices", line):
                if titan_log_cnt % 200 == 0:
                    loggy.info("S> " + line)
                titan_log_cnt += 1
        # pylint: enable=W0212

        if status:
            loggy.error("SILK failed on %s", unicode(archive_item))
            scheduler.status = Scheduler.FAIL
            if status == 2:
                return
        else:
            loggy.info("SILK executed successfully")
            # loggy.debug("Generated file: %s", output_filename)

    #
    # PART 3: dump graph data
    #
    dump_dir = '{}/'.format(archive_item.file_hash)
    loggy.info("Creating a dump of the namedgraph {}".format(
        archive_item.datagraph_mapped_name))
    error = virtuoso_simple.dump_graph(archive_item.datagraph_mapped_name,
                                       dump_dir, create_dir=True)
    if error:
        loggy.error("Dump failed:")
        for line in error:
            loggy.error(line)
        raise Exception("Dump of the namedgraph failed: {}".format(error))

    #
    # PART 4: load graph data in the master virtuoso instance
    #
    # we are assuming that the two virtuoso are on the same machine
    loggy.info("Loading dump in the master graph as {}".format(
        archive_item.datagraph_mapped_name))

    # clear the entire named database before ingesting the data
    # since we're on titan we don't want this anymore
    # virtuoso_master.clear(archive_item.datagraph_mapped_name)
    # loggy.warning("Leaving data dump available for testing purposes")
    # error = virtuoso_master.load_graphs(dump_dir, remove_dir=False)
    error = virtuoso_master.load_graphs(dump_dir, remove_dir=True)

    if error:
        loggy.error("Load failed:")
        if isinstance(error, basestring):
            loggy.error(error)
        else:
            for line in error:
                loggy.error(line)
        raise Exception("Load of the namedgraph failed: {}".format(error))

    if aggregator.silk_rule:
        #
        # PART 5: load SILK generated tuples
        #
        loggy.info("Loading SILK generated tuples")
        virtuoso_master.ingest(
            output_filename,
            settings.TRIPLE_DATABASE['PREFIXES']['silk_graph'],
        )

    now = timezone.now()
    aggregator_archive_item.last_workflow_success = now
    if aggregator_archive_item.first_workflow_success is None:
        aggregator_archive_item.first_workflow_success = now
    aggregator_archive_item.save()
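# A minimal, self-contained sketch of the stderr-streaming pattern used in
# PART 2 above: poll the child while reading its stderr line by line.
# envoy.connect exposes the underlying Popen object as the private _process
# attribute, which is what the code above relies on; the command here is
# illustrative only ("java -version" prints its banner on stderr).
import envoy

result = envoy.connect('java -version')
while result._process.poll() is None:     # None means the child is still running
    line = result._process.stderr.readline().strip()
    if line:
        print 'S> ' + line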
import envoy
import time

r = envoy.connect('python /Users/timbueno/Projects/Intellibrella/RaspberryPi/Other/subprocesses/process2.py cool')

run = True
count = 0

while run:
    print 'Doing other things\n'
    if count == 10:
        r.kill()
        print 'Killed the process...\n'
    if count == 20:
        run = False
    count = count + 1
    time.sleep(1)

print 'The program has run its course'
def launch(self):
    #if not os.path.exists(self.renderfilename):
    self.render()
    self.evince = envoy.connect('evince {}'.format(self.renderfilename))