Ejemplo n.º 1
0
 def test_b_run2threads(self):
     """Run two concurrent FsDrift worker threads and aggregate counters.

     Writes the param pickle and a max-verbosity file, starts both
     workers, fires the starting gun, then joins them and sums their
     per-thread operation counters into one total.
     """
     self.cleanup_files()
     write_pickle(self.params.param_pickle_path, self.params)
     # one workload instance per thread, each with its own thread id
     workers = [TestThread(FsDriftWorkload(self.params), name)
                for name in ('fsdthr-1', 'fsdthr-2')]
     verbosity_fn = os.path.join(self.params.network_shared_path, 'verbosity')
     with open(verbosity_fn, 'w') as vf:
         vf.write('0xffffffff')
     for worker in workers:
         worker.start()
     mylog = fsd_log.start_log('run2threads')
     mylog.info('threads started')
     time.sleep(2)
     # creating the starting-gun file releases both workers at once
     touch(self.params.starting_gun_path)
     mylog.info('starting gun fired')
     for worker in workers:
         worker.join()
     mylog.info('threads done')
     totals = FSOPCounters()
     for worker in workers:
         print(worker.worker.ctrs)
         worker.worker.ctrs.add_to(totals)
         worker.worker.chk_status()
     print('total counters:')
     print(totals)
Ejemplo n.º 2
0
Archivo: auto.py Proyecto: chugare/yys
def auto_rihefang_mt():
    """Multi-threaded battle automation loop.

    Loads the UI template images once, then forever: grabs a screenshot,
    searches for every template in parallel worker threads, taps the
    first template that matched, and sleeps briefly before the next
    round.  Never returns.
    """
    pipei_p = cv2.imread('tiaozhan.png', 0)
    zudui_p = cv2.imread('zudui.png', 0)
    zhunbei_p = cv2.imread('zhunbei.png', 0)
    start_p = cv2.imread('start.png', 0)
    finsih_p = cv2.imread('finish.png', 0)
    end2_p = cv2.imread('jixu2.png', 0)
    end_p = cv2.imread('jixu.png', 0)

    while True:
        img = get_picture()
        # one matcher thread per template; the 0.5 thresholds relax the
        # match score for the patterns that vary more on screen
        # (removed the unused result_list local from the original)
        threads = [
            pattern_thread(img, end_p, 0.5),
            pattern_thread(img, end2_p, 0.5),
            pattern_thread(img, zudui_p),
            pattern_thread(img, pipei_p),
            pattern_thread(img, start_p),
            pattern_thread(img, zhunbei_p, 0.5),
            pattern_thread(img, finsih_p),
        ]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        for t in threads:
            # result is (found, x, y); tap the first match and stop scanning
            if t.result[0]:
                touch(t.result[1], t.result[2])
                break
        time.sleep(0.5)
def main(outfn, assembly, debug, key, keyfile, dryrun, force, analysis_ids=None, infile=None, project=None):
	"""Accession each analysis id and upload the result file to DNAnexus.

	Ids come either from --infile (a DX file with one id per line) or from
	an explicit analysis_ids list; exactly one source must be supplied.
	Returns {'outfile': dxlink} on success, or None when no id source was
	given.
	"""
	if debug:
		logger.setLevel(logging.DEBUG)
	else:
		logger.setLevel(logging.INFO)

	if infile is not None:
		infile = dxpy.DXFile(infile)
		dxpy.download_dxfile(infile.get_id(), "infile")
		ids = open("infile", 'r')
	elif analysis_ids is not None:
		ids = analysis_ids
	else:
		logger.error("Must supply one of --infile or a list of one or more analysis-ids")
		return

	authid, authpw, server = common.processkey(key, keyfile)
	keypair = (authid, authpw)

	accessioned_files = None  # guard: previously unbound when ids was empty
	for (i, analysis_id) in enumerate(ids):
		logger.info('%s' % (analysis_id))
		accessioned_files = accession_analysis(analysis_id, keypair, server, assembly, dryrun, force)

	# was a Python 2 print statement; the call form works on 2 and 3
	print(accessioned_files)
	common.touch(outfn)
	outfile = dxpy.upload_local_file(outfn)

	output = {}
	output["outfile"] = dxpy.dxlink(outfile)

	return output
Ejemplo n.º 4
0
    def setUpWorkingCopy(cls, working_path):
        """Create the standard layout, commit file a/b, record rev0/rev1."""
        yield common.CreateStandardDirectoryStructure()
        cls.rev0 = cls.getAbsoluteRev()

        # add a single file a/b with known content and commit it
        a_dir = os.path.join(working_path, 'a')
        os.makedirs(a_dir)
        common.touch(os.path.join(a_dir, 'b'), 'foo\n')
        yield common.Commit('create a/b')
        cls.rev1 = cls.getAbsoluteRev()
Ejemplo n.º 5
0
    def setUpWorkingCopy(cls, working_path):
        """Build the repo fixture: standard layout, then commit file a/b.

        Records the revision id before (rev0) and after (rev1) the commit
        so tests can compare against them.
        """
        yield common.CreateStandardDirectoryStructure()
        cls.rev0 = cls.getAbsoluteRev()

        os.makedirs(os.path.join(working_path, 'a'))
        common.touch(os.path.join(working_path, 'a', 'b'), 'foo\n')
        yield common.Commit('create a/b')
        cls.rev1 = cls.getAbsoluteRev()
Ejemplo n.º 6
0
        def test_a_runthread(self):
            """Run one workload in-process and check its final status."""
            self.cleanup_files()
            write_pickle(self.params.param_pickle_path, self.params)

            workload = FsDriftWorkload(self.params)
            workload.verbose = True
            workload.tid = 'worker_thread'
            # fire the starting gun immediately -- no other threads to wait for
            touch(workload.params.starting_gun_path)
            workload.do_workload()

            print(workload.ctrs)
            workload.chk_status()
Ejemplo n.º 7
0
 def test_read_file_info_for_failure(self):
     """Probing an invalid media file must raise FFMPEGHandleFFProbeError."""
     self.setup_class()
     # build tests/tmp/test_failure.mkv under the project root
     bad_file = os.path.join(self.project_dir, 'tests', 'tmp',
                             'test_failure.mkv')
     # an empty file is not a valid container, so the probe should fail
     common.touch(bad_file)
     import pytest
     with pytest.raises(FFMPEGHandleFFProbeError):
         self.ffmpeg.file_probe(bad_file)
Ejemplo n.º 8
0
Archivo: auto.py Proyecto: chugare/yys
def auto_common():
    """Common farming loop: repeatedly look for the 'up' button and tap it.

    NOTE(review): the start/start_yyh/end/finish templates below are
    loaded but never used in the visible body, and `img` is never
    captured -- this looks like a truncated snippet; confirm against the
    original source before relying on it.
    """
    start_p = cv2.imread('start.png', 0)
    start_yyh_p = cv2.imread('start_yyh.png', 0)
    up_p = cv2.imread('up.png', 0)
    end_p = cv2.imread('end.png', 0)
    finsih_p = cv2.imread('finish.png', 0)
    while True:
        # try the 'up' pattern a few times per outer round
        for i in range(3):
            r, x, y = find_pattern(up_p)
            if r:
                touch(x, y)
                print("x:%d,y:%d" % (x, y))
                print('一次刷本Finish')
                continue
Ejemplo n.º 9
0
def fastaSplit(filenames, mode, num, output_dir, concurrent=1, max_split=1000):
    """Split FASTA files in parallel worker processes.

    :param filenames: list of fasta file paths
    :param mode: 'length' or 'number' -- how sequences are partitioned
    :param num: target length/number per output file
    :param output_dir: directory receiving the split results
    :param concurrent: worker process count (see -h)
    :param max_split: maximum splits per input file (see -h)
    :return: list of the generated split files
    """
    assert mode in ["number", "length"]
    num = int(num)

    output_dir = mkdir(output_dir)
    split_list = os.path.join(output_dir, "split_list")
    done = os.path.join(output_dir, "split_done")

    # the done marker lets a rerun skip straight to the cached file list
    if os.path.exists(done):
        LOG.info(
            "%r exists, pass this step; if you want to rerun, delete the file"
            % done)
        return fofn2list(split_list)

    workers = Pool(processes=concurrent)

    LOG.info("Split '{filenames}' by sequence {mode} =~ {num} per file".format(
        **locals()))

    # submit one async split job per input file
    pending = []
    for fasta in filenames:
        print("processing %s" % fasta)
        pending.append(workers.apply_async(
            split_fasta, (fasta, mode, num, output_dir, max_split)))

    workers.close()
    workers.join()

    file_list = []
    for job in pending:
        file_list.extend(job.get())

    with open(split_list, "w") as handle:
        handle.write("\n".join(file_list))

    # write the marker only after the list file is safely on disk
    touch(done)
    return file_list
Ejemplo n.º 10
0
 def wait_for_gate(self):
     """Signal readiness, then block until the starting-gun file appears.

     Raises FsDriftException if the abort flag shows up while waiting.
     """
     if self.params.starting_gun_path:
         # announce this thread is ready, then poll for the starting gun
         touch(self.gen_thread_ready_fname(self.tid))
         while not os.path.exists(self.params.starting_gun_path):
             if os.path.exists(self.params.abort_path):
                 raise FsDriftException(
                     'thread ' + str(self.tid) + ' saw abort flag')
             time.sleep(0.3)
     # wait a little longer so other clients have time to see that the
     # start-file exists -- NFS with actimeo=1 needs at least 1 second
     # to invalidate cached metadata
     time.sleep(2)
Ejemplo n.º 11
0
 def test_multiproc(self):
     """Launch several workload subprocesses, release them simultaneously
     via the starting-gun file, then validate each worker's results."""
     self.log.info('starting test')
     self.resetDir()
     with open('/tmp/weights.csv', 'w') as w_f:
         w_f.write( '\n'.join(Test.workload_table))
     thread_ready_timeout = 4
     thread_count = 4
     # one FsDriftWorkload per subprocess, each with its own thread id
     invokeList = []
     for j in range(0, thread_count):
         s = worker_thread.FsDriftWorkload(self.params)
         s.tid = str(j)
         invokeList.append(s)
     threadList = []
     for s in invokeList:
         threadList.append(subprocess(s))
     for t in threadList:
         t.start()
     threads_ready = True
     self.log.info('threads started')
     # poll at 1s intervals until every worker has dropped its
     # thread-ready file, giving up after thread_ready_timeout seconds
     for i in range(0, thread_ready_timeout):
         threads_ready = True
         for s in invokeList:
             thread_ready_file = s.gen_thread_ready_fname(s.tid)
             if not os.path.exists(thread_ready_file):
                 threads_ready = False
         if threads_ready:
             break
         time.sleep(1)
     if not threads_ready:
         raise FsDriftException('threads did not show up within %d seconds'
                         % thread_ready_timeout)
     time.sleep(1)
     self.log.info('threads awaiting starting gun')
     # creating the starting-gun file releases all workers at once
     common.touch(self.params.starting_gun_path)
     for t in threadList:
         t.retrieve()
         t.join()
         self.log.info('thread ' + t.invoke.tid + ' finished')
     # sanity-check each worker: it must have recorded an elapsed time,
     # finished with OK status, and actually read some data
     for t in threadList:
         if t.invoke.elapsed_time == -1.0:
             raise FsDriftException('subprocess never got elapsed time')
         if t.invoke.status != OK:
             raise FsDriftException('subprocess status %d for thread %s' % (t.invoke.status, t.invoke.tid))
         print('thread %s counters:\n%s' % (t.invoke.tid, str(t.invoke.ctrs)))
         if t.invoke.ctrs.read_bytes == 0:
             raise FsDriftException('subprocess never read any data')
def main(outfn,
         assembly,
         debug,
         key,
         keyfile,
         dryrun,
         force,
         analysis_ids=None,
         infile=None,
         project=None):
    """Accession each analysis id and upload the output file to DNAnexus.

    Ids come either from --infile (a DX file with one id per line) or
    from an explicit analysis_ids list; exactly one source must be
    supplied.  Returns {'outfile': dxlink} on success, or None when no
    id source was given.
    """
    if debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    if infile is not None:
        infile = dxpy.DXFile(infile)
        dxpy.download_dxfile(infile.get_id(), "infile")
        ids = open("infile", 'r')
    elif analysis_ids is not None:
        ids = analysis_ids
    else:
        logger.error(
            "Must supply one of --infile or a list of one or more analysis-ids"
        )
        return

    authid, authpw, server = common.processkey(key, keyfile)
    keypair = (authid, authpw)

    accessioned_files = None  # guard: previously unbound when ids was empty
    for (i, analysis_id) in enumerate(ids):
        logger.info('%s' % (analysis_id))
        accessioned_files = accession_analysis(analysis_id, keypair, server,
                                               assembly, dryrun, force)

    # was a Python 2 print statement; the call form works on 2 and 3
    print(accessioned_files)
    common.touch(outfn)
    outfile = dxpy.upload_local_file(outfn)

    output = {}
    output["outfile"] = dxpy.dxlink(outfile)

    return output
Ejemplo n.º 13
0
Archivo: auto.py Proyecto: chugare/yys
def auto(host=False):
    """Main farming loop.

    When *host* is True this client also presses the start button;
    otherwise it only reacts to the battle-end screen.
    NOTE(review): `finsih_p` and `start_yyh_p` are loaded but unused in
    the visible body -- this snippet may be truncated; confirm against
    the original source.
    """
    start_p = cv2.imread('start.png', 0)
    start_yyh_p = cv2.imread('start_yyh.png', 0)

    end_p = cv2.imread('end.png', 0)
    finsih_p = cv2.imread('finish.png', 0)
    count = 0
    while True:
        img = get_picture()
        if host:
            # host presses the start button when it appears
            r, x, y = find_pattern(img, start_p, 0.6)
            if r:
                touch(x, y)
                print('识别了开始刷本的pattern,开始刷本 %d 次' % count)
                count += 1
                # continue
        # r, x, y = find_pattern(img,start_yyh_p)
        # if r:
        #     touch(x,y)
        #
        #     print('识别了开始刷业原火副本的pattern,开始刷本 %d 次' % count)
        #     count += 1
        #     continue
        r, x, y = find_pattern(img, end_p)
        if r:
            # tap twice, offset from the matched point, to dismiss the
            # battle-end screen
            touch(x + 200, y + 50)
            touch(x + 200, y + 50)
            print('识别了刷本结束的pattern')
            time.sleep(0.5)
Ejemplo n.º 14
0
def auto_tansuo():
    """Automate the exploration loop.

    Each round: grab a screenshot, match all templates in parallel, then
    act on the matches in index order.  Seeing the 'flag' template arms a
    state bit so that the 'back' button is only pressed after a flag was
    seen.  Never returns.
    """
    jieshou_p = cv2.imread('gou.png', 0)
    end_p = cv2.imread('end.png', 0)
    back_p = cv2.imread('back.png', 0)
    confirm_p = cv2.imread('confirm.png', 0)
    flag_p = cv2.imread('flag.png', 0)

    # plain boolean replaces the original one-field `syn` state class and
    # its set_flag/reset_flag closures; also dropped the unused
    # result_list and count locals
    flag_armed = False

    while True:
        img = get_picture()
        threads = [
            pattern_thread(img, end_p),
            pattern_thread(img, jieshou_p),
            pattern_thread(img, flag_p, 0.5),
            pattern_thread(img, back_p),
            pattern_thread(img, confirm_p),
        ]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        for i, t in enumerate(threads):
            # result is (found, x, y)
            if not t.result[0]:
                continue
            if i == 0:
                # battle end: tap offset from the match and restart round
                touch(t.result[1] + 200, t.result[2] + 50)
                break
            elif i == 2:
                # flag seen: arm the back-button handler (no break -- the
                # remaining templates are still examined this round)
                flag_armed = True
            elif i == 3:
                # back button: only honor it when a flag was seen first
                if flag_armed:
                    touch(t.result[1], t.result[2])
                    flag_armed = False
            else:
                # accept / confirm: tap and restart the round
                touch(t.result[1], t.result[2])
                break
        time.sleep(0.5)
Ejemplo n.º 15
0
def auto_dayuewan_mt():
    """Multi-threaded battle loop with a small state machine.

    battle_state: 0 = idle, 1 = battle started, 2 = dajiuma prompt
    already answered.  Each round matches all templates in parallel and
    reacts to whichever are present.  Never returns.
    """
    ready_p = cv2.imread('ready.png', 0)
    start_p = cv2.imread('start.png', 0)
    finsih_p = cv2.imread('finish.png', 0)
    end_p = cv2.imread('end.png', 0)
    dajiuma_p = cv2.imread('dajiuma.png', 0)
    focus_p = cv2.imread('focus.png', 0)

    battle_state = 0
    while True:
        img = get_picture()
        # removed the unused result_list and focused locals
        threads = [
            pattern_thread(img, end_p, 0.7),
            pattern_thread(img, focus_p, 0.7),
            pattern_thread(img, dajiuma_p, 0.6),
            pattern_thread(img, ready_p, 0.7),
            pattern_thread(img, start_p),
            pattern_thread(img, finsih_p),
        ]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        for i, t in enumerate(threads):
            # result is (found, x, y); indexes 0 (end) and 1 (focus) are
            # matched but not acted upon, exactly as in the original
            if not t.result[0]:
                continue
            if i == 3 or i == 4:
                # ready / start button: begin the battle
                print('start')
                battle_state = 1
                touch(t.result[1], t.result[2])
            elif i == 2:
                # dajiuma prompt: answer it once per battle
                if battle_state == 1:
                    print('1')
                    touch(t.result[1], t.result[2] + 50)
                    battle_state = 2
            elif i == 5:
                # finish screen: battle over, reset the state machine
                print('end')
                battle_state = 0
                touch(t.result[1], t.result[2])
Ejemplo n.º 16
0
def abort_test(abort_fn, thread_list):
    """Create the abort-flag file (if absent) and terminate every worker."""
    if not os.path.exists(abort_fn):
        common.touch(abort_fn)
    for worker in thread_list:
        worker.terminate()
def main(outfn, assembly, debug, key, keyfile, dryrun, force, pipeline, analysis_ids=None, infile=None, project=None):
	"""Accession analyses and write a TSV report of what was accessioned.

	Ids come from --infile (a DX file of ids) or the analysis_ids list.
	Each analysis is dispatched on its name/executableName to the peaks
	or mapping accessioner; per-analysis errors are captured in the
	report row rather than aborting the run.  Returns {'outfile': dxlink
	to the report}, or None when no id source was supplied.
	"""
	if debug:
		logger.info('setting logger level to logging.DEBUG')
		logger.setLevel(logging.DEBUG)
	else:
		logger.info('setting logger level to logging.INFO')
		logger.setLevel(logging.INFO)

	if infile is not None:
		infile = dxpy.DXFile(infile)
		dxpy.download_dxfile(infile.get_id(), "infile")
		ids = open("infile", 'r')
	elif analysis_ids is not None:
		ids = analysis_ids
	else:
		logger.error("Must supply one of --infile or a list of one or more analysis-ids")
		return

	authid, authpw, server = common.processkey(key, keyfile)
	keypair = (authid, authpw)

	common_metadata.update({'assembly': assembly})

	with open(outfn, 'w') as fh:
		if dryrun:
			fh.write('---DRYRUN: No files have been modified---\n')
		fieldnames = ['analysis', 'experiment', 'assembly', 'dx_pipeline', 'files', 'error']
		output_writer = csv.DictWriter(fh, fieldnames, delimiter='\t')
		output_writer.writeheader()

		for (i, analysis_id) in enumerate(ids):
			logger.debug('debug %s' % (analysis_id))
			analysis = dxpy.describe(analysis_id.strip())
			experiment = get_experiment_accession(analysis)
			output = {
				'analysis': analysis_id,
				'experiment': experiment,
				'assembly': assembly
			}
			logger.info('Accessioning analysis name %s executableName %s' % (analysis.get('name'), analysis.get('executableName')))

			if analysis.get('name') == 'histone_chip_seq':
				output.update({'dx_pipeline': 'histone_chip_seq'})
				try:
					accessioned_files = accession_peaks_analysis_files(analysis, keypair, server, dryrun, force)
				except Exception:  # was a bare except: -- don't mask SystemExit/KeyboardInterrupt
					accessioned_files = None
					output.update({'error': sys.exc_info()[0]})
				else:
					output.update({'error': ""})
			elif analysis.get('executableName') == 'ENCODE mapping pipeline':
				output.update({'dx_pipeline': 'ENCODE mapping pipeline'})
				try:
					accessioned_files = accession_mapping_analysis_files(analysis, keypair, server, dryrun, force)
				except Exception:  # was a bare except:
					accessioned_files = None
					output.update({'error': sys.exc_info()[0]})
				else:
					output.update({'error': ""})
			else:
				logger.error('unrecognized analysis pattern %s %s ... skipping.' % (analysis.get('name'), analysis.get('executableName')))
				output.update({'dx_pipeline': 'unrecognized'})
				accessioned_files = None
				output.update({'error': 'unrecognized analysis pattern %s %s' % (analysis.get('name'), analysis.get('executableName'))})

			file_accessions = [f.get('accession') for f in (accessioned_files or [])]
			logger.info("Accessioned: %s" % (file_accessions))
			output.update({'files': file_accessions})
			output_writer.writerow(output)

	common.touch(outfn)
	outfile = dxpy.upload_local_file(outfn)

	output = {}
	output["outfile"] = dxpy.dxlink(outfile)

	return output
Ejemplo n.º 18
0
Archivo: auto.py Proyecto: chugare/yys
def react(pattern, response, t):
    """If *pattern* is found on screen, tap its location and log *response*."""
    found, x, y = find_pattern(pattern)
    if found:
        touch(x, y)
        print('%s' % response, t)
Ejemplo n.º 19
0
Archivo: auto.py Proyecto: chugare/yys
def auto_rihefang():
    """Single-threaded battle automation loop.

    Each round grabs a screenshot and checks the templates one at a time
    in priority order (battle-end first, then team-up, match, start,
    ready, finish), tapping the first one found.  Never returns.
    """
    pipei_p = cv2.imread('tiaozhan.png', 0)
    zudui_p = cv2.imread('zudui.png', 0)
    zhunbei_p = cv2.imread('zhunbei.png', 0)
    start_p = cv2.imread('start.png', 0)
    finsih_p = cv2.imread('finish.png', 0)

    end2_p = cv2.imread('jixu2.png', 0)

    end_p = cv2.imread('jixu.png', 0)
    count = 0

    while True:

        img = get_picture()
        # battle-end ('continue') screen: tap twice with a short pause
        r, x, y = find_pattern(img, end_p)
        if r:
            touch(x, y)
            print('识别了刷本结束的pattern', 0.5)
            time.sleep(0.5)
            touch(x, y)
            continue
        # alternate battle-end screen
        r, x, y = find_pattern(img, end2_p)
        if r:
            touch(x, y)
            print('识别了刷本结束的pattern', 0.5)
            time.sleep(0.5)
            touch(x, y)
            continue
        # team-up complete
        r, x, y = find_pattern(img, zudui_p)
        if r:
            touch(x, y)
            print('组队完成')

            continue
        # start matchmaking
        r, x, y = find_pattern(img, pipei_p)
        if r:
            touch(x, y)
            print('开始匹配')
            continue
        # start the challenge
        r, x, y = find_pattern(img, start_p)
        if r:
            touch(x, y)
            print('开始挑战')
            continue
        # ready screen (looser 0.5 match threshold); counts completed runs
        r, x, y = find_pattern(img, zhunbei_p, 0.5)
        if r:
            touch(x, y)

            print('准备完成,开始刷本 %d 次' % count)
            count += 1
            continue
        # finish screen
        r, x, y = find_pattern(img, finsih_p)
        if r:
            touch(x, y)
            print("x:%d,y:%d" % (x, y))
            print('一次刷本Finish')
            continue
Ejemplo n.º 20
0
def run_multi_thread_workload(prm):
    """Run the configured worker threads on this host and collect results.

    Starts all workers, synchronizes them at a starting gate (and, in a
    multi-host test, with the master host via shared files), waits for
    completion, then either prints results (master) or pickles them for
    the test driver to pick up (slave).

    :param prm: test-parameters object (paths, host_set, etc.)
    :return: OK, or NOTOK if result output failed
    :raises FsDriftException: if workers miss the starting gate
    """
    host = prm.as_host
    if host is None:
        host = 'localhost'
    prm_slave = (prm.host_set != [])
    # FIXME: get coherent logging level interface
    # BUGFIX: was os.getenv('LOGLEVEL_DEBUG' != None), which looked up the
    # key True and therefore always evaluated falsy
    verbose = os.getenv('LOGLEVEL_DEBUG') is not None
    # integer division: this value is used as a range() bound below
    host_startup_timeout = 5 + len(prm.host_set) // 3

    # for each thread set up SmallfileWorkload instance,
    # create a thread instance, and delete the thread-ready file

    thread_list = create_worker_list(prm)
    my_host_invoke = thread_list[0].invoke
    my_log = fsd_log.start_log('%s.master' % host)
    my_log.debug(prm)

    # start threads, wait for them to reach starting gate
    # to do this, look for thread-ready files

    for t in thread_list:
        ensure_deleted(t.invoke.gen_thread_ready_fname(t.invoke.tid))
    for t in thread_list:
        t.start()
    my_log.debug('started %d worker threads on host %s' %
                                (len(thread_list), host))

    # wait for all threads to reach the starting gate
    # this makes it more likely that they will start simultaneously

    abort_fname = prm.abort_path
    thread_count = len(thread_list)
    thread_to_wait_for = 0
    startup_timeout = 3
    sec = 0.0
    while sec < startup_timeout:
        for k in range(thread_to_wait_for, thread_count):
            t = thread_list[k]
            fn = t.invoke.gen_thread_ready_fname(t.invoke.tid)
            if not os.path.exists(fn):
                my_log.debug('thread %d thread-ready file %s not found yet with %f sec left' %
                            (k, fn, (startup_timeout - sec)))
                break
            thread_to_wait_for = k + 1
            # we only timeout if no more threads have reached starting gate
            # in startup_timeout sec
            sec = 0.0
        if thread_to_wait_for == thread_count:
            break
        if os.path.exists(abort_fname):
            break
        sec += 0.5
        time.sleep(0.5)

    # if all threads didn't make it to the starting gate

    if thread_to_wait_for < thread_count:
        abort_test(abort_fname, thread_list)
        raise FsDriftException('only %d threads reached starting gate'
                                % thread_to_wait_for)

    # declare that this host is at the starting gate

    if prm_slave:
        host_ready_fn = gen_host_ready_fname(prm, prm.as_host)
        my_log.debug('host %s creating ready file %s' %
                     (my_host_invoke.onhost, host_ready_fn))
        common.touch(host_ready_fn)

    sg = prm.starting_gun_path
    if not prm_slave:
        my_log.debug('wrote starting gate file ')
        sync_files.write_sync_file(sg, 'hi there')

    # wait for starting_gate file to be created by test driver
    # every second we resume scan from last host file not found

    if prm_slave:
        my_log.debug('awaiting ' + sg)
        for sec in range(0, host_startup_timeout + 3):
            # hack to ensure that directory is up to date
            #   ndlist = os.listdir(my_host_invoke.network_dir)
            # if verbose: print(str(ndlist))
            if os.path.exists(sg):
                break
            if os.path.exists(prm.abort_path):
                # BUGFIX: was log.info -- 'log' is undefined in this scope
                my_log.info('saw abort file %s, aborting test' % prm.abort_path)
                break
            time.sleep(1)
        if not os.path.exists(sg):
            abort_test(prm.abort_path, thread_list)
            raise Exception('starting signal not seen within %d seconds'
                            % host_startup_timeout)
    if verbose:
        print('starting test on host ' + host + ' in 2 seconds')
    time.sleep(2 + random.random())  # let other hosts see starting gate file

    # FIXME: don't timeout the test,
    # instead check thread progress and abort if you see any of them stalled
    # but if servers are heavily loaded you can't rely on filesystem

    # wait for all threads on this host to finish

    for t in thread_list:
        my_log.debug('waiting for thread %s' % t.invoke.tid)
        t.retrieve()
        t.join()

    # if not a slave of some other host, print results (for this host)

    if not prm_slave:
        try:
            worker_list = [t.invoke for t in thread_list]
            output_results.output_results(prm, worker_list)
        except FsDriftException as e:
            print('ERROR: ' + str(e))
            return NOTOK
    else:

        # if we are participating in a multi-host test
        # then write out this host's result in pickle format
        # so test driver can pick up result

        result_filename = host_result_filename(prm, prm.as_host)
        my_log.debug('saving result to filename %s' % result_filename)
        worker_list = [t.invoke for t in thread_list]
        sync_files.write_pickle(result_filename, worker_list)
        time.sleep(1.2)  # for benefit of NFS with actimeo=1

    return OK