def test_split_connection(self):
    cm = None
    M = None
    S = None
    try:
        cm = Cm.CM("test_split_connection")
        cm.create_workspace()

        # M (master), S (slave)
        M = _init_pgs(0, 'localhost', 1900, cm.dir)
        M.smr.role_master(M.id, 1, 0)
        S = _init_pgs(1, 'localhost', 1910, cm.dir)
        S.smr.role_slave(S.id, 'localhost', M.base_port, 0)

        # M: confset "slave_idle_timeout_msec" to 100
        M.smr.fi_once('__no_free_client_conn')
        M.smr.confset('slave_idle_timeout_msec', 100)
        S.smr.wait_role(Smr.SMR.LCONN)
        M.smr.confset('slave_idle_timeout_msec', 18000)

        # S: role slave again
        ps = S.smr.getseq_log()
        S.smr.role_slave(S.id, 'localhost', M.base_port, ps['max'])
        S.smr.wait_role(Smr.SMR.SLAVE)
        S.be.init_conn()

        # check be at S works
        # Util.tstop('before check it!')
        r = S.be.set(0, '100')
        assert r >= 0
    finally:
        if M is not None:
            M.kill()
        if S is not None:
            S.kill()
        if cm is not None:
            cm.remove_workspace()
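Several tests in this listing call a _init_pgs helper that is not shown here. A minimal sketch of what it presumably does, reconstructed only from its call sites (it returns an object with .smr, .be and .kill(), and the role NONE/LCONN sequence mirrors _test_pgs below); the body is an assumption, not the repository's actual helper:

# Hypothetical sketch of _init_pgs(nid, host, base_port, dir).
# Assumes Pgs.PGS exposes start_smr()/start_be() as used elsewhere
# in this listing, and that .kill() stops both processes.
def _init_pgs(nid, host, base_port, dir):
    pgs = Pgs.PGS(nid, host, base_port, dir)
    pgs.start_smr()                    # start the replicator
    pgs.smr.wait_role(Smr.SMR.NONE)    # fresh replicator has no role yet
    pgs.start_be()                     # start the backend attached to it
    pgs.smr.wait_role(Smr.SMR.LCONN)   # backend connected, ready for a role
    return pgs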
def test_slave_keep_alive(self):
    cm = None
    pgs1 = None
    pgs2 = None
    pgs3 = None
    try:
        cm = Cm.CM("test_pgs")
        cm.create_workspace()
        pg = Pg.PG(0)

        # pgs1 --> master
        pgs1 = Pgs.PGS(0, 'localhost', 1900, cm.dir)
        pg.join(pgs1, start=True)
        pgs1.smr.wait_role(Smr.SMR.MASTER)

        # pgs2 --> slave
        pgs2 = Pgs.PGS(1, 'localhost', 1910, cm.dir)
        pg.join(pgs2, start=True)
        pgs2.smr.wait_role(Smr.SMR.SLAVE)

        # send command
        r = pgs2.be.set(0, '100')
        assert r >= 0

        # check that the slave's idle time, as seen by the master, stays bounded
        for i in range(0, 9):
            res = pgs1.smr.info('slave')
            line = res['slave']['slave_' + str(pgs2.id)]
            assert line is not None
            idle_msec = int(line[(line.find('idle_msec=') + len('idle_msec=')):])
            assert idle_msec < i * 100 + 500
            time.sleep(0.1)  # sleep 100 msec

        # make pgs3 and force join
        pgs3 = Pgs.PGS(1, 'localhost', 1920, cm.dir)
        pg.join(pgs3, start=True, Force=True)
        pgs3.smr.wait_role(Smr.SMR.SLAVE)
        pgs2.smr.wait_role(Smr.SMR.LCONN)

        # check
        r2 = pgs3.be.get(0)
        assert r == r2

        # current configuration: (pgs1, pgs3)
        pgs1.smr.confset('slave_idle_timeout_msec', 100)
        pgs3.smr.wait_role(Smr.SMR.LCONN)
    finally:
        if pgs1 is not None:
            pgs1.kill_smr()
            pgs1.kill_be()
        if pgs2 is not None:
            pgs2.kill_smr()
            pgs2.kill_be()
        if pgs3 is not None:
            pgs3.kill_smr()
            pgs3.kill_be()
        if cm is not None:
            cm.remove_workspace()
def _test_pgs(self):
    cm = None
    pgs = None
    try:
        cm = Cm.CM("test_pgs")
        assert cm is not None
        cm.create_workspace()
        assert cm.dir is not None

        pgs = Pgs.PGS(0, 'localhost', 1900, cm.dir)
        assert pgs is not None

        pgs.start_smr()
        assert pgs.smr is not None
        role = pgs.smr.get_role()
        assert role == Smr.SMR.NONE

        pgs.start_be()
        assert pgs.be is not None
        pgs.smr.wait_role(Smr.SMR.LCONN)
    except:
        Util.tstop('Exception Occurred')
        traceback.print_exc()
        e = sys.exc_info()[0]
    finally:
        if pgs is not None:
            pgs.kill_be()
            pgs.kill_smr()
        if cm is not None:
            cm.remove_workspace()
def _test_smr(self):
    cm = None
    pgs = None
    try:
        cm = Cm.CM("test_pgs")
        assert cm is not None
        cm.create_workspace()
        assert cm.dir is not None

        pgs = Pgs.PGS(0, 'localhost', 1900, cm.dir)
        assert pgs is not None
        pgs.start_smr()
        assert pgs.smr is not None

        # send confset with empty arguments
        self.assertRaisesRegexp(Exception, '-ERR bad number of token:0',
                                pgs.smr.confset, '', '')
    finally:
        if pgs is not None:
            pgs.kill_smr()
        if cm is not None:
            cm.remove_workspace()
def test_deadline(self):
    cm = None
    pgs = None
    dummy_peer = None
    try:
        cm = Cm.CM("test_pgs")
        cm.create_workspace()
        pg = Pg.PG(0)

        pgs = Pgs.PGS(0, 'localhost', 1900, cm.dir)
        pg.join(pgs, start=True)
        pgs.smr.wait_role(Smr.SMR.MASTER)

        dummy_peer = DummyPeer('localhost', 1902, 3)
        dummy_peer.start()
        time.sleep(0.5)

        # getseq_log must respond within the deadline even with a silent peer attached
        st = int(round(time.time() * 1000))
        seqs = pgs.smr.getseq_log()
        et = int(round(time.time() * 1000))
        assert et - st < 1000
    finally:
        # Util.tstop('Check output!')
        if pgs is not None:
            pgs.kill_smr()
            pgs.kill_be()
        if dummy_peer is not None:
            dummy_peer.join()
        if cm is not None:
            cm.remove_workspace()
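DummyPeer is also not shown in this listing. A plausible sketch, reconstructed only from usage (DummyPeer(host, port, n); .start(); .join()): a thread that opens a raw connection and then stays silent, so the server's request deadline is exercised. Everything below is an assumption about the helper, not the repository's actual code:

# Hypothetical sketch of DummyPeer used by test_deadline.
import socket
import threading

class DummyPeer(threading.Thread):
    def __init__(self, host, port, timeout):
        threading.Thread.__init__(self)
        self.host = host
        self.port = port
        self.timeout = timeout  # seconds to stay connected without talking

    def run(self):
        # connect and go silent; the server must not block on us
        sock = socket.create_connection((self.host, self.port))
        time.sleep(self.timeout)
        sock.close()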
def __init__(self, name, members):
    self.name = name
    self.members = members
    self.cm = Cm.CM(self.name)
    self.cm.create_workspace()
    self.pg = None
    self.pgs_ary = []
    self.clients = []
def __init__(self, name, copy):
    self.name = name
    self.copy = copy
    self.cm = Cm.CM(self.name)
    self.cm.create_workspace()
    self.pg = None
    self.pgs_ary = []
    self.clients = []
def _test_be(self):
    cm = None
    pgs = None
    try:
        cm = Cm.CM("test_be")
        cm.create_workspace()
        pg = Pg.PG(0)

        # pgs --> master
        pgs = Pgs.PGS(0, 'localhost', 1900, cm.dir)
        pg.join(pgs, start=True)
        pgs.smr.wait_role(Smr.SMR.MASTER)

        # Test basic op
        old = pgs.be.set(0, '100')
        assert old >= 0
        r = pgs.be.reset()
        assert r == 0
        new = pgs.be.set(0, '100')
        assert old == new
        r = pgs.be.ping()
        assert r == 0

        # do checkpoint
        r = pgs.be.ckpt()
        assert r == 0

        # restart
        pg.leave(pgs.id, kill=True)
        pg.join(pgs, start=True)
        pgs.smr.wait_role(Smr.SMR.MASTER)

        # check crc of key = 0
        new = pgs.be.get(0)
        assert old == new
    except:
        Util.tstop('Exception Occurred')
        traceback.print_exc()
        e = sys.exc_info()[0]
    finally:
        if pgs is not None:
            pgs.kill_smr()
            pgs.kill_be()
        if cm is not None:
            cm.remove_workspace()
def test_quorum_membership_commands(self):
    cm = None
    M = None
    try:
        cm = Cm.CM("test_quorum")
        cm.create_workspace()

        # M(1) with quorum members 1 and 2
        M = _init_pgs(0, 'localhost', 1900, cm.dir)
        M.smr.role_master(M.id, 1, 0, [1, 2])
        qm = M.smr.getquorumv()
        assert len(qm) == 3  # quorum value and the two member nids
    finally:
        # Util.tstop('Check output!')
        if M is not None:
            M.kill()
        if cm is not None:
            cm.remove_workspace()
def _test_cm(self):
    cm = None
    try:
        cm = Cm.CM("test_cm")
        assert cm is not None
        cm.create_workspace()
        dir = cm.dir
        assert os.path.exists(dir)
        cm.remove_workspace()
        assert cm.dir is None
        assert not os.path.exists(dir)
        cm = None
    finally:
        if cm is not None:
            cm.remove_workspace()
def test_portscan(self):
    cm = None
    pgs = None
    try:
        cm = Cm.CM("test_portscan")
        cm.create_workspace()
        pg = Pg.PG(0)

        # pgs --> master
        pgs = Pgs.PGS(0, 'localhost', 1900, cm.dir)
        pg.join(pgs, start=True)
        pgs.smr.wait_role(Smr.SMR.MASTER)

        # -----------------------------------------
        # Test bad handshake blocks IP temporarily
        # -----------------------------------------
        for off in range(0, 3):
            # bad handshake
            try:
                conn = Conn.Conn('localhost', 1900 + off)
                resp = conn.do_request('ping')
            except:
                pass
        self._check_conn_blocked()

        # wait for block to be released
        time.sleep(2.0)  # actually 1.5 sec

        # -------------------------------------------------------
        # Can't connect mgmt port SMR_MAX_MGMT_CLIENTS_PER_IP(50)
        # -------------------------------------------------------
        conns = []
        for i in range(50 - 1):  # -1
            conn = Conn.Conn('localhost', 1903)
            resp = conn.do_request('ping')
            assert (len(resp) == 1 and resp[0].startswith('+OK')), resp
            conns.append(conn)
        self._check_conn_blocked()
    finally:
        if pgs is not None:
            pgs.kill_smr()
            pgs.kill_be()
        if cm is not None:
            cm.remove_workspace()
def _test_pg(self):
    cm = None
    pgs1 = None
    pgs2 = None
    try:
        cm = Cm.CM("test_pgs")
        cm.create_workspace()
        pg = Pg.PG(0)

        # pgs1 --> master
        pgs1 = Pgs.PGS(0, 'localhost', 1900, cm.dir)
        pg.join(pgs1, start=True)
        pgs1.smr.wait_role(Smr.SMR.MASTER)

        # pgs2 --> slave
        pgs2 = Pgs.PGS(1, 'localhost', 1910, cm.dir)
        pg.join(pgs2, start=True)
        pgs2.smr.wait_role(Smr.SMR.SLAVE)

        # kill pgs2, check quorum
        pg.leave(pgs2.id, kill=True)
        assert pgs1.smr.getquorum() == 0

        # join pgs2
        pg.join(pgs2, start=True)
        pgs2.smr.wait_role(Smr.SMR.SLAVE)

        # kill pgs1 (pgs2 should take over as master)
        pg.leave(pgs1.id, kill=True)
        pgs2.smr.wait_role(Smr.SMR.MASTER)
        assert pgs2.smr.getquorum() == 0

        # join pgs1
        pg.join(pgs1, start=True)
        pgs1.smr.wait_role(Smr.SMR.SLAVE)
    finally:
        if pgs1 is not None:
            pgs1.kill_smr()
            pgs1.kill_be()
        if pgs2 is not None:
            pgs2.kill_smr()
            pgs2.kill_be()
        if cm is not None:
            cm.remove_workspace()
def test_log_recover_idempotent(self):
    cm = None
    pgs = None
    try:
        cm = Cm.CM("test_log_recover_idempotent")
        cm.create_workspace()
        pg = Pg.PG(0)

        # pgs --> master
        pgs = Pgs.PGS(0, 'localhost', self.BASE_PORT, cm.dir)
        pg.join(pgs, start=True)
        pgs.smr.wait_role(Smr.SMR.MASTER)

        # make some logs
        self.make_some_logs(runtime=10)
        time.sleep(1)

        # remember original sequences
        org_seqs = pgs.smr.getseq_log()

        # kill pgs
        pg.leave(pgs.id, kill=True)

        # log recovery is idempotent
        for i in range(0, 3):
            # due to the crude master election logic in Pg.py we do not
            # launch the backend currently; use PgNew later
            pgs.start_smr()
            seqs = pgs.smr.getseq_log()
            pgs.kill_smr()
            assert seqs['commit'] == org_seqs['commit']
            assert seqs['min'] == org_seqs['min']
            assert seqs['max'] == org_seqs['max']
    finally:
        # Util.tstop('Check output!')
        if pgs is not None:
            pgs.kill_smr()
            pgs.kill_be()
        if cm is not None:
            cm.remove_workspace()
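make_some_logs is another helper not shown in this listing. A minimal sketch of what it might look like, modeled on the client-driven load loops in test_logdelete and test_ping_during_log_send below; the port offset, payload size and tps values are illustrative assumptions:

# Hypothetical sketch of make_some_logs(runtime): drive one client against
# the backend for `runtime` seconds so that replication logs accumulate.
def make_some_logs(self, runtime=10):
    C = Client.Client()
    C.slotid = 0
    C.add(C.slotid, 'localhost', self.BASE_PORT + 9)  # backend port assumed
    C.size(1024, C.slotid)    # payload size (illustrative)
    C.tps(100, C.slotid)      # request rate (illustrative)
    C.start(C.slotid)
    time.sleep(runtime)
    C.stop(C.slotid)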
def reliable_failover():
    try:
        gCtx.cm = Cm.CM("reliable_failover")
        gCtx.cm.create_workspace()
        setup_pgs()
        setup_cluster('reliable_failover')

        # wait until every PGS is initially green (master or slave)
        while True:
            time.sleep(0.5)
            allGreen = True
            for pgs in gCtx.pgs_ary:
                role = pgs.smr.get_role()
                if role != Smr.SMR.MASTER and role != Smr.SMR.SLAVE:
                    allGreen = False
                    break
            if allGreen:
                break

        for pgs in gCtx.pgs_ary:
            pgs.be.init_conn()

        print ("\n\n\n============================================================== START")
        start_clients()
        dirty_progress()
        stop_clients()
        print ("============================================================== END\n\n\n")
    except:
        traceback.print_exc()
        e = sys.exc_info()[0]
        print e
        Util.tstop('CHECK EXCEPTION')
    finally:
        try:
            teardown_cluster('reliable_failover', True)
        except:
            pass
        try:
            teardown_pgs()
        except:
            pass
        if gCtx.cm is not None:
            gCtx.cm.remove_workspace()
def test_broken_log_recover(self):
    '''
    sigkill(9) smr and check recovery works
    '''
    cm = None
    pgs = None
    try:
        cm = Cm.CM("test_broken_log_recover")
        cm.create_workspace()
        pg = Pg.PG(0)

        # pgs --> master
        pgs = Pgs.PGS(0, 'localhost', self.BASE_PORT, cm.dir)
        pg.join(pgs, start=True)
        pgs.smr.wait_role(Smr.SMR.MASTER)

        # make some logs
        self.make_broken_logs(pgs)

        # check recovery is idempotent
        org_seqs = None
        for i in range(0, 3):
            pgs.start_smr()
            seqs = pgs.smr.getseq_log()
            pgs.kill_smr(sigkill=True)
            if org_seqs is None:
                org_seqs = seqs
                print '--- ORG_SEQS'
                print org_seqs
            else:
                print '--- SEQS'
                print seqs
                assert seqs['commit'] == org_seqs['commit']
                assert seqs['min'] == org_seqs['min']
                assert seqs['max'] == org_seqs['max']
    finally:
        if pgs is not None:
            pgs.kill_smr()
            pgs.kill_be()
        if cm is not None:
            cm.remove_workspace()
def test_be_retry(self):
    cm = None
    M = None
    S = None
    try:
        cm = Cm.CM("test_be_retry")
        cm.create_workspace()

        # M (master), S (slave)
        M = _init_pgs(0, 'localhost', 1900, cm.dir)
        M.smr.role_master(M.id, 1, 0)
        S = _init_pgs(1, 'localhost', 1910, cm.dir)
        S.smr.role_slave(S.id, 'localhost', M.base_port, 0)

        count = 1
        while count > 0:
            S.smr.role_lconn()
            seqs = S.smr.getseq_log()
            # Util.tstop('before fault injection!')
            M.smr.fi_error_client_accept()
            S.smr.role_slave(S.id, 'localhost', M.base_port, seqs['max'])
            time.sleep(0.5)

            # right after fault injection the slave's client connection is rejected
            info = M.smr.info()
            # print (info)
            assert (('client' in info) and not ('client_1' in info['client']))

            # the backend retries and eventually reconnects
            time.sleep(1.0)
            info = M.smr.info()
            # print (info)
            assert (('client' in info) and ('client_1' in info['client']))
            count = count - 1
    finally:
        if M is not None:
            M.kill()
        if S is not None:
            S.kill()
        if cm is not None:
            cm.remove_workspace()
def sprint(opt):
    if opt.valgrind_smr:
        Conf.VALGRIND_SMR = True
    if opt.use_mem_log:
        Conf.USE_MEM_LOG = True
    if opt.smr_opt_x:
        Conf.SMR_OPT_X = opt.smr_opt_x
    if opt.smr_bin_path:
        Conf.OVERRIDE_SMR_BIN_PATH = opt.smr_bin_path

    cm = None
    pgs = None
    pgs_ary = []
    client_ary = []
    try:
        cm = Cm.CM("test_be")
        cm.create_workspace()
        pg = Pg.PG(0)
        base_port = opt.base_port

        # master
        master = Pgs.PGS(0, 'localhost', base_port, cm.dir)
        pg.join(master, start=True)
        master.smr.wait_role(Smr.SMR.MASTER)
        pgs_ary.append(master)

        # slaves
        id = 1
        for i in range(opt.num_slaves):
            slave = Pgs.PGS(id, 'localhost', base_port + id * 10, cm.dir)
            pg.join(slave, start=True)
            slave.smr.wait_role(Smr.SMR.SLAVE)
            id = id + 1
            pgs_ary.append(slave)

        # set options and make clients
        slotid = 0
        for pgs in pgs_ary:
            try:
                if opt.pin:
                    if opt.pin == "be":
                        pgs.be.pin('arc_ftrace', 'be.ftrace')
                    elif opt.pin == "smr":
                        pgs.smr.pin('arc_ftrace', 'smr.ftrace')
            except:
                traceback.print_exc()
                e = sys.exc_info()[0]
                print e
            if not opt.no_client:
                client = Client.Client()
                client.slotid = slotid
                client.add(slotid, 'localhost', pgs.base_port + 9)
                client.size(opt.size, slotid)
                client.tps(opt.tps, slotid)
                client_ary.append(client)
                pgs.client = client
                slotid = slotid + 1

        ## Util.tstop('DEBUG POINT')
        print '=====> Start!'
        for client in client_ary:
            client.start(client.slotid)

        # sleep and checkpoint periodically
        runtime = 0
        ckpt_interval = 0
        while runtime < opt.runtime:
            time.sleep(1)
            runtime = runtime + 1
            ckpt_interval = ckpt_interval + 1
            if ckpt_interval >= opt.ckpt_interval:
                for pgs in pgs_ary:
                    print 'checkpoint backend %d' % pgs.base_port
                    pgs.be.ckpt()
                ckpt_interval = 0
        print '=====> Done!'

        while len(client_ary) > 0:
            client = client_ary.pop(0)
            client.stop(client.slotid)
        print '======> Clients Stopped'

        print '========================== RESULT =========================='
        for pgs in pgs_ary:
            print '>>>>>>>>>>>>>>>>>>>>> PGS(%d)' % pgs.id
            print 'seq:', pgs.smr.getseq_log()
            info = pgs.smr.info('all')
            print 'info:\n', json.dumps(info, indent=4, sort_keys=True)
            if hasattr(pgs, 'client'):
                print pgs.client.stat(pgs.client.slotid)
        for pgs in pgs_ary:
            if hasattr(pgs, 'client'):
                print 'PGS(%d)' % pgs.id, pgs.client.stat(pgs.client.slotid)
        Util.tstop('Type to destroy intermediate results')
    except:
        traceback.print_exc()
        e = sys.exc_info()[0]
        print e
    finally:
        while len(client_ary) > 0:
            client = client_ary.pop(0)
            client.kill()
        while len(pgs_ary) > 0:
            pgs = pgs_ary.pop(0)
            pgs.kill_smr()
            pgs.kill_be()
        Util.tstop('CHECK OUTPUT')
        if cm is not None:
            cm.remove_workspace()
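sprint() reads all its knobs from an options object. A sketch of a compatible argparse front end, with flag names inferred from the opt.* attributes accessed above; the defaults and flag spellings are illustrative assumptions, not the repository's actual CLI:

# Hypothetical argparse front end for sprint().
import argparse

def parse_sprint_opts(argv=None):
    p = argparse.ArgumentParser(description='SMR throughput driver (sketch)')
    p.add_argument('--valgrind-smr', dest='valgrind_smr', action='store_true')
    p.add_argument('--use-mem-log', dest='use_mem_log', action='store_true')
    p.add_argument('--smr-opt-x', dest='smr_opt_x', default=None)
    p.add_argument('--smr-bin-path', dest='smr_bin_path', default=None)
    p.add_argument('--base-port', dest='base_port', type=int, default=1900)
    p.add_argument('--num-slaves', dest='num_slaves', type=int, default=2)
    p.add_argument('--size', type=int, default=1024)        # request payload size
    p.add_argument('--tps', type=int, default=1000)         # target requests/sec
    p.add_argument('--runtime', type=int, default=60)       # seconds to run
    p.add_argument('--ckpt-interval', dest='ckpt_interval', type=int, default=10)
    p.add_argument('--no-client', dest='no_client', action='store_true')
    p.add_argument('--pin', choices=['be', 'smr'], default=None)
    return p.parse_args(argv)

# Usage (illustrative): sprint(parse_sprint_opts())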
print('traces', traces, type(traces))
print('traveltime', traveltime, type(traveltime))

# ================================ semblance calculation ================================
t1 = time.time()
traces = traces.reshape(1, nostat * minSampleCount)
traveltime = traveltime.reshape(1, nostat * dimX * dimY)

USE_C_CODE = True
try:
    if USE_C_CODE:
        import Cm
        import CTrig
        start_time = time.time()
        k = Cm.otest(maxp, nostat, nsamp, ntimes, nstep, dimX, dimY, Gmint,
                     new_frequence, minSampleCount, latv, lonv, traveltime,
                     traces)
        print("--- %s seconds ---" % (time.time() - start_time))
    else:
        start_time = time.time()
        k = otest(maxp, nostat, nsamp, ntimes, nstep, dimX, dimY, Gmint,
                  new_frequence, minSampleCount, latv, lonv, traveltime,
                  traces)  # hs
        print("--- %s seconds ---" % (time.time() - start_time))
except:
    print("loaded tttgrid has probably wrong dimensions or stations, "
          "delete ttgrid or exchange")

t2 = time.time()
partSemb = k
def test_be_reconfiguration(self):
    cm = None
    M = None
    C = None
    P = None
    try:
        cm = Cm.CM("test_pgs")
        cm.create_workspace()

        # M(1), S, S
        M = _init_pgs(0, 'localhost', 1900, cm.dir)
        M.smr.role_master(M.id, 1, 0)
        C = _init_pgs(1, 'localhost', 1910, cm.dir)
        C.smr.role_slave(C.id, 'localhost', M.base_port, 0)
        P = _init_pgs(2, 'localhost', 1920, cm.dir)
        P.smr.role_slave(P.id, 'localhost', M.base_port, 0)

        _attach_client(M, 1024, 1000)
        _attach_client(C, 1024, 1000)
        _attach_client(P, 1024, 1000)
        M.client.start(M.client.slotid)
        C.client.start(C.client.slotid)
        P.client.start(P.client.slotid)

        # loop
        loopCount = 100
        for i in range(0, loopCount):
            # M(1) S L
            P.smr.role_lconn()

            # wait some time and check progress
            prev_mseqs = M.smr.getseq_log()
            prev_cseqs = C.smr.getseq_log()
            time.sleep(0.5)
            curr_mseqs = M.smr.getseq_log()
            curr_cseqs = C.smr.getseq_log()
            assert prev_mseqs['max'] < curr_mseqs['max']
            assert prev_cseqs['max'] < curr_cseqs['max']

            # M(1) S S
            ps = P.smr.getseq_log()
            P.smr.role_slave(P.id, 'localhost', M.base_port, ps['max'])

            # L L L
            M.smr.role_lconn()
            C.smr.role_lconn()
            P.smr.role_lconn()

            # swap the roles of M and C
            tmp = M
            M = C
            C = tmp

            # M(1) S S
            ms = M.smr.getseq_log()
            cs = C.smr.getseq_log()
            ps = P.smr.getseq_log()
            M.smr.role_master(M.id, 1, ms['max'])
            C.smr.role_slave(C.id, 'localhost', M.base_port, min(ms['max'], cs['max']))
            P.smr.role_slave(P.id, 'localhost', M.base_port, min(ms['max'], ps['max']))

            # check progress and backend sanity
            mstat = M.client.stat(M.client.slotid)
            cstat = C.client.stat(C.client.slotid)
            pstat = P.client.stat(P.client.slotid)
            print "M ====>" + str(mstat)
            print "C ====>" + str(cstat)
            print "P ====>" + str(pstat)
            mresp = int(mstat['resp'])
            cresp = int(cstat['resp'])
            presp = int(pstat['resp'])
            sleepCount = 0
            while True:
                time.sleep(0.1)
                mstat = M.client.stat(M.client.slotid)
                cstat = C.client.stat(C.client.slotid)
                pstat = P.client.stat(P.client.slotid)
                assert int(mstat['reconn']) == 0 and \
                    int(cstat['reconn']) == 0 and int(pstat['reconn']) == 0
                mr = int(mstat['resp'])
                cr = int(cstat['resp'])
                pr = int(pstat['resp'])
                if mr > mresp and cr > cresp and pr > presp:
                    break
                sleepCount = sleepCount + 1
                assert sleepCount <= 10

        # stop clients
        M.client.stop(M.client.slotid)
        C.client.stop(C.client.slotid)
        P.client.stop(P.client.slotid)
    finally:
        # Util.tstop('Check output!')
        if M is not None:
            M.kill()
        if C is not None:
            C.kill()
        if P is not None:
            P.kill()
        if cm is not None:
            cm.remove_workspace()
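_attach_client, used here and in test_quorum below, is not shown in this listing. A sketch modeled directly on the client setup in sprint() above (the +9 port offset and slot-id bookkeeping are carried over from that function as assumptions):

# Hypothetical sketch of _attach_client(pgs, size, tps), modeled on sprint().
_next_slotid = 0

def _attach_client(pgs, size, tps):
    global _next_slotid
    client = Client.Client()
    client.slotid = _next_slotid
    client.add(client.slotid, 'localhost', pgs.base_port + 9)  # backend port
    client.size(size, client.slotid)   # request payload size
    client.tps(tps, client.slotid)     # target requests per second
    pgs.client = client
    _next_slotid = _next_slotid + 1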
def doCalc(flag, Config, WaveformDict, FilterMetaData, Gmint, Gmaxt,
           TTTGridMap, Folder, Origin, ntimes, switch, ev, arrayfolder,
           syn_in):
    '''
    method for calculating semblance of one station array
    '''
    Logfile.add('PROCESS %d %s' % (flag, ' Enters Semblance Calculation'))
    Logfile.add('MINT : %f MAXT: %f Traveltime' % (Gmint, Gmaxt))

    cfg = ConfigObj(dict=Config)
    cfg_f = FilterCfg(Config)

    timeev = util.str_to_time(ev.time)
    dimX = cfg.dimX()        # ('dimx')
    dimY = cfg.dimY()        # ('dimy')
    winlen = cfg.winlen()    # ('winlen')
    step = cfg.step()        # ('step')
    new_frequence = cfg.newFrequency()  # ('new_frequence')
    forerun = cfg.Int('forerun')
    duration = cfg.Int('duration')

    nostat = len(WaveformDict)
    traveltimes = {}
    recordstarttime = ''
    minSampleCount = 999999999

    ntimes = int((forerun + duration) / step)
    nsamp = int(winlen * new_frequence)
    nstep = int(step * new_frequence)

    from pyrocko import obspy_compat
    from pyrocko import model
    obspy_compat.plant()

    ############################################################################
    calcStreamMap = WaveformDict

    stations = []
    py_trs = []
    lats = []
    lons = []
    for trace in calcStreamMap.iterkeys():
        py_tr = obspy_compat.to_pyrocko_trace(calcStreamMap[trace])
        py_trs.append(py_tr)
        for il in FilterMetaData:
            if str(il) == str(trace):
                szo = model.Station(lat=float(il.lat), lon=float(il.lon),
                                    station=il.sta, network=il.net,
                                    channels=py_tr.channel,
                                    elevation=il.ele, location=il.loc)
                stations.append(szo)
                lats.append(float(il.lat))
                lons.append(float(il.lon))
    array_center = [num.mean(lats), num.mean(lons)]

    # ============================ synthetic BeamForming ============================
    if cfg.Bool('synthetic_test') is True:
        store_id = syn_in.store()
        engine = LocalEngine(store_superdirs=[syn_in.store_superdirs()])
        recordstarttimes = []
        for tracex in calcStreamMap.iterkeys():
            recordstarttimes.append(calcStreamMap[tracex].stats.starttime.timestamp)
            tr_org = obspy_compat.to_pyrocko_trace(calcStreamMap[tracex])
            tmin = tr_org.tmin
        # tmin = num.min(recordstarttimes)

        targets = []
        sources = []
        for st in stations:
            target = Target(lat=st.lat, lon=st.lon, store_id=store_id,
                            codes=(st.network, st.station, st.location, 'BHZ'),
                            tmin=-6900, tmax=6900,
                            interpolation='multilinear',
                            quantity=cfg.quantity())
            targets.append(target)

        if syn_in.nsources() == 1:
            if syn_in.use_specific_stf() is True:
                stf = syn_in.stf()
                exec(stf)
            else:
                stf = STF()
            if syn_in.source() == 'RectangularSource':
                sources.append(RectangularSource(
                    lat=float(syn_in.lat_0()),
                    lon=float(syn_in.lon_0()),
                    east_shift=float(syn_in.east_shift_0()) * 1000.,
                    north_shift=float(syn_in.north_shift_0()) * 1000.,
                    depth=syn_in.depth_syn_0() * 1000.,
                    strike=syn_in.strike_0(),
                    dip=syn_in.dip_0(),
                    rake=syn_in.rake_0(),
                    width=syn_in.width_0() * 1000.,
                    length=syn_in.length_0() * 1000.,
                    nucleation_x=syn_in.nucleation_x_0(),
                    slip=syn_in.slip_0(),
                    nucleation_y=syn_in.nucleation_y_0(),
                    stf=stf,
                    time=util.str_to_time(syn_in.time_0())))
            if syn_in.source() == 'DCSource':
                sources.append(DCSource(
                    lat=float(syn_in.lat_0()),
                    lon=float(syn_in.lon_0()),
                    east_shift=float(syn_in.east_shift_0()) * 1000.,
                    north_shift=float(syn_in.north_shift_0()) * 1000.,
                    depth=syn_in.depth_syn_0() * 1000.,
                    strike=syn_in.strike_0(),
                    dip=syn_in.dip_0(),
                    rake=syn_in.rake_0(),
                    stf=stf,
                    time=util.str_to_time(syn_in.time_0()),
                    magnitude=syn_in.magnitude_0()))
        else:
            for i in range(syn_in.nsources()):
                if syn_in.use_specific_stf() is True:
                    stf = syn_in.stf()
                    exec(stf)
                else:
                    stf = STF()
                if syn_in.source() == 'RectangularSource':
                    sources.append(RectangularSource(
                        lat=float(syn_in.lat_1(i)),
                        lon=float(syn_in.lon_1(i)),
                        east_shift=float(syn_in.east_shift_1(i)) * 1000.,
                        north_shift=float(syn_in.north_shift_1(i)) * 1000.,
                        depth=syn_in.depth_syn_1(i) * 1000.,
                        strike=syn_in.strike_1(i),
                        dip=syn_in.dip_1(i),
                        rake=syn_in.rake_1(i),
                        width=syn_in.width_1(i) * 1000.,
                        length=syn_in.length_1(i) * 1000.,
                        nucleation_x=syn_in.nucleation_x_1(i),
                        slip=syn_in.slip_1(i),
                        nucleation_y=syn_in.nucleation_y_1(i),
                        stf=stf,
                        time=util.str_to_time(syn_in.time_1(i))))
                if syn_in.source() == 'DCSource':
                    sources.append(DCSource(
                        lat=float(syn_in.lat_1(i)),
                        lon=float(syn_in.lon_1(i)),
                        east_shift=float(syn_in.east_shift_1(i)) * 1000.,
                        north_shift=float(syn_in.north_shift_1(i)) * 1000.,
                        depth=syn_in.depth_syn_1(i) * 1000.,
                        strike=syn_in.strike_1(i),
                        dip=syn_in.dip_1(i),
                        rake=syn_in.rake_1(i),
                        stf=stf,
                        time=util.str_to_time(syn_in.time_1(i)),
                        magnitude=syn_in.magnitude_1(i)))
        # source = CombiSource(subsources=sources)

        synthetic_traces = []
        for source in sources:
            response = engine.process(source, targets)
            synthetic_traces_source = response.pyrocko_traces()
            if not synthetic_traces:
                synthetic_traces = synthetic_traces_source
            else:
                for trsource, tr in zip(synthetic_traces_source, synthetic_traces):
                    tr.add(trsource)
        from pyrocko import trace as trld
        # trld.snuffle(synthetic_traces)
        timeev = util.str_to_time(syn_in.time_0())

        if cfg.Bool('synthetic_test_add_noise') is True:
            from noise_addition import add_noise
            trs_orgs = []
            calcStreamMapsyn = calcStreamMap.copy()
            # from pyrocko import trace
            for tracex in calcStreamMapsyn.iterkeys():
                for trl in synthetic_traces:
                    if str(trl.name()[4:12]) == str(tracex[4:]) or \
                            str(trl.name()[3:13]) == str(tracex[3:]) or \
                            str(trl.name()[3:11]) == str(tracex[3:]) or \
                            str(trl.name()[3:14]) == str(tracex[3:]):
                        tr_org = obspy_compat.to_pyrocko_trace(calcStreamMapsyn[tracex])
                        tr_org.downsample_to(2.0)
                        trs_orgs.append(tr_org)
            store_id = syn_in.store()
            engine = LocalEngine(store_superdirs=[syn_in.store_superdirs()])
            synthetic_traces = add_noise(trs_orgs, engine, source.pyrocko_event(),
                                         stations, store_id, phase_def='P')

        trs_org = []
        trs_orgs = []
        from pyrocko import trace
        fobj = os.path.join(arrayfolder, 'shift.dat')
        calcStreamMapsyn = calcStreamMap.copy()
        for tracex in calcStreamMapsyn.iterkeys():
            for trl in synthetic_traces:
                if str(trl.name()[4:12]) == str(tracex[4:]) or \
                        str(trl.name()[3:13]) == str(tracex[3:]) or \
                        str(trl.name()[3:11]) == str(tracex[3:]) or \
                        str(trl.name()[3:14]) == str(tracex[3:]):
                    mod = trl
                    recordstarttime = calcStreamMapsyn[tracex].stats.starttime.timestamp
                    recordendtime = calcStreamMapsyn[tracex].stats.endtime.timestamp
                    tr_org = obspy_compat.to_pyrocko_trace(calcStreamMapsyn[tracex])
                    if switch == 0:
                        tr_org.bandpass(4, cfg_f.flo(), cfg_f.fhi())
                    elif switch == 1:
                        tr_org.bandpass(4, cfg_f.flo2(), cfg_f.fhi2())
                    trs_orgs.append(tr_org)
                    tr_org_add = mod.chop(recordstarttime, recordendtime, inplace=False)
                    synthetic_obs_tr = obspy_compat.to_obspy_trace(tr_org_add)
                    calcStreamMapsyn[tracex] = synthetic_obs_tr
                    trs_org.append(tr_org_add)
        calcStreamMap = calcStreamMapsyn

    if cfg.Bool('shift_by_phase_pws') == True:
        calcStreamMapshifted = calcStreamMap.copy()
        from obspy.core import stream
        stream = stream.Stream()
        for trace in calcStreamMapshifted.iterkeys():
            stream.append(calcStreamMapshifted[trace])
        pws_stack = PWS_stack([stream], weight=2, normalize=True)
        for tr in pws_stack:
            for trace in calcStreamMapshifted.iterkeys():
                calcStreamMapshifted[trace] = tr
        calcStreamMap = calcStreamMapshifted

    if cfg.Bool('shift_by_phase_cc') is True:
        from stacking import align_traces
        calcStreamMapshifted = calcStreamMap.copy()
        list_tr = []
        for trace in calcStreamMapshifted.iterkeys():
            tr_org = calcStreamMapshifted[trace]
            list_tr.append(tr_org)
        shifts, ccs = align_traces(list_tr, 10, master=False)
        for shift in shifts:
            for trace in calcStreamMapshifted.iterkeys():
                tr_org = obspy_compat.to_pyrocko_trace(calcStreamMapshifted[trace])
                tr_org.shift(shift)
                shifted = obspy_compat.to_obspy_trace(tr_org)
                calcStreamMapshifted[trace] = shifted
        calcStreamMap = calcStreamMapshifted

    if cfg.Bool('shift_by_phase_onset') is True:
        pjoin = os.path.join
        timeev = util.str_to_time(ev.time)
        trs_orgs = []
        calcStreamMapshifted = calcStreamMap.copy()
        for trace in calcStreamMapshifted.iterkeys():
            tr_org = obspy_compat.to_pyrocko_trace(calcStreamMapshifted[trace])
            trs_orgs.append(tr_org)

        timing = CakeTiming(
            phase_selection='first(p|P|PP|P(cmb)P(icb)P(icb)p(cmb)p)-20',
            fallback_time=100.)
        traces = trs_orgs
        event = model.Event(lat=float(ev.lat), lon=float(ev.lon),
                            depth=ev.depth * 1000., time=timeev)
        directory = arrayfolder
        bf = BeamForming(stations, traces, normalize=True)
        shifted_traces = bf.process(event=event, timing=timing,
                                    fn_dump_center=pjoin(directory, 'array_center.pf'),
                                    fn_beam=pjoin(directory, 'beam.mseed'))
        i = 0
        store_id = syn_in.store()
        engine = LocalEngine(store_superdirs=[syn_in.store_superdirs()])
        for tracex in calcStreamMapshifted.iterkeys():
            for trl in shifted_traces:
                if str(trl.name()[4:12]) == str(tracex[4:]) or \
                        str(trl.name()[3:13]) == str(tracex[3:]) or \
                        str(trl.name()[3:11]) == str(tracex[3:]) or \
                        str(trl.name()[3:14]) == str(tracex[3:]):
                    mod = trl
                    recordstarttime = calcStreamMapshifted[tracex].stats.starttime.timestamp
                    recordendtime = calcStreamMapshifted[tracex].stats.endtime.timestamp
                    tr_org = obspy_compat.to_pyrocko_trace(calcStreamMapshifted[tracex])
                    tr_org_add = mod.chop(recordstarttime, recordendtime, inplace=False)
                    shifted_obs_tr = obspy_compat.to_obspy_trace(tr_org_add)
                    calcStreamMapshifted[tracex] = shifted_obs_tr
        calcStreamMap = calcStreamMapshifted

    weight = 1.
    if cfg.Bool('weight_by_noise') is True:
        from noise_analyser import analyse
        pjoin = os.path.join
        timeev = util.str_to_time(ev.time)
        trs_orgs = []
        calcStreamMapshifted = calcStreamMap.copy()
        for trace in calcStreamMapshifted.iterkeys():
            tr_org = obspy_compat.to_pyrocko_trace(calcStreamMapshifted[trace])
            trs_orgs.append(tr_org)

        timing = CakeTiming(
            phase_selection='first(p|P|PP|P(cmb)P(icb)P(icb)p(cmb)p)-20',
            fallback_time=100.)
        traces = trs_orgs
        event = model.Event(lat=float(ev.lat), lon=float(ev.lon),
                            depth=ev.depth * 1000., time=timeev)
        directory = arrayfolder
        bf = BeamForming(stations, traces, normalize=True)
        shifted_traces = bf.process(event=event, timing=timing,
                                    fn_dump_center=pjoin(directory, 'array_center.pf'),
                                    fn_beam=pjoin(directory, 'beam.mseed'))
        i = 0
        store_id = syn_in.store()
        engine = LocalEngine(store_superdirs=[syn_in.store_superdirs()])
        weight = analyse(shifted_traces, engine, event, stations,
                         100., store_id, nwindows=1,
                         check_events=True, phase_def='P')

    if cfg.Bool('array_response') is True:
        from obspy.signal import array_analysis
        from obspy.core import stream
        ntimesr = int((forerun + duration) / step)
        nsampr = int(winlen)
        nstepr = int(step)
        # sliding window properties
        sll_x = -3.0
        slm_x = 3.0
        sll_y = -3.0
        slm_y = 3.0
        sl_s = 0.03
        # frequency properties
        frqlow = 1.0
        frqhigh = 8.0
        prewhiten = 0
        # restrict output
        semb_thres = -1e9
        vel_thres = -1e9
        # NOTE: stime and etime must be defined in the enclosing scope;
        # these self-assignments are kept from the original code.
        stime = stime
        etime = etime
        stream_arr = stream.Stream()
        for trace in calcStreamMapshifted.iterkeys():
            stream_arr.append(calcStreamMapshifted[trace])
        results = array_analysis.array_processing(stream_arr, nsamp, nstep,
                                                  sll_x, slm_x, sll_y, slm_y,
                                                  sl_s, semb_thres, vel_thres,
                                                  frqlow, frqhigh, stime,
                                                  etime, prewhiten)
        timestemp = results[0]
        relative_relpow = results[1]
        absolute_relpow = results[2]

    for trace in calcStreamMap.iterkeys():
        recordstarttime = calcStreamMap[trace].stats.starttime
        d = calcStreamMap[trace].stats.starttime
        d = d.timestamp
        if calcStreamMap[trace].stats.npts < minSampleCount:
            minSampleCount = calcStreamMap[trace].stats.npts

    ###########################################################################
    traces = num.ndarray(shape=(len(calcStreamMap), minSampleCount), dtype=float)
    traveltime = num.ndarray(shape=(len(calcStreamMap), dimX * dimY), dtype=float)
    latv = num.ndarray(dimX * dimY, dtype=float)
    lonv = num.ndarray(dimX * dimY, dtype=float)
    ###########################################################################

    c = 0
    streamCounter = 0
    for key in calcStreamMap.iterkeys():
        streamID = key
        c2 = 0
        for o in calcStreamMap[key]:
            if c2 < minSampleCount:
                traces[c][c2] = o
                c2 += 1
        for key in TTTGridMap.iterkeys():
            if streamID == key:
                traveltimes[streamCounter] = TTTGridMap[key]
            else:
                "NEIN", streamID, key
        if streamCounter not in traveltimes:
            continue  # hs : thread crashed before

        g = traveltimes[streamCounter]
        dimZ = g.dimZ
        mint = g.mint
        gridElem = g.GridArray
        for x in range(dimX):
            for y in range(dimY):
                elem = gridElem[x, y]
                traveltime[c][x * dimY + y] = elem.tt
                latv[x * dimY + y] = elem.lat
                lonv[x * dimY + y] = elem.lon
        c += 1
        streamCounter += 1

    ################ CALCULATE PARAMETER FOR SEMBLANCE CALCULATION ########
    nsamp = winlen * new_frequence
    nstep = step * new_frequence
    migpoints = dimX * dimY
    dimZ = 0
    maxp = int(Config['ncore'])

    Logfile.add('PROCESS %d NTIMES: %d' % (flag, ntimes))

    if False:
        print('nostat ', nostat, type(nostat))
        print('nsamp ', nsamp, type(nsamp))
        print('ntimes ', ntimes, type(ntimes))
        print('nstep ', nstep, type(nstep))
        print('dimX ', dimX, type(dimX))
        print('dimY ', dimY, type(dimY))
        print('mint ', Gmint, type(mint))
        print('new_freq ', new_frequence, type(new_frequence))
        print('minSampleCount ', minSampleCount, type(minSampleCount))
        print('latv ', latv, type(latv))
        print('traces', traces, type(traces))

    # =================== compressed sensing =================================
    try:
        cs = cfg.cs()
    except:
        cs = 0
    if cs == 1:
        csmaxvaluev = num.ndarray(ntimes, dtype=float)
        csmaxlatv = num.ndarray(ntimes, dtype=float)
        csmaxlonv = num.ndarray(ntimes, dtype=float)
        folder = Folder['semb']
        fobjcsmax = open(os.path.join(folder, 'csmax_%s.txt' % (switch)), 'w')
        traveltimes = traveltime.reshape(1, nostat * dimX * dimY)
        traveltime2 = toMatrix(traveltimes, dimX * dimY)  # for relstart
        traveltime = traveltime.reshape(dimX * dimY, nostat)
        import matplotlib as mpl
        import scipy.optimize as spopt
        import scipy.fftpack as spfft
        import scipy.ndimage as spimg
        import cvxpy as cvx
        import matplotlib.pyplot as plt
        A = spfft.idct(traveltime, norm='ortho', axis=0)
        n = (nostat * dimX * dimY)
        vx = cvx.Variable(dimX * dimY)
        res = cvx.Variable(1)
        objective = cvx.Minimize(cvx.norm(res, 1))
        back2 = num.zeros([dimX, dimY])
        l = int(nsamp)
        fobj = open(os.path.join(folder,
                    '%s-%s_%03d.cs' % (switch, Origin['depth'], l)), 'w')
        for i in range(ntimes):
            ydata = []
            try:
                for tr in traces:
                    relstart = int((dimX * dimY - mint) * new_frequence + 0.5) + i * nstep
                    tr = spfft.idct(tr[relstart + i:relstart + i + dimX * dimY],
                                    norm='ortho', axis=0)
                    ydata.append(tr)
                ydata = num.asarray(ydata)
                ydata = ydata.reshape(dimX * dimY, nostat)
                constraints = [res == cvx.sum_entries(0 + num.sum(
                    [ydata[:, x] - A[:, x] * vx for x in range(nostat)]))]
                prob = cvx.Problem(objective, constraints)
                result = prob.solve(verbose=False, max_iters=200)
                x = num.array(vx.value)
                x = num.squeeze(x)
                back1 = x.reshape(dimX, dimY)
                sig = spfft.idct(x, norm='ortho', axis=0)
                back2 = back2 + back1
                xs = num.array(res.value)
                xs = num.squeeze(xs)
                max_cs = num.max(back1)
                idx = num.where(back1 == back1.max())
                csmaxvaluev[i] = max_cs
                csmaxlatv[i] = latv[idx[0]]
                csmaxlonv[i] = lonv[idx[1]]
                fobj.write('%.5f %.5f %.20f\n' % (latv[idx[0]], lonv[idx[1]], max_cs))
                fobjcsmax.write('%.5f %.5f %.20f\n' % (latv[idx[0]], lonv[idx[1]], max_cs))
            except:
                pass
        fobj.close()
        fobjcsmax.close()

    # ============================ semblance calculation ============================
    t1 = time.time()
    traces = traces.reshape(1, nostat * minSampleCount)
    traveltimes = traveltime.reshape(1, nostat * dimX * dimY)

    USE_C_CODE = False
    # try:
    if USE_C_CODE:
        import Cm
        import CTrig
        start_time = time.time()
        k = Cm.otest(maxp, nostat, nsamp, ntimes, nstep, dimX, dimY, Gmint,
                     new_frequence, minSampleCount, latv, lonv, traveltimes,
                     traces)
        print("--- %s seconds ---" % (time.time() - start_time))
    else:
        start_time = time.time()
        ntimes = int((forerun + duration) / step)
        nsamp = int(winlen)
        nstep = int(step)
        Gmint = cfg.Int('forerun')
        k = otest(maxp, nostat, nsamp, ntimes, nstep, dimX, dimY, Gmint,
                  new_frequence, minSampleCount, latv, lonv, traveltimes,
                  traces, calcStreamMap, timeev)
        print("--- %s seconds ---" % (time.time() - start_time))
    # except ValueError:
    #     k = Cm.otest(maxp, nostat, nsamp, ntimes, nstep, dimX, dimY, Gmint,
    #                  new_frequence, minSampleCount, latv, lonv, traveltimes,
    #                  traces)
    #     print("loaded tttgrid has probably wrong dimensions or stations, "
    #           "delete ttgrid or exchange is recommended")

    t2 = time.time()
    Logfile.add('%s took %0.3f s' % ('CALC:', (t2 - t1)))
    partSemb = k
    partSemb = partSemb.reshape(ntimes, migpoints)
    return partSemb, weight, array_center
def doCalc_syn(flag, Config, WaveformDict, FilterMetaData, Gmint, Gmaxt,
               TTTGridMap, Folder, Origin, ntimes, switch, ev, arrayfolder,
               syn_in, parameter):
    '''
    method for calculating semblance of one station array
    '''
    Logfile.add('PROCESS %d %s' % (flag, ' Enters Semblance Calculation'))
    Logfile.add('MINT : %f MAXT: %f Traveltime' % (Gmint, Gmaxt))

    cfg = ConfigObj(dict=Config)

    dimX = cfg.dimX()        # ('dimx')
    dimY = cfg.dimY()        # ('dimy')
    winlen = cfg.winlen()    # ('winlen')
    step = cfg.step()        # ('step')
    new_frequence = cfg.newFrequency()  # ('new_frequence')
    forerun = cfg.Int('forerun')
    duration = cfg.Int('duration')
    gridspacing = cfg.Float('gridspacing')

    nostat = len(WaveformDict)
    traveltimes = {}
    recordstarttime = ''
    minSampleCount = 999999999

    if cfg.UInt('forerun') > 0:
        ntimes = int((cfg.UInt('forerun') + cfg.UInt('duration')) / cfg.UInt('step'))
    else:
        ntimes = int((cfg.UInt('duration')) / cfg.UInt('step'))
    nsamp = int(winlen * new_frequence)
    nstep = int(step * new_frequence)

    from pyrocko import obspy_compat
    from pyrocko import orthodrome, model
    obspy_compat.plant()

    ############################################################################
    calcStreamMap = WaveformDict

    stations = []
    py_trs = []
    for trace in calcStreamMap.keys():
        py_tr = obspy_compat.to_pyrocko_trace(calcStreamMap[trace])
        py_trs.append(py_tr)
        for il in FilterMetaData:
            if str(il) == str(trace):
                szo = model.Station(lat=il.lat, lon=il.lon,
                                    station=il.sta, network=il.net,
                                    channels=py_tr.channel,
                                    elevation=il.ele, location=il.loc)
                stations.append(szo)  # right number of stations?

    store_id = syn_in.store()
    engine = LocalEngine(store_superdirs=[syn_in.store_superdirs()])

    targets = []
    for st in stations:
        target = Target(lat=st.lat, lon=st.lon, store_id=store_id,
                        codes=(st.network, st.station, st.location, 'BHZ'),
                        tmin=-1900, tmax=3900,
                        interpolation='multilinear',
                        quantity=cfg.quantity())
        targets.append(target)

    if syn_in.nsources() == 1:
        if syn_in.use_specific_stf() is True:
            stf = syn_in.stf()
            exec(stf)
        else:
            stf = STF()
        if syn_in.source() == 'RectangularSource':
            source = RectangularSource(
                lat=float(syn_in.lat_0()),
                lon=float(syn_in.lon_0()),
                depth=syn_in.depth_syn_0() * 1000.,
                strike=syn_in.strike_0(),
                dip=syn_in.dip_0(),
                rake=syn_in.rake_0(),
                width=syn_in.width_0() * 1000.,
                length=syn_in.length_0() * 1000.,
                nucleation_x=syn_in.nucleation_x_0(),
                slip=syn_in.slip_0(),
                nucleation_y=syn_in.nucleation_y_0(),
                stf=stf,
                time=util.str_to_time(syn_in.time_0()))
        if syn_in.source() == 'DCSource':
            source = DCSource(
                lat=float(syn_in.lat_0()),
                lon=float(syn_in.lon_0()),
                depth=syn_in.depth_syn_0() * 1000.,
                strike=syn_in.strike_0(),
                dip=syn_in.dip_0(),
                rake=syn_in.rake_0(),
                stf=stf,
                time=util.str_to_time(syn_in.time_0()),
                magnitude=syn_in.magnitude_0())
    else:
        sources = []
        for i in range(syn_in.nsources()):
            if syn_in.use_specific_stf() is True:
                stf = syn_in.stf()
                exec(stf)
            else:
                stf = STF()
            if syn_in.source() == 'RectangularSource':
                sources.append(RectangularSource(
                    lat=float(syn_in.lat_1(i)),
                    lon=float(syn_in.lon_1(i)),
                    depth=syn_in.depth_syn_1(i) * 1000.,
                    strike=syn_in.strike_1(i),
                    dip=syn_in.dip_1(i),
                    rake=syn_in.rake_1(i),
                    width=syn_in.width_1(i) * 1000.,
                    length=syn_in.length_1(i) * 1000.,
                    nucleation_x=syn_in.nucleation_x_1(i),
                    slip=syn_in.slip_1(i),
                    nucleation_y=syn_in.nucleation_y_1(i),
                    stf=stf,
                    time=util.str_to_time(syn_in.time_1(i))))
            if syn_in.source() == 'DCSource':
                sources.append(DCSource(
                    lat=float(syn_in.lat_1(i)),
                    lon=float(syn_in.lon_1(i)),
                    depth=syn_in.depth_1(i) * 1000.,
                    strike=syn_in.strike_1(i),
                    dip=syn_in.dip_1(i),
                    rake=syn_in.rake_1(i),
                    stf=stf,
                    time=util.str_to_time(syn_in.time_1(i)),
                    magnitude=syn_in.magnitude_1(i)))
        source = CombiSource(subsources=sources)

    response = engine.process(source, targets)
    synthetic_traces = response.pyrocko_traces()

    if cfg.Bool('synthetic_test_add_noise') is True:
        from noise_addition import add_noise
        trs_orgs = []
        calcStreamMapsyn = calcStreamMap.copy()
        # from pyrocko import trace
        for tracex in calcStreamMapsyn.keys():
            for trl in synthetic_traces:
                if str(trl.name()[4:12]) == str(tracex[4:]):
                    tr_org = obspy_compat.to_pyrocko_trace(calcStreamMapsyn[tracex])
                    tr_org.downsample_to(2.0)
                    trs_orgs.append(tr_org)
        store_id = syn_in.store()
        engine = LocalEngine(store_superdirs=[syn_in.store_superdirs()])
        synthetic_traces = add_noise(trs_orgs, engine, source.pyrocko_event(),
                                     stations, store_id, phase_def='P')

    trs_org = []
    trs_orgs = []
    fobj = os.path.join(arrayfolder, 'shift.dat')
    xy = num.loadtxt(fobj, usecols=1, delimiter=',')
    calcStreamMapsyn = calcStreamMap.copy()
    # from pyrocko import trace
    for tracex in calcStreamMapsyn.keys():
        for trl in synthetic_traces:
            if str(trl.name()[4:12]) == str(tracex[4:]):
                mod = trl
                recordstarttime = calcStreamMapsyn[tracex].stats.starttime.timestamp
                recordendtime = calcStreamMapsyn[tracex].stats.endtime.timestamp
                tr_org = obspy_compat.to_pyrocko_trace(calcStreamMapsyn[tracex])
                trs_orgs.append(tr_org)
                tr_org_add = mod.chop(recordstarttime, recordendtime, inplace=False)
                synthetic_obs_tr = obspy_compat.to_obspy_trace(tr_org_add)
                calcStreamMapsyn[tracex] = synthetic_obs_tr
                trs_org.append(tr_org_add)
    calcStreamMap = calcStreamMapsyn

    if cfg.Bool('shift_by_phase_pws') == True:
        calcStreamMapshifted = calcStreamMap.copy()
        from obspy.core import stream
        stream = stream.Stream()
        for trace in calcStreamMapshifted.keys():
            stream.append(calcStreamMapshifted[trace])
        pws_stack = PWS_stack([stream], weight=2, normalize=True)
        for tr in pws_stack:
            for trace in calcStreamMapshifted.keys():
                calcStreamMapshifted[trace] = tr
        calcStreamMap = calcStreamMapshifted

    if cfg.Bool('shift_by_phase_onset') == True:
        pjoin = os.path.join
        timeev = util.str_to_time(ev.time)
        trs_orgs = []
        calcStreamMapshifted = calcStreamMap.copy()
        for trace in calcStreamMapshifted.keys():
            tr_org = obspy_compat.to_pyrocko_trace(calcStreamMapshifted[trace])
            trs_orgs.append(tr_org)

        timing = CakeTiming(
            phase_selection='first(p|P|PP|P(cmb)P(icb)P(icb)p(cmb)p)-20',
            fallback_time=100.)
        traces = trs_orgs
        event = model.Event(lat=float(ev.lat), lon=float(ev.lon),
                            depth=ev.depth * 1000., time=timeev)
        directory = arrayfolder
        bf = BeamForming(stations, traces, normalize=True)
        shifted_traces = bf.process(event=event, timing=timing,
                                    fn_dump_center=pjoin(directory, 'array_center.pf'),
                                    fn_beam=pjoin(directory, 'beam.mseed'))
        i = 0
        store_id = syn_in.store()
        engine = LocalEngine(store_superdirs=[syn_in.store_superdirs()])
        for trace in calcStreamMapshifted.keys():
            recordstarttime = calcStreamMapshifted[trace].stats.starttime.timestamp
            recordendtime = calcStreamMapshifted[trace].stats.endtime.timestamp
            mod = shifted_traces[i]
            extracted = mod.chop(recordstarttime, recordendtime, inplace=False)
            shifted_obs_tr = obspy_compat.to_obspy_trace(extracted)
            calcStreamMapshifted[trace] = shifted_obs_tr
            i = i + 1
        calcStreamMap = calcStreamMapshifted

    weight = 0.
    if cfg.Bool('weight_by_noise') == True:
        from noise_analyser import analyse
        pjoin = os.path.join
        timeev = util.str_to_time(ev.time)
        trs_orgs = []
        calcStreamMapshifted = calcStreamMap.copy()
        for trace in calcStreamMapshifted.keys():
            tr_org = obspy_compat.to_pyrocko_trace(calcStreamMapshifted[trace])
            trs_orgs.append(tr_org)

        timing = CakeTiming(
            phase_selection='first(p|P|PP|P(cmb)P(icb)P(icb)p(cmb)p)-20',
            fallback_time=100.)
        traces = trs_orgs
        event = model.Event(lat=float(ev.lat), lon=float(ev.lon),
                            depth=ev.depth * 1000., time=timeev)
        directory = arrayfolder
        bf = BeamForming(stations, traces, normalize=True)
        shifted_traces = bf.process(event=event, timing=timing,
                                    fn_dump_center=pjoin(directory, 'array_center.pf'),
                                    fn_beam=pjoin(directory, 'beam.mseed'))
        i = 0
        store_id = syn_in.store()
        engine = LocalEngine(store_superdirs=[syn_in.store_superdirs()])
        weight = analyse(shifted_traces, engine, event, stations,
                         100., store_id, nwindows=1,
                         check_events=True, phase_def='P')

    for trace in calcStreamMap.keys():
        recordstarttime = calcStreamMap[trace].stats.starttime
        d = calcStreamMap[trace].stats.starttime
        d = d.timestamp
        if calcStreamMap[trace].stats.npts < minSampleCount:
            minSampleCount = calcStreamMap[trace].stats.npts

    ############################################################################
    traces = num.ndarray(shape=(len(calcStreamMap), minSampleCount), dtype=float)
    traveltime = num.ndarray(shape=(len(calcStreamMap), dimX * dimY), dtype=float)
    latv = num.ndarray(dimX * dimY, dtype=float)
    lonv = num.ndarray(dimX * dimY, dtype=float)
    ############################################################################

    c = 0
    streamCounter = 0
    for key in calcStreamMap.keys():
        streamID = key
        c2 = 0
        for o in calcStreamMap[key]:
            if c2 < minSampleCount:
                traces[c][c2] = o
                c2 += 1
        for key in TTTGridMap.keys():
            if streamID == key:
                traveltimes[streamCounter] = TTTGridMap[key]
            else:
                "NEIN", streamID, key
        if streamCounter not in traveltimes:
            continue  # hs : thread crashed before

        g = traveltimes[streamCounter]
        dimZ = g.dimZ
        mint = g.mint
        maxt = g.maxt
        Latul = g.Latul
        Lonul = g.Lonul
        Lator = g.Lator
        Lonor = g.Lonor
        gridElem = g.GridArray
        for x in range(dimX):
            for y in range(dimY):
                elem = gridElem[x, y]
                traveltime[c][x * dimY + y] = elem.tt
                latv[x * dimY + y] = elem.lat
                lonv[x * dimY + y] = elem.lon
        c += 1
        streamCounter += 1

    ############## CALCULATE PARAMETER FOR SEMBLANCE CALCULATION ##################
    nsamp = winlen * new_frequence
    nstep = int(step * new_frequence)
    migpoints = dimX * dimY
    dimZ = 0
    new_frequence = cfg.newFrequency()  # ['new_frequence']
    maxp = int(Config['ncore'])

    Logfile.add('PROCESS %d NTIMES: %d' % (flag, ntimes))

    if False:
        print('nostat ', nostat, type(nostat))
        print('nsamp ', nsamp, type(nsamp))
        print('ntimes ', ntimes, type(ntimes))
        print('nstep ', nstep, type(nstep))
        print('dimX ', dimX, type(dimX))
        print('dimY ', dimY, type(dimY))
        print('mint ', Gmint, type(mint))
        print('new_freq ', new_frequence, type(new_frequence))
        print('minSampleCount ', minSampleCount, type(minSampleCount))
        print('latv ', latv, type(latv))
        print('traces', traces, type(traces))
        print('traveltime', traveltime, type(traveltime))

    # ============================ semblance calculation ============================
    t1 = time.time()
    traces = traces.reshape(1, nostat * minSampleCount)
    traveltime = traveltime.reshape(1, nostat * dimX * dimY)

    USE_C_CODE = True
    try:
        if USE_C_CODE:
            import Cm
            import CTrig
            start_time = time.time()
            k = Cm.otest(maxp, nostat, nsamp, ntimes, nstep, dimX, dimY, Gmint,
                         new_frequence, minSampleCount, latv, lonv,
                         traveltime, traces)
            print("--- %s seconds ---" % (time.time() - start_time))
        else:
            start_time = time.time()
            k = otest(maxp, nostat, nsamp, ntimes, nstep, dimX, dimY, Gmint,
                      new_frequence, minSampleCount, latv, lonv,
                      traveltime, traces)  # hs
            print("--- %s seconds ---" % (time.time() - start_time))
    except:
        print("loaded tttgrid has probably wrong dimensions or stations, "
              "delete ttgrid or exchange")

    t2 = time.time()
    partSemb = k
    partSemb_syn = partSemb.reshape(ntimes, migpoints)
    return partSemb_syn
def doCalc(flag, Config, WaveformDict, FilterMetaData, Gmint, Gmaxt,
           TTTGridMap, Folder, Origin, ntimes, switch, ev, arrayfolder,
           syn_in):
    '''
    method for calculating semblance of one station array
    '''
    Logfile.add('PROCESS %d %s' % (flag, ' Enters Semblance Calculation'))
    Logfile.add('MINT : %f MAXT: %f Traveltime' % (Gmint, Gmaxt))

    cfg = ConfigObj(dict=Config)

    dimX = cfg.dimX()        # ('dimx')
    dimY = cfg.dimY()        # ('dimy')
    winlen = cfg.winlen()    # ('winlen')
    step = cfg.step()        # ('step')
    new_frequence = cfg.newFrequency()  # ('new_frequence')
    forerun = cfg.Int('forerun')
    duration = cfg.Int('duration')
    gridspacing = cfg.Float('gridspacing')

    nostat = len(WaveformDict)
    traveltimes = {}
    recordstarttime = ''
    minSampleCount = 999999999

    ntimes = int((forerun + duration) / step)
    nsamp = int(winlen * new_frequence)
    nstep = int(step * new_frequence)

    from pyrocko import obspy_compat
    from pyrocko import orthodrome, model
    obspy_compat.plant()

    ############################################################################
    calcStreamMap = WaveformDict

    stations = []
    py_trs = []
    for trace in calcStreamMap.iterkeys():
        py_tr = obspy_compat.to_pyrocko_trace(calcStreamMap[trace])
        py_trs.append(py_tr)
        for il in FilterMetaData:
            if str(il) == str(trace):
                szo = model.Station(lat=il.lat, lon=il.lon,
                                    station=il.sta, network=il.net,
                                    channels=py_tr.channel,
                                    elevation=il.ele, location=il.loc)
                stations.append(szo)  # right number of stations?

    # ============================ synthetic BeamForming ============================
    if cfg.Bool('shift_by_phase_pws') == True:
        calcStreamMapshifted = calcStreamMap.copy()
        from obspy.core import stream
        stream = stream.Stream()
        for trace in calcStreamMapshifted.iterkeys():
            stream.append(calcStreamMapshifted[trace])
        pws_stack = PWS_stack([stream], weight=2, normalize=True)
        for tr in pws_stack:
            for trace in calcStreamMapshifted.iterkeys():
                calcStreamMapshifted[trace] = tr
        calcStreamMap = calcStreamMapshifted

    if cfg.Bool('shift_by_phase_onset') == True:
        pjoin = os.path.join
        timeev = util.str_to_time(ev.time)
        trs_orgs = []
        calcStreamMapshifted = calcStreamMap.copy()
        for trace in calcStreamMapshifted.iterkeys():
            tr_org = obspy_compat.to_pyrocko_trace(calcStreamMapshifted[trace])
            trs_orgs.append(tr_org)

        timing = CakeTiming(
            phase_selection='first(p|P|PP|P(cmb)P(icb)P(icb)p(cmb)p)-20',
            fallback_time=100.)
        traces = trs_orgs
        event = model.Event(lat=float(ev.lat), lon=float(ev.lon),
                            depth=ev.depth * 1000., time=timeev)
        directory = arrayfolder
        bf = BeamForming(stations, traces, normalize=True)
        shifted_traces = bf.process(event=event, timing=timing,
                                    fn_dump_center=pjoin(directory, 'array_center.pf'),
                                    fn_beam=pjoin(directory, 'beam.mseed'))
        i = 0
        store_id = syn_in.store()
        engine = LocalEngine(store_superdirs=[syn_in.store_superdirs()])
        for trace in calcStreamMapshifted.iterkeys():
            recordstarttime = calcStreamMapshifted[trace].stats.starttime.timestamp
            recordendtime = calcStreamMapshifted[trace].stats.endtime.timestamp
            mod = shifted_traces[i]
            extracted = mod.chop(recordstarttime, recordendtime, inplace=False)
            shifted_obs_tr = obspy_compat.to_obspy_trace(extracted)
            calcStreamMapshifted[trace] = shifted_obs_tr
            i = i + 1
        calcStreamMap = calcStreamMapshifted

    weight = 0.
    if cfg.Bool('weight_by_noise') == True:
        from noise_analyser import analyse
        pjoin = os.path.join
        timeev = util.str_to_time(ev.time)
        trs_orgs = []
        calcStreamMapshifted = calcStreamMap.copy()
        for trace in calcStreamMapshifted.iterkeys():
            tr_org = obspy_compat.to_pyrocko_trace(calcStreamMapshifted[trace])
            trs_orgs.append(tr_org)

        timing = CakeTiming(
            phase_selection='first(p|P|PP|P(cmb)P(icb)P(icb)p(cmb)p)-20',
            fallback_time=100.)
        traces = trs_orgs
        event = model.Event(lat=float(ev.lat), lon=float(ev.lon),
                            depth=ev.depth * 1000., time=timeev)
        directory = arrayfolder
        bf = BeamForming(stations, traces, normalize=True)
        shifted_traces = bf.process(event=event, timing=timing,
                                    fn_dump_center=pjoin(directory, 'array_center.pf'),
                                    fn_beam=pjoin(directory, 'beam.mseed'))
        i = 0
        store_id = syn_in.store()
        engine = LocalEngine(store_superdirs=[syn_in.store_superdirs()])
        weight = analyse(shifted_traces, engine, event, stations,
                         100., store_id, nwindows=1,
                         check_events=True, phase_def='P')

    for trace in calcStreamMap.iterkeys():
        recordstarttime = calcStreamMap[trace].stats.starttime
        d = calcStreamMap[trace].stats.starttime
        d = d.timestamp
        if calcStreamMap[trace].stats.npts < minSampleCount:
            minSampleCount = calcStreamMap[trace].stats.npts

    ############################################################################
    traces = num.ndarray(shape=(len(calcStreamMap), minSampleCount), dtype=float)
    traveltime = num.ndarray(shape=(len(calcStreamMap), dimX * dimY), dtype=float)
    latv = num.ndarray(dimX * dimY, dtype=float)
    lonv = num.ndarray(dimX * dimY, dtype=float)
    ############################################################################

    c = 0
    streamCounter = 0
    for key in calcStreamMap.iterkeys():
        streamID = key
        c2 = 0
        for o in calcStreamMap[key]:
            if c2 < minSampleCount:
                traces[c][c2] = o
                c2 += 1
        for key in TTTGridMap.iterkeys():
            if streamID == key:
                traveltimes[streamCounter] = TTTGridMap[key]
            else:
                "NEIN", streamID, key
        if streamCounter not in traveltimes:
            continue  # hs : thread crashed before

        g = traveltimes[streamCounter]
        dimZ = g.dimZ
        mint = g.mint
        maxt = g.maxt
        Latul = g.Latul
        Lonul = g.Lonul
        Lator = g.Lator
        Lonor = g.Lonor
        gridElem = g.GridArray
        for x in range(dimX):
            for y in range(dimY):
                elem = gridElem[x, y]
                traveltime[c][x * dimY + y] = elem.tt
                latv[x * dimY + y] = elem.lat
                lonv[x * dimY + y] = elem.lon
        c += 1
        streamCounter += 1

    ############## CALCULATE PARAMETER FOR SEMBLANCE CALCULATION ##################
    nsamp = winlen * new_frequence
    nstep = int(step * new_frequence)
    migpoints = dimX * dimY
    dimZ = 0
    new_frequence = cfg.newFrequency()  # ['new_frequence']
    maxp = int(Config['ncore'])

    Logfile.add('PROCESS %d NTIMES: %d' % (flag, ntimes))

    if False:
        print('nostat ', nostat, type(nostat))
        print('nsamp ', nsamp, type(nsamp))
        print('ntimes ', ntimes, type(ntimes))
        print('nstep ', nstep, type(nstep))
        print('dimX ', dimX, type(dimX))
        print('dimY ', dimY, type(dimY))
        print('mint ', Gmint, type(mint))
        print('new_freq ', new_frequence, type(new_frequence))
        print('minSampleCount ', minSampleCount, type(minSampleCount))
        print('latv ', latv, type(latv))
        print('traces', traces, type(traces))
        print('traveltime', traveltime, type(traveltime))

    t1 = time.time()
    traces_org = traces.reshape(1, nostat * minSampleCount)
    traveltime_org = traveltime.reshape(1, nostat * dimX * dimY)

    USE_C_CODE = True
    try:
        if USE_C_CODE:
            import Cm
            import CTrig
            start_time = time.time()
            k = Cm.otest(maxp, nostat, nsamp, ntimes, nstep, dimX, dimY, Gmint,
                         new_frequence, minSampleCount, latv, lonv,
                         traveltime_org, traces_org)
            print("--- %s seconds ---" % (time.time() - start_time))
        else:
            start_time = time.time()
            k = otest(maxp, nostat, nsamp, ntimes, nstep, dimX, dimY, Gmint,
                      new_frequence, minSampleCount, latv, lonv,
                      traveltime_org, traces_org)  # hs
            print("--- %s seconds ---" % (time.time() - start_time))
    except:
        print("loaded tttgrid has probably wrong dimensions or stations, "
              "delete ttgrid or exchange")

    t2 = time.time()
    Logfile.add('%s took %0.3f s' % ('CALC:', (t2 - t1)))
    partSemb = k
    partSemb_data = partSemb.reshape(ntimes, migpoints)
    return partSemb_data
def test_ping_during_log_send(self):
    cm = None
    pgs1 = None
    pgs2 = None
    try:
        cm = Cm.CM("test_pgs")
        cm.create_workspace()
        pg = Pg.PG(0)

        # pgs1 --> master
        pgs1 = Pgs.PGS(0, 'localhost', 1900, cm.dir)
        pg.join(pgs1, start=True)
        pgs1.smr.wait_role(Smr.SMR.MASTER)

        # make lots of logs
        clients = []
        num_clients = 20
        for i in range(0, num_clients):
            C = Client.Client()
            clients.append(C)
            C.slotid = i
            C.add(C.slotid, 'localhost', 1909)
            C.size(64 * 1024, C.slotid)  # 64K payload
            C.tps(100, C.slotid)
        for C in clients:
            C.start(C.slotid)
        runtime_limit = 60
        runtime = 0
        while runtime < runtime_limit:
            time.sleep(1)
            runtime = runtime + 1
        for C in clients:
            C.stop(C.slotid)

        # get log sequence of the master
        master_seqs = pgs1.smr.getseq_log()

        # pgs2 --> slave
        pgs2 = Pgs.PGS(1, 'localhost', 9000, cm.dir)
        pg.join(pgs2, start=True)
        pgs2.smr.wait_role(Smr.SMR.SLAVE)

        try_count = 0
        count = 0
        prev_seq = 0
        master_response_times = []
        slave_response_times = []
        while True:
            try_count = try_count + 1
            # master
            st = int(round(time.time() * 1000))
            seqs = pgs1.smr.getseq_log()
            et = int(round(time.time() * 1000))
            master_response_times.append(et - st)
            # slave
            st = int(round(time.time() * 1000))
            seqs = pgs2.smr.getseq_log()
            et = int(round(time.time() * 1000))
            slave_response_times.append(et - st)
            if prev_seq != seqs['max']:
                count = count + 1
                prev_seq = seqs['max']
            if master_seqs['max'] <= seqs['max']:
                break
            time.sleep(0.1)

        print "==========> try_count:%d count:%d" % (try_count, count)
        print "MASTER ==========>", master_response_times
        print "SLAVE  ==========>", slave_response_times
        for rt in master_response_times:
            assert rt < 100
        for rt in slave_response_times:
            assert rt < 100
    finally:
        # Util.tstop('Check output!')
        if pgs1 is not None:
            pgs1.kill_smr()
            pgs1.kill_be()
        if pgs2 is not None:
            pgs2.kill_smr()
            pgs2.kill_be()
        if cm is not None:
            cm.remove_workspace()
def doCalc(flag, Config, WaveformDict, FilterMetaData, Gmint, Gmaxt,
           TTTGridMap, Folder, Origin, ntimes):
    '''
    method for calculating semblance of one station array
    '''
    Logfile.add('PROCESS %d %s' % (flag, ' Enters Semblance Calculation'))
    Logfile.add('MINT : %f MAXT: %f Traveltime' % (Gmint, Gmaxt))

    cfg = ConfigObj(dict=Config)

    dimX = cfg.dimX()        # ('dimx')
    dimY = cfg.dimY()        # ('dimy')
    winlen = cfg.winlen()    # ('winlen')
    step = cfg.step()        # ('step')
    new_frequence = cfg.newFrequency()  # ('new_frequence')
    forerun = cfg.Int('forerun')
    duration = cfg.Int('duration')
    gridspacing = cfg.Float('gridspacing')

    nostat = len(WaveformDict)
    traveltimes = {}
    recordstarttime = ''
    minSampleCount = 999999999

    ntimes = int((forerun + duration) / step)
    nsamp = int(winlen * new_frequence)
    nstep = int(step * new_frequence)

    # for i in WaveformDict.iterkeys():
    #     print i, WaveformDict[i]

    ############################################################################
    calcStreamMap = WaveformDict

    for trace in calcStreamMap.iterkeys():
        recordstarttime = calcStreamMap[trace].stats.starttime
        d = calcStreamMap[trace].stats.starttime
        d = d.timestamp
        if calcStreamMap[trace].stats.npts < minSampleCount:
            minSampleCount = calcStreamMap[trace].stats.npts

    ############################################################################
    traces = np.ndarray(shape=(len(calcStreamMap), minSampleCount), dtype=float)
    traveltime = np.ndarray(shape=(len(calcStreamMap), dimX * dimY), dtype=float)
    latv = np.ndarray(dimX * dimY, dtype=float)
    lonv = np.ndarray(dimX * dimY, dtype=float)
    ############################################################################
    # traces = np.ndarray(nostat * minSampleCount, dtype=float)
    # traveltimes = np.ndarray(nostat * dimX * dimY, dtype=float)
    # latv = np.ndarray(dimX * dimY, dtype=float)
    # lonv = np.ndarray(dimX * dimY, dtype=float)
    # print 'minSC: ', minSampleCount, ' LCSM: ', len(calcStreamMap)

    c = 0
    streamCounter = 0
    for key in calcStreamMap.iterkeys():
        streamID = key
        c2 = 0
        # print streamID, len(calcStreamMap[key]), minSampleCount
        for o in calcStreamMap[key]:
            if c2 < minSampleCount:
                traces[c][c2] = o
                # print 'C: ', c, ' C2: ', c2, ' TRACES:', traces[c][c2]
                c2 += 1
        # endfor

        for key in TTTGridMap.iterkeys():
            if streamID == key:
                traveltimes[streamCounter] = TTTGridMap[key]
            else:
                "NEIN", streamID, key
        # endfor

        if streamCounter not in traveltimes:
            continue  # hs : thread crashed before

        g = traveltimes[streamCounter]
        dimZ = g.dimZ
        mint = g.mint
        maxt = g.maxt
        Latul = g.Latul
        Lonul = g.Lonul
        Lator = g.Lator
        Lonor = g.Lonor
        gridElem = g.GridArray
        for x in range(dimX):
            for y in range(dimY):
                elem = gridElem[x, y]
                traveltime[c][x * dimY + y] = elem.tt
                latv[x * dimY + y] = elem.lat
                lonv[x * dimY + y] = elem.lon
        # endfor
        c += 1
        streamCounter += 1
    # endfor

    ############## CALCULATE PARAMETER FOR SEMBLANCE CALCULATION ##################
    nsamp = winlen * new_frequence
    nstep = int(step * new_frequence)
    migpoints = dimX * dimY
    dimZ = 0
    new_frequence = cfg.newFrequency()  # ['new_frequence']
    maxp = int(Config['ncore'])
    # maxp = 20  # hs

    Logfile.add('PROCESS %d NTIMES: %d' % (flag, ntimes))

    # k = Csemblance.semb(flag, nostat, nsamp, ntimes, nstep, Gmint, Gmaxt,
    #                     Lonul, Latul, minSampleCount, dimZ, dimX, dimY,
    #                     new_frequence, ntimesstart, ntimesend, winlen, step,
    #                     gridspacing, latv, lonv, traveltime, traces, backveclen)
    # k = sembPar.semb(flag, nostat, nsamp, ntimes, nstep, Gmint, Gmaxt,
    #                  Lonul, Latul, minSampleCount, dimZ, dimX, dimY,
    #                  new_frequence, ntimesstart, ntimesend, winlen, step,
    #                  gridspacing, latv, lonv, traveltime, traces, backveclen)

    if False:
        print('nostat ', nostat, type(nostat))
        print('nsamp ', nsamp, type(nsamp))
        print('ntimes ', ntimes, type(ntimes))
        print('nstep ', nstep, type(nstep))
        print('dimX ', dimX, type(dimX))
        print('dimY ', dimY, type(dimY))
        print('mint ', Gmint, type(mint))
        print('new_freq ', new_frequence, type(new_frequence))
        print('minSampleCount ', minSampleCount, type(minSampleCount))
        print('latv ', latv, type(latv))
        print('traces', traces, type(traces))
        print('traveltime', traveltime, type(traveltime))

    traveltime = traveltime.reshape(1, nostat * dimX * dimY)
    traces = traces.reshape(1, nostat * minSampleCount)
    # print 'traveltime2', traveltime, type(traveltime)

    t1 = time.time()
    if USE_C_CODE:
        k = Cm.otest(maxp, nostat, nsamp, ntimes, nstep, dimX, dimY, Gmint,
                     new_frequence, minSampleCount, latv, lonv, traveltime,
                     traces)
    else:
        k = otest(maxp, nostat, nsamp, ntimes, nstep, dimX, dimY, Gmint,
                  new_frequence, minSampleCount, latv, lonv, traveltime,
                  traces)  # hs
    t2 = time.time()
    Logfile.add('%s took %0.3f s' % ('CALC:', (t2 - t1)))

    # print 'K', k, len(k), ' MUST ', ntimes * dimX * dimY, ' RES ', k[1]
    partSemb = k
    # partSemb = partSemb.reshape(1, migpoints)
    partSemb = partSemb.reshape(ntimes, migpoints)
    # print 'PARTSEMB FLAG: ', partSemb, type(partSemb), partSemb.ndim
    return partSemb
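All the doCalc variants above flatten the traces and travel-time grids row-major into a single vector before calling the C extension (Cm.otest) or the Python fallback (otest). A small self-contained sketch of that indexing convention, with toy dimensions (all names below are illustrative, not from the repository):

# Illustration of the flattening convention used before Cm.otest / otest.
import numpy as np

nostat, minSampleCount = 2, 4   # stations x samples (toy values)
dimX, dimY = 2, 3               # grid dimensions (toy values)

traces = np.arange(nostat * minSampleCount, dtype=float).reshape(nostat, minSampleCount)
traveltime = np.zeros((nostat, dimX * dimY))

# Flatten exactly as doCalc does:
traces_flat = traces.reshape(1, nostat * minSampleCount)
traveltime_flat = traveltime.reshape(1, nostat * dimX * dimY)

# Sample s of station c lives at index c * minSampleCount + s;
# grid point (x, y) of station c at c * dimX * dimY + x * dimY + y.
c, s = 1, 2
assert traces_flat[0, c * minSampleCount + s] == traces[c, s]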
def test_quorum(self):
    cm = None
    M = None
    C = None
    P = None
    try:
        cm = Cm.CM("test_quorum")
        cm.create_workspace()

        # M(1), S, S
        M = _init_pgs(0, 'localhost', 1900, cm.dir)
        M.smr.role_master(M.id, 1, 0)
        C = _init_pgs(1, 'localhost', 1910, cm.dir)
        C.smr.role_slave(C.id, 'localhost', M.base_port, 0)
        P = _init_pgs(2, 'localhost', 1920, cm.dir)
        P.smr.role_slave(P.id, 'localhost', M.base_port, 0)

        _attach_client(M, 1024, 1000)
        _attach_client(C, 1024, 1000)
        _attach_client(P, 1024, 1000)
        M.client.start(M.client.slotid)
        C.client.start(C.client.slotid)
        P.client.start(P.client.slotid)

        # Loop
        loopCount = 20
        for i in range(0, loopCount):
            # M(1), S, S
            M.smr.setquorum(1)
            _check_progress([M, C, P])

            # M(1), S, L
            P.smr.role_lconn()
            _check_progress([M, C])

            # M(C), S, S
            ps = P.smr.getseq_log()
            P.smr.role_slave(P.id, 'localhost', M.base_port, ps['max'])
            _check_progress([M, C, P])

            # M(C), S, L
            M.smr.setquorum(1, [C.id])
            P.smr.role_lconn()
            _check_progress([M, C])

            # M(C), S, S
            ps = P.smr.getseq_log()
            P.smr.role_slave(P.id, 'localhost', M.base_port, ps['max'])
            snap = _check_progress([M, C, P])

            # M(C), L, S
            C.smr.role_lconn()
            _check_progress([M, P], noprogress=True)

            # M(C), S, S
            cs = C.smr.getseq_log()
            C.smr.role_slave(C.id, 'localhost', M.base_port, cs['max'])
            _check_progress([M, C, P])
            loopCount = loopCount + 1

        # Stop clients
        M.client.stop(M.client.slotid)
        C.client.stop(C.client.slotid)
        P.client.stop(P.client.slotid)
    finally:
        # Util.tstop('Check output!')
        if M is not None:
            M.kill()
        if C is not None:
            C.kill()
        if P is not None:
            P.kill()
        if cm is not None:
            cm.remove_workspace()
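_check_progress is likewise not shown in this listing. A sketch reconstructed only from the call sites above: snapshot each replicator's max log sequence, wait, and assert it advanced (or did not, when noprogress=True, as when the quorum member C is disconnected). The body and the 0.5-second wait are assumptions:

# Hypothetical sketch of _check_progress(pgs_list, noprogress=False).
def _check_progress(pgs_list, noprogress=False):
    before = [pgs.smr.getseq_log()['max'] for pgs in pgs_list]
    time.sleep(0.5)
    after = [pgs.smr.getseq_log()['max'] for pgs in pgs_list]
    for b, a in zip(before, after):
        if noprogress:
            assert a == b   # commits must be stalled
        else:
            assert a > b    # log must advance
    return after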
def test_logdelete(self):
    cm = None
    pgs = None
    try:
        cm = Cm.CM("test_logdelete")
        cm.create_workspace()
        pg = Pg.PG(0)

        # pgs --> master
        pgs = Pgs.PGS(0, 'localhost', 1900, cm.dir)
        pg.join(pgs, start=True)
        pgs.smr.wait_role(Smr.SMR.MASTER)

        # make lots of logs
        clients = []
        num_clients = 20
        for i in range(0, num_clients):
            C = Client.Client()
            clients.append(C)
            C.slotid = i
            C.add(C.slotid, 'localhost', 1909)
            C.size(64 * 1024, C.slotid)
            C.tps(100, C.slotid)
        for C in clients:
            C.start(C.slotid)
        runtime_limit = 15
        runtime = 0
        while runtime < runtime_limit:
            time.sleep(1)
            runtime = runtime + 1
        for C in clients:
            C.stop(C.slotid)

        # checkpoint server
        pgs.be.ckpt()
        time.sleep(1.0)

        # delete log
        log_delete_seq = pgs.smr.deletelog(retain=0)
        assert (log_delete_seq > 0), log_delete_seq
        print("deletelog 0 returns", log_delete_seq)

        # log delete interval in idle state is 10 sec. (see bio.c)
        # after that: 1.0, 0.9, 0.8, ..., 0.1, 0.1, ...
        logdel_timeout = 10 + log_delete_seq / (64 * 1024 * 1024)
        deltime = 0
        while deltime < logdel_timeout:
            seqs = pgs.smr.getseq_log()
            # Note: background delete spares one log file (see del_proc in bio.c)
            if seqs['min'] + 64 * 1024 * 1024 == log_delete_seq:
                break
            time.sleep(1)
            deltime = deltime + 1
        assert (deltime < logdel_timeout), logdel_timeout
    finally:
        if pgs is not None:
            pgs.kill_smr()
            pgs.kill_be()
        if cm is not None:
            cm.remove_workspace()
def test_mem_disk_change(self):
    saved = Conf.USE_MEM_LOG
    Conf.USE_MEM_LOG = True
    cm = None
    pgs = None
    expected_seqs = {}
    try:
        cm = Cm.CM("tmpTestMemDiskChange")
        cm.create_workspace()
        pg = Pg.PG(0)
        pgs = Pgs.PGS(0, 'localhost', self.BASE_PORT, cm.dir)

        expected_seqs['min'] = 0
        expected_seqs['max'] = 0
        for i in range(0, 5):
            # alternate between mem and disk log devices
            if i % 2 == 0:
                Log.createlog(pgs.dir)
            else:
                Log.deletelog(pgs.dir)

            # start replicator
            pgs.start_smr()
            pgs.smr.wait_role(Smr.SMR.NONE)

            # check seqs survived the device change
            seqs = pgs.smr.getseq_log()
            print "expected =======>", expected_seqs
            print "seqs     =======>", seqs
            assert seqs['min'] == expected_seqs['min']
            assert seqs['max'] == expected_seqs['max']

            # pgs -> master
            pgs.start_be()
            pgs.smr.wait_role(Smr.SMR.LCONN)
            pg.join(pgs)
            pgs.smr.wait_role(Smr.SMR.MASTER)

            # make some logs
            self.make_some_logs(runtime=3)

            # wait for be to apply all logs
            time.sleep(1)

            # checkpoint
            pgs.be.ckpt()

            # remember original sequences
            expected_seqs = pgs.smr.getseq_log()

            # kill pgs
            pg.leave(pgs.id, kill=True)
    finally:
        # Util.tstop('Check output!')
        if pgs is not None:
            pgs.kill_smr()
            pgs.kill_be()
        if cm is not None:
            cm.remove_workspace()
        Conf.USE_MEM_LOG = saved
def test_logutil(self):
    cm = None
    master = None
    try:
        cm = Cm.CM("test_logutil")
        cm.create_workspace()
        pg = Pg.PG(0)

        # master --> master
        master = Pgs.PGS(0, 'localhost', 1900, cm.dir)
        pg.join(master, start=True)
        master.smr.wait_role(Smr.SMR.MASTER)

        # make some logs
        self.make_logs(master, 5)

        # get file sequences (log files are 64MB apart)
        seqs = master.smr.getseq_log()
        file_seqs = []
        seq = 0
        while seq < seqs['max']:
            file_seqs.append(seq)
            seq = seq + 64 * 1024 * 1024

        # get the leading timestamp and sequence from each log file and peek one line
        peeks = []
        for seq in file_seqs:
            out, _ = Log.datadump_communicate(master.dir, seq, seq + 5 * 1024, 'Tsld32')
            lines = out.split('\n')
            assert len(lines) > 0
            peek = lines[len(lines) / 2]
            peeks.append(peek)

        # find by time range and check the peeked line is among the dumped lines
        idx = 0
        for seq in file_seqs:
            peek = peeks[idx]
            ts = int(peek.split()[0])
            assert ts > 20160817000000000
            out, _ = Log.dumpbytime_communicate(master.dir, ts, ts + 1, 'Tsld32')
            lines = out.split('\n')
            found = False
            for line in lines:
                if line == peek:
                    found = True
                    break
            assert found == True
            idx = idx + 1

        # decachelog (just for coverage)
        out, _ = Log.decachelog_communicate(master.dir, file_seqs[-1], True)
        assert len(out) == 0

        # mincorelog (just for coverage)
        out, _ = Log.mincorelog_communicate(master.dir)
        assert len(out.split('\n')) >= 5
    finally:
        # Util.tstop('Check output!')
        if master is not None:
            master.kill_smr()
            master.kill_be()
        if cm is not None:
            cm.remove_workspace()