def test_basic(self):
    """Exercise Model key derivation and commit/version lifecycle.

    Covers: class vs. instance keys, the first real commit, a no-op commit
    on unchanged data, and a commit after forcing the dirty flag when the
    content hash is unchanged (also a no-op).
    """
    now = nanotime.now()
    a = Model('A')
    # Class key is the type path; instance key appends the name.
    self.assertEqual(Model.key, Key('/Model'))
    self.assertEqual(a.key, Key('/Model:A'))
    self.assertEqual(Model.key, a.key.path)
    self.assertEqual(Model.key.instance('A'), a.key)
    self.assertEqual(a.__dstype__, 'Model')
    self.assertEqual(Model.__dstype__, 'Model')
    self.subtest_assert_uncommitted(a)

    # First commit: creates the initial version (parent is the blank hash).
    a.commit()
    created = a.version.created
    print 'committed', a.version.hash
    self.assertFalse(a.isDirty())
    self.assertTrue(a.isCommitted())
    self.assertEqual(a.version.type, Model.__dstype__)
    self.assertEqual(a.version.hash, a.computedHash())
    self.assertEqual(a.version.parent, Version.BLANK_HASH)
    self.assertEqual(a.version.created, created)
    self.assertEqual(a.created, a.version.created)
    self.assertEqual(a.committed, a.version.committed)
    self.assertTrue(a.created > now)
    self.assertTrue(a.committed > now)

    # Second commit with no changes: version must be untouched, so the
    # timestamps stay BEFORE the new `now` sample.
    now = nanotime.now()
    a.commit()
    self.assertFalse(a.isDirty())
    self.assertTrue(a.isCommitted())
    self.assertEqual(a.version.type, Model.__dstype__)
    self.assertEqual(a.version.hash, a.computedHash())
    self.assertEqual(a.version.parent, Version.BLANK_HASH)
    self.assertEqual(a.version.created, created)
    self.assertEqual(a.created, a.version.created)
    self.assertEqual(a.committed, a.version.committed)
    self.assertTrue(a.created < now)
    self.assertTrue(a.committed < now)  # didn't REALLY commit.

    # Force the dirty flag: commit should still detect the identical
    # content hash and leave the version alone.
    a._isDirty = True
    self.assertTrue(a.isDirty())
    now = nanotime.now()
    a.commit()
    self.assertFalse(a.isDirty())
    self.assertTrue(a.isCommitted())
    self.assertEqual(a.version.type, Model.__dstype__)
    self.assertEqual(a.version.hash, a.computedHash())
    self.assertEqual(a.version.parent, Version.BLANK_HASH)
    self.assertEqual(a.version.created, created)
    self.assertEqual(a.created, a.version.created)
    self.assertEqual(a.committed, a.version.committed)
    self.assertTrue(a.created < now)
    self.assertTrue(a.committed < now)  # didn't REALLY commit.
def picture1():
    """Replay 5-minute bars through the Chan analyzer and plot three panels:
    raw bars, merged k-lines with pens, and the line segments built from pens.
    """
    st = nanotime.now()
    fig, ax = plt.subplots(3, 1)
    dft = df5mKline[['high', 'low']]
    # dft = resample('30min',dft)
    ch = Chan(True)
    # Feed every bar in order; Chan accumulates k-lines/pens/segments.
    for i in xrange(dft.shape[0]):
        ch.onNewKline(dft.iloc[i])
    ax[0].set_title(u'原始数据')
    ax[1].set_title(u'处理包含关系并生成笔')
    ax[2].set_title(u'上图中的笔生成的线段')
    addVerticalLineDf(ax[0], dft, color='b')
    addVerticalLine(ax[1], ch._KlineList, color='cyan')
    addBrokenLine(ax[1], ch._PenPointList, color='b', linestyle="-")
    addBrokenLine(ax[2], ch._PenPointList, color='g', linestyle="-.")
    # Segment endpoints index back into the pen-point list via `pidx`.
    addBrokenLine(ax[2], [ch._PenPointList[lp.pidx] for lp in ch._LinePointList], False, color='b')
    #print ch._PenPointList
    print('ch cost:%d' % (nanotime.now() - st).milliseconds())
    print ch._PenPointList
    plt.show()
def basic(jep, writeconcern={'slaveOk': False}): """ just add a record to our db """ print "running mongo test for host "+jep.host+" writeconcern "+str(writeconcern) session = prep_session(jep, writeconcern) ipt = iptables.Iptables(jep.host, jep) jep.history.set_checker(getattr(jep.mod, 'same')) rec = mk_rec(jep, -1, "insert", writeconcern) _id = None try: _id = session.insert(rec) except Exception as e: print jep.host+": "+str(e) i = 0 while i < jep.props['count']: blocked = ipt.split_unsplit_all(i, jep) try: rec = mk_rec(jep, i, "update", writeconcern) idx = jep.history.add(jep.host, "jepsen", rec, ipt.isblocked()) if _id != None: session.update({'_id':_id}, rec) jep.history.update(idx, {'end': nanotime.now()}) found = session.find_one({'_id':_id}) jep.history.update(idx, {'found': found, 'rawtime': nanotime.now()}) jep.history.printEvt(idx) except Exception as e: print jep.host+": "+str(e) i = i + 1 jep.pause()
def test_exact_update_checker(self):
    """Check the exact-grid non-bonded update checker fires on the expected
    trajectory steps, and after an atom is displaced past the 0.5 cutoff.
    """
    simulation = currentSimulation()
    checker = Exact_grid_non_bonded_update_checker(0.5)
    # steps at which an update is legitimately needed
    expected_true = [0,1]
    for i,file_name in enumerate(gb3_10_steps.gb3_files):
        PDBTool("test_data/gb3_10_steps/%s" % file_name).read()
        if i == 1:
            # snapshot atom 10's position for the displacement test below
            pos_10 = tuple(simulation.atomPos(10))
        start = now()
        checker.update()
        end=now()
        result = checker.needs_update()
        #print i, file_name, result,checker, (end-start).seconds()*1000.0
        if result == True:
            self.assertTrue(i in expected_true)
    # Move atom 10 just past the 0.5 threshold; the checker must now report
    # that an update is needed.
    pos = simulation.atomPos(10)
    pos[0] = pos_10[0]+0.50001
    pos[1] = pos_10[1]
    pos[2] = pos_10[2]
    checker.update()
    result = checker.needs_update()
    self.assertTrue(result)
def picture1():
    """Replay 5-minute bars through the Chan analyzer and plot three panels:
    raw bars, merged k-lines with pens, and the line segments built from pens.
    """
    st = nanotime.now()
    fig, ax = plt.subplots(3, 1)
    dft = df5mKline[['high', 'low']]
    # dft = resample('30min',dft)
    ch = Chan(True)
    # Feed every bar in order; Chan accumulates k-lines/pens/segments.
    for i in xrange(dft.shape[0]):
        ch.onNewKline(dft.iloc[i])
    ax[0].set_title(u'原始数据')
    ax[1].set_title(u'处理包含关系并生成笔')
    ax[2].set_title(u'上图中的笔生成的线段')
    addVerticalLineDf(ax[0], dft, color='b')
    addVerticalLine(ax[1], ch._KlineList, color='cyan')
    addBrokenLine(ax[1], ch._PenPointList, color='b', linestyle="-")
    addBrokenLine(ax[2], ch._PenPointList, color='g', linestyle="-.")
    # Segment endpoints index back into the pen-point list via `pidx`.
    addBrokenLine(
        ax[2],
        [ch._PenPointList[lp.pidx] for lp in ch._LinePointList],
        False,
        color='b')
    #print ch._PenPointList
    print('ch cost:%d' % (nanotime.now() - st).milliseconds())
    print ch._PenPointList
    plt.show()
def basic(jep):
    """ basic test: write same key and read after """
    global portmap, ports
    client = voldemort.StoreClient("test", [(jep.host, ports[jep.host])])
    print "running basic test for host "+jep.host+" port "+str(ports[jep.host])
    key = "jepsen_basic"
    ipt = iptables.Iptables(jep.host, jep)
    jep.history.set_checker(getattr(jep.mod, 'same'))
    i = 0
    while i < jep.props['count']:
        # possibly partition/heal the network this iteration
        blocked = ipt.split_unsplit_all(i, jep)
        try:
            value = jep.host+" "+str(i)
            idx = jep.history.add(jep.host, key, value, ipt.isblocked())
            print "put "+key+" = "+value
            client.put(key, value)
            jep.history.update(idx, {'end': nanotime.now()})
            found = client.get(key)
            # second element in found is the vectorclock object
            jep.history.update(idx, {'found': found[0][0], 'rawtime': nanotime.now()})
            jep.history.printEvt(idx)
        except Exception as e:
            # deliberate best-effort: failures during a partition are expected
            # print jep.host+" ("+str(i)+"): "+str(e)
            pass
        jep.pause()
        i = i + 1
def test_non_bonded_timing(self):
    """Time how long binning every atom into the non-bonded grid takes."""
    binner = Non_bonded_bins(self.get_single_member_ensemble_simulation(),cutoff_distance=5.0)
    # all atom ids in the simulation, as a C int array
    atom_ids = array('i',range(Segment_Manager().get_number_atoms()))
    start = now()
    binner.add_to_bins(atom_ids)
    stop = now()
    print (stop-start).seconds()*1000.0,'ms'
def test_new_fast_non_bonded_list_timing(self): nb_potential = self._get_xcamshift().get_named_sub_potential(NON_BONDED) new_calc, old_calc, non_bonded_lists, old_non_bonded_lists = self._create_naive_and_fast_non_bonded_lists(nb_potential) new_calc() non_bonded_lists.clear() start = now() new_calc() end = now() new_time = (end - start).seconds() old_calc() old_non_bonded_lists.clear() start = now() old_calc() end = now() old_time = (end - start).seconds() self.assertTrue(new_time < old_time) print 'new %4.3f ms / cycle' % (new_time*1000.0) print 'new %4.3f ms / cycle' % (old_time*1000.0)
def transaction(jep):
    """ Uses paxos CAS

    Appends the iteration number to a single row's value via Cassandra
    lightweight transactions (UPDATE ... IF), retrying on timeouts, and
    records every write/read in the jepsen history for later checking.
    """
    host = jep.host
    count = jep.props['count']
    hosts = jep.hosts
    history = jep.history
    print "start transaction test on "+host
    canonical = tbname('transaction')
    session = prep_session(jep, canonical)
    sel = SimpleStatement(
        "SELECT stuff FROM %s WHERE id=0 LIMIT 1" % canonical,
        consistency_level=ConsistencyLevel.ALL
    )
    ipt = iptables.Iptables(host, jep)
    jep.history.set_checker(getattr(jep.mod, 'sequence'))
    # seed the row so the first CAS has something to compare against
    value = 'start'
    instr = "INSERT INTO %s (id, stuff) VALUES (0, '%s')" % (canonical, value)
    print jep.host+": "+instr
    ins = SimpleStatement(
        instr,
        consistency_level=ConsistencyLevel.QUORUM)
    session.execute(ins)
    i = 0
    while i < count:
        blocked = ipt.split_unsplit_all(i, jep)
        value = value+" "+str(i)
        idx = history.add(host, 0, value, ipt.isblocked())
        try:
            # CAS loop: read current value, attempt conditional update,
            # retry with jittered sleep on coordinator timeouts.
            tries = 0
            while tries < 100:
                try:
                    row = session.execute(sel)[0]
                    updstr = "UPDATE %s SET stuff='%s' WHERE id=0 IF stuff='%s'" % (canonical,value,row.stuff)
                    print jep.host+" "+str(nanotime.now())+": "+updstr
                    upd = SimpleStatement(updstr, consistency_level=ConsistencyLevel.QUORUM)
                    res = session.execute(upd)
                    break
                except Timeout as t:
                    time.sleep(random.randint(0,100)/1e3)
                tries = tries + 1
            history.update(idx, {'end': nanotime.now()})
        except Exception as e:
            print jep.host+" write "+str(nanotime.now())+": "+str(e)
            history.update(idx, {'error': str(e)})
        try:
            rows = session.execute(sel)
            history.update(idx, {'found': rows, 'rawtime': nanotime.now()})
            history.printEvt(idx)
        except Exception as e:
            print jep.host+" read "+str(i)+" "+str(nanotime.now())+": "+str(e)
            history.update(idx, {'rawerror': str(e)})
        jep.pause()
        i = i + 1
def no_picture():
    """Headless benchmark: replay every 5-minute bar through the Chan
    analyzer and report the elapsed time in milliseconds.
    """
    started = nanotime.now()
    bars = df5mKline[['high', 'low']]
    analyzer = Chan(True)
    for row_index in xrange(bars.shape[0]):
        analyzer.onNewKline(bars.iloc[row_index])
    print('ch cost:%d' % (nanotime.now() - started).milliseconds())
def test_construction(self):
    """Verify every nanotime constructor (seconds/minutes/.../timestamp/
    datetime) produces equivalent values, across all accessor units.
    """
    def eq(time1, time2):
        # two nanotimes must agree in raw ns and in every unit conversion
        self.assertEqual(time1, time2)
        self.assertEqual(int(time1), time2._ns)
        self.assertEqual(time1._ns, int(time1))
        self.assertEqual(time1._ns, int(time2))
        self.assertEqual(time1._ns, time2._ns)
        self.assertEqual(time1.nanoseconds(), time2.nanoseconds())
        self.assertEqual(time1.microseconds(), time2.microseconds())
        self.assertEqual(time1.milliseconds(), time2.milliseconds())
        self.assertEqual(time1.seconds(), time2.seconds())
        self.assertEqual(time1.minutes(), time2.minutes())
        self.assertEqual(time1.hours(), time2.hours())
        self.assertEqual(time1.days(), time2.days())
        self.assertEqual(time1.timestamp(), time2.timestamp())
        self.assertEqual(time1.datetime(), time2.datetime())
        self.assertEqual(time1.unixtime(), time2.unixtime())
        #self.assertEqual(time1, time1.unixtime())
        #self.assertEqual(time1, time2.unixtime())
        #self.assertEqual(time1, time1.timestamp())
        #self.assertEqual(time1, time2.timestamp())
        #self.assertEqual(time2, time1.unixtime())
        #self.assertEqual(time2, time2.unixtime())
        #self.assertEqual(time2, time1.timestamp())
        #self.assertEqual(time2, time2.timestamp())

    def close(x, y, epsilon=1e-6):
        # approximate equality for float/timedelta round-trips
        return abs(x - y) < epsilon

    for i in range(0, 50):
        # basic
        eq(nanotime.seconds(1), nanotime.nanoseconds(1e9))
        eq(nanotime.seconds(1), nanotime.microseconds(1e6))
        eq(nanotime.seconds(1), nanotime.milliseconds(1e3))
        eq(nanotime.seconds(1), nanotime.seconds(1))
        eq(nanotime.seconds(1), nanotime.minutes(1.0/60))
        eq(nanotime.seconds(1), nanotime.hours(1.0/3600))
        eq(nanotime.seconds(1), nanotime.days(1.0/(3600 * 24)))
        nt1 = nanotime.now()
        self.assertTrue(nanotime.now() > nt1)

        # timestamp
        ts1 = time.time()
        ts2 = nanotime.timestamp(ts1).timestamp()
        eq(nanotime.timestamp(ts1), nanotime.timestamp(ts1))
        self.assertTrue(close(ts2, ts1))
        self.assertEqual(nanotime.timestamp(ts1), ts1)

        # datetime: round-trip may lose sub-microsecond precision
        dt1 = datetime.datetime.now()
        dt2 = nanotime.datetime(dt1).datetime()
        eq(nanotime.datetime(dt1), nanotime.datetime(dt1))
        self.assertTrue(close(dt1, dt2,
                              datetime.timedelta(microseconds=1)))
def no_picture():
    """Headless benchmark: replay 5-minute bars through Twine and print the
    elapsed time plus the resulting dataframe and pen list.
    """
    st = nanotime.now()
    dft = df5mKline[['high', 'low']]
    tw = Twine(True)
    for i in xrange(dft.shape[0]):
        tw.onNewKline(dft.iloc[i])
    print ('tw cost:%d' % (nanotime.now() - st).milliseconds())
    print tw.getDf()
    print tw.getPen()
def basic(jep):
    """ just do a basic key value update to a table
    note that kingsbury's test monkeys with the timestamps for each host
    also need to check what value actually got written
    """
    host = jep.host
    count = jep.props['count']
    hosts = jep.hosts
    history = jep.history
    print "start basic test on "+host
    canonical = tbname('basic')
    session = prep_session(jep, canonical)
    # per-host timestamp skew added to every write's USING TIMESTAMP
    fudge = random.randint(0, 1000)
    sel = SimpleStatement(
        "SELECT stuff FROM "+canonical+" WHERE id=0 LIMIT 1",
        consistency_level=ConsistencyLevel.ALL
    )
    ipt = iptables.Iptables(host, jep)
    history.set_checker(getattr(jep.mod, 'samestuff'))
    i = 0
    teststart = time.time()
    while i < count:
        i = i + 1
        blocked = ipt.split_unsplit_all(i, jep)
        value = host+" "+str(time.time())
        idx = history.add(host, 0, value, ipt.isblocked())
        try:
            instr = "INSERT INTO %s (id, stuff) VALUES (%s, '%s') USING TIMESTAMP %d" \
                % (canonical, 0, value, (nanotime.now().microseconds()+fudge))
            print jep.host+": "+instr
            ins = SimpleStatement(
                instr, consistency_level=ConsistencyLevel.QUORUM)
            session.execute(ins)
            history.update(idx, {'end': nanotime.now()})
        except Exception as e:
            print jep.host+" write "+str(nanotime.now())+": "+str(e)
            history.update(idx, {'error': str(e)})
        try:
            rows = session.execute(sel)
            history.update(idx, {'found': rows, 'rawtime': nanotime.now()})
            history.printEvt(idx)
        except Exception as e:
            print jep.host+" read "+str(nanotime.now())+": "+str(e)
            history.update(idx, {'raw error': str(e)})
        jep.pause()
def commit(self): '''Committing a version creates a snapshot of the current changes.''' # this is actually broken for collection attributes: # if not self.isDirty(): # return # nothing to commit self.validate() sr = serial.SerialRepresentation() sr['hash'] = self.computedHash() if sr['hash'] == self._version.hash: self._isDirty = False return # false alarm, nothing to commit. sr['key'] = str(self.key) sr['type'] = self.__dstype__ sr['parent'] = self._version.hash sr['created'] = self._version.created.nanoseconds() sr['committed'] = nanotime.now().nanoseconds() sr['attributes'] = {} if sr['created'] == 0: # from blank version sr['created'] = sr['committed'] for attr_name, attr in self.attributes().iteritems(): sr['attributes'][attr_name] = serial.clean(attr.rawData(self)) self._version = Version(sr) self._isPersisted = True self._isDirty = False
def test_model(self):
    """Construct a Person from a hand-built Version and verify the instance
    reflects the version's state, flags, key, and attribute values.
    """
    h1 = hashlib.sha1('derp').hexdigest()
    h2 = hashlib.sha1('herp').hexdigest()
    attrs = {'first' : {'value':'Herp'}, \
             'last' : {'value':'Derp'}, \
             'phone' : {'value': '123'}, \
             'age' : {'value': 19}, \
             'gender' : {'value' : 'Male'}}
    sr = serial.SerialRepresentation()
    sr['key'] = '/Person:PersonA'
    sr['hash'] = h1
    sr['parent'] = h2
    sr['created'] = nanotime.now().nanoseconds()
    sr['committed'] = sr['created']
    sr['attributes'] = attrs
    sr['type'] = 'Person'
    ver = Version(sr)
    instance = Person(ver)
    # instance built from a version starts clean, persisted, committed
    self.assertEqual(instance.__dstype__, ver.type)
    self.assertEqual(instance.version, ver)
    self.assertFalse(instance.isDirty())
    self.assertTrue(instance.isPersisted())
    self.assertTrue(instance.isCommitted())
    self.assertEqual(instance.key, Key('/Person:PersonA'))
    self.assertEqual(instance.first, 'Herp')
    self.assertEqual(instance.last, 'Derp')
    self.assertEqual(instance.phone, '123')
    self.assertEqual(instance.age, 19)
    self.assertEqual(instance.gender, 'Male')
def update(self, idx, op, result):
    """ interface to Results.update
    mangles the times so we get sensible read and write times
    given that these are out of order compared to the jepsen tests
    Results was designed for
    TODO: make Results general enough that it doesn't need this
    """
    # accumulate the ops seen for this event (e.g. 'read' then 'write'
    # becomes 'read write'); repeats of the same op are not duplicated
    if idx in self.ops:
        if self.ops[idx] != op:
            self.ops[idx] = self.ops[idx]+" "+op
    else:
        self.ops[idx] = op
    now = nanotime.now()
    evt = self.events[idx]
    if self.ops[idx] == 'write':
        Results.update(self, idx, {'end': now, 'rawtime': now, 'found': result, 'notes': self.ops[idx]})
    elif self.ops[idx] == 'read':
        # a pure read "ends" when it started; only the raw time advances
        Results.update(self, idx, {'end': evt.start, 'rawtime': now, 'found': result, 'notes': self.ops[idx]})
    elif self.ops[idx] == 'read write':
        # reconstruct write-then-read timing from the recorded raw times
        re = evt.rawelapsed()
        we = now - evt.rawtime
        wtime = evt.start + we
        rtime = wtime + re
        Results.update(self, idx, {'end': wtime, 'rawtime': rtime, 'found': result, 'notes': self.ops[idx]})
def setAttribute(self, instance, rawData, default=False):
    '''Called whenever this particular attribute is set to a new value.

    Stamps the mutation time on the raw data; default assignments carry a
    zero timestamp so they never look newer than a real write.
    '''
    rawData['updated'] = 0 if default else nanotime.now().nanoseconds()
def test_creation(self):
    """Build a Version from a full serial representation and verify every
    accessor, hashing/equality behavior, and the error cases.
    """
    h1 = hashlib.sha1('derp').hexdigest()
    h2 = hashlib.sha1('herp').hexdigest()
    now = nanotime.now()
    sr = serial.SerialRepresentation()
    sr['key'] = '/A'
    sr['hash'] = h1
    sr['parent'] = h2
    sr['created'] = now.nanoseconds()
    sr['committed'] = now.nanoseconds()
    sr['attributes'] = {'str' : {'value' : 'derp'} }
    sr['type'] = 'Hurr'
    v = Version(sr)
    self.assertEqual(v.type, 'Hurr')
    self.assertEqual(v.hash, h1)
    self.assertEqual(v.parent, h2)
    self.assertEqual(v.created, now)
    self.assertEqual(v.committed, now)
    self.assertEqual(v.shortHash(5), h1[0:5])
    # three equivalent ways to reach an attribute's value
    self.assertEqual(v.attributeValue('str'), 'derp')
    self.assertEqual(v.attribute('str')['value'], 'derp')
    self.assertEqual(v['str']['value'], 'derp')
    self.assertEqual(hash(v), hash(fasthash.hash(h1)))
    self.assertEqual(v, Version(sr))
    self.assertFalse(v.isBlank)
    self.assertRaises(KeyError, v.attribute, 'fdsafda')
    self.assertRaises(TypeError, cmp, v, 'fdsafda')
def subscribe(self):
    """Consume OANDA's streaming price feed and forward each ask/bid as a
    TICK message until signalled to stop.
    """
    # unlimited json stream from oanda
    s, resp = self.connect(service="/v1/prices", instruments=self.universe, streaming=True)
    if resp.status_code != 200:
        print resp.text
        raise SystemExit
    for line in resp.iter_lines(1):
        if self.signal == 'stop':
            break
        elif self.signal == 'pause':
            continue
        if line:
            try:
                msg = json.loads(line)
            except KeyboardInterrupt:
                print "Ctrl+C pressed. Stopping..."
            except Exception as e:
                print ('Caught exception when converting '
                       'message into json\n' + str(e))
                raise SystemExit
            # price updates arrive wrapped in a 'tick' envelope
            if 'tick' in msg:
                msg = msg['tick']
            if 'instrument' in msg:
                dt = {}
                dt['ticker'] = msg['instrument'].replace('_', '')
                dt['timestamp'] = nanotime.now().timestamp()
                dt['quote_time'] = mktime(
                    parse(msg['time']).utctimetuple())
                dt['size'] = 0
                dt['type_'] = 'QUOTE'
                # emit one message per side, reusing the same dict
                for subtype in ('ask', 'bid'):
                    dt['price'] = msg[subtype]
                    dt['subtype'] = subtype.upper()
                    dt['source'] = 'sandbox'
                    dt['asset'] = 'Curncy'
                    dt['dir'] = 0
                    dt['code'] = ''
                    print nanotime.now()
                    self.sending(dt, 'TICK')  # send
                if self.verbose:
                    print msg
def set(jep):
    """ Uses CQL sets

    Each iteration adds a unique element to a single set column, then reads
    the set back and records whether the element survived.
    """
    print "starting set test for "+jep.host
    canonical = tbname('set_app')
    createtb = 'CREATE TABLE IF NOT EXISTS %s (id int PRIMARY KEY, s set<varchar>)' % canonical
    session = prep_session(jep, canonical, createtb)
    sel = SimpleStatement(
        "SELECT s FROM %s WHERE id=0 LIMIT 1" % canonical,
        consistency_level=ConsistencyLevel.ALL
    )
    # seed the row with an empty set
    session.execute("INSERT into %s (id, s) VALUES (0,{})" % canonical)
    ipt = iptables.Iptables(jep.host, jep)
    jep.history.set_checker(getattr(jep.mod, 'same'))
    i = 0
    while i < jep.props['count']:
        blocked = ipt.split_unsplit_all(i, jep)
        value = jep.host+" "+str(i)
        idx = jep.history.add(jep.host, 0, value, ipt.isblocked())
        try:
            updstr = "UPDATE %s SET s = s + {'%s'} WHERE id=0" % (canonical, value)
            print jep.host+": "+updstr
            upd = SimpleStatement(updstr, consistency_level=ConsistencyLevel.ANY)
            session.execute(upd)
            jep.history.update(idx, {'end': nanotime.now()})
        except Exception as e:
            print jep.host+" write "+str(nanotime.now())+": "+str(e)
            jep.history.update(idx, {'error': str(e)})
        try:
            rows = session.execute(sel)
            # only record 'found' if our element is present in the set
            for row in rows:
                if value in row.s:
                    jep.history.update(idx, {'found': value, 'rawtime': nanotime.now()})
            jep.history.printEvt(idx)
        except Exception as e:
            print jep.host+" read "+str(nanotime.now())+": "+str(e)
            jep.history.update(idx, {'rawerror': str(e)})
        jep.pause()
        i = i + 1
def counter(jep):
    """ All writes are increments. Recovers [0...n] where n is the current
        value of the counter.
    """
    print "starting counter test for "+jep.host
    canonical = tbname('counter_app')
    createtb = 'CREATE TABLE IF NOT EXISTS %s (id int PRIMARY KEY, k counter)' % canonical
    session = prep_session(jep, canonical, createtb)
    sel = SimpleStatement(
        "SELECT k FROM %s WHERE id=0 LIMIT 1" % canonical,
        consistency_level=ConsistencyLevel.ALL
    )
    updstr = "UPDATE %s SET k = k + 1 WHERE id=0" % canonical
    upd = SimpleStatement(updstr, consistency_level=ConsistencyLevel.ONE)
    ipt = iptables.Iptables(jep.host, jep)
    # jep.history.set_checker(getattr(jep.mod, 'isonemore'))
    jep.history.set_checker(getattr(jep.mod, 'ismore'))
    i = 0
    while i < jep.props['count']:
        blocked = ipt.split_unsplit_all(i, jep)
        try:
            # read the pre-increment value to anchor this event
            value = session.execute(sel)
            idx = jep.history.add(jep.host, 0, value, ipt.isblocked())
            try:
                print jep.host+": "+updstr
                session.execute(upd)
                jep.history.update(idx, {'end': nanotime.now()})
            except Exception as e:
                print jep.host+" write "+str(nanotime.now())+": "+str(e)
                jep.history.update(idx, {'error': str(e)})
            try:
                rows = session.execute(sel)
                jep.history.update(idx, {'found': rows, 'rawtime': nanotime.now()})
                jep.history.printEvt(idx)
            except Exception as e:
                print jep.host+" read "+str(nanotime.now())+": "+str(e)
                jep.history.update(idx, {'rawerror': str(e)})
        except Exception as e:
            print jep.host+" error getting value: "+str(e)
        jep.pause()
        i = i + 1
def faults(request, team):
    """Render the most recent fault recorded against *team*.

    A fault older than `timeout` seconds is replaced by the neutral value
    "1: 3 0 0"; no fault at all renders as an empty string.

    BUG FIX: the bare ``except:`` (which also swallowed KeyboardInterrupt
    and real DB errors) is narrowed to ``IndexError``, the exception raised
    by indexing an empty queryset.
    """
    timeout = 10
    try:
        fault = Fault.objects.filter(victim=team).order_by('-id')[0]
        # expire the fault once it falls outside the timeout window
        if (fault.when) < nanotime.now().seconds() - timeout:
            fault = "1: 3 0 0"
    except IndexError:
        # no fault recorded for this team yet
        fault = ""
    context = {'faults': fault}
    return render(request, 'faults.html', context)
def versions():
    """Build three chained Version fixtures: herp (blank parent) <- derp <- lerp."""
    sr = serial.SerialRepresentation()
    sr['key'] = '/ABCD'
    sr['hash'] = hashlib.sha1('herp').hexdigest()
    # first version descends from the blank (all-zero) hash
    sr['parent'] = Version.BLANK_HASH
    sr['created'] = nanotime.now().nanoseconds()
    sr['committed'] = nanotime.now().nanoseconds()
    sr['attributes'] = {'str' : {'value' : 'herp'} }
    sr['type'] = 'Hurr'
    v1 = Version(sr)

    sr = serial.SerialRepresentation()
    sr['key'] = '/ABCD'
    sr['hash'] = hashlib.sha1('derp').hexdigest()
    sr['parent'] = hashlib.sha1('herp').hexdigest()
    sr['created'] = nanotime.now().nanoseconds()
    sr['committed'] = nanotime.now().nanoseconds()
    sr['attributes'] = {'str' : {'value' : 'derp'} }
    sr['type'] = 'Hurr'
    v2 = Version(sr)

    sr = serial.SerialRepresentation()
    sr['key'] = '/ABCD'
    sr['hash'] = hashlib.sha1('lerp').hexdigest()
    sr['parent'] = hashlib.sha1('derp').hexdigest()
    sr['created'] = nanotime.now().nanoseconds()
    sr['committed'] = nanotime.now().nanoseconds()
    sr['attributes'] = {'str' : {'value' : 'lerp'} }
    sr['type'] = 'Hurr'
    v3 = Version(sr)

    return v1, v2, v3
def version_objects():
    """Return three serial-representation dicts forming a version chain:
    herp (blank parent) <- derp <- lerp.
    """
    blank_parent = "0000000000000000000000000000000000000000"

    def build(payload, parent):
        # one representation; created/committed are sampled separately,
        # matching the original call pattern
        return {
            "key": "/ABCD",
            "hash": hashlib.sha1(payload).hexdigest(),
            "parent": parent,
            "created": nanotime.now().nanoseconds(),
            "committed": nanotime.now().nanoseconds(),
            "attributes": {"str": {"value": payload}},
            "type": "Hurr",
        }

    sr1 = build("herp", blank_parent)
    sr2 = build("derp", hashlib.sha1("herp").hexdigest())
    sr3 = build("lerp", hashlib.sha1("derp").hexdigest())
    return sr1, sr2, sr3
def version_objects():
    '''Build three linked serial representations (herp -> derp -> lerp),
    each parented on the previous one's hash; the first uses the blank hash.
    '''
    parent_hashes = ['0000000000000000000000000000000000000000',
                     hashlib.sha1('herp').hexdigest(),
                     hashlib.sha1('derp').hexdigest()]
    reps = []
    for word, parent in zip(['herp', 'derp', 'lerp'], parent_hashes):
        sr = {}
        sr['key'] = '/ABCD'
        sr['hash'] = hashlib.sha1(word).hexdigest()
        sr['parent'] = parent
        sr['created'] = nanotime.now().nanoseconds()
        sr['committed'] = nanotime.now().nanoseconds()
        sr['attributes'] = {'str': {'value': word}}
        sr['type'] = 'Hurr'
        reps.append(sr)
    return reps[0], reps[1], reps[2]
def version_objects():
    """Produce three serial representations chained by parent hash
    (herp with the blank parent, then derp, then lerp)."""
    def representation(word, parent_hash):
        rep = dict(key='/ABCD',
                   hash=hashlib.sha1(word).hexdigest(),
                   parent=parent_hash,
                   created=nanotime.now().nanoseconds(),
                   committed=nanotime.now().nanoseconds())
        rep['attributes'] = {'str': {'value': word}}
        rep['type'] = 'Hurr'
        return rep

    first = representation('herp', '0000000000000000000000000000000000000000')
    second = representation('derp', hashlib.sha1('herp').hexdigest())
    third = representation('lerp', hashlib.sha1('derp').hexdigest())
    return first, second, third
def save_image(self, image, binary_sub_images, char_code):
    """Persist a captcha image plus its per-character binary crops.

    The full image is saved under ``named_dir`` with the character codes in
    its name; each crop goes into a per-character directory under
    ``label_dir``. A nanosecond timestamp keeps filenames unique.

    Improvement: the index-based loop over ``range(len(char_code))`` is
    replaced with ``zip`` over the characters and their crops.
    """
    seq = str(int(nanotime.now()))
    image_name = self.named_dir + '_'.join(
        [str(ord(char)) for char in char_code]) + '.' + seq + '.png'
    image.save(image_name)
    for char, sub_image in zip(char_code, binary_sub_images):
        # directory name preserved exactly (including the double slash)
        train_dir = self.label_dir + str(ord(char)) + '//'
        if not os.path.exists(train_dir):
            os.makedirs(train_dir)
        sub_image.save(train_dir + seq + '.png')
def make_points_from_tuple_or_number(object): t = object if isinstance(object, Iterable) else [object] # tuple or number value = t fields = dict( map(lambda x: (x, value[fields_mapping[x]]), fields_mapping.keys())) # fields = {fields_mapping[index]: value for index, value in enumerate(t)} return [{ "measurement": measurement, "fields": fields, "time": nanotime.now().nanoseconds() }]
def test_object(self):
    """Verify Filter equality, hashing, str/repr round-tripping for string
    and nanotime operands.
    """
    t1 = nanotime.now()
    t2 = nanotime.now()

    f1 = Filter("key", ">", "/A")
    f2 = Filter("key", "<", "/A")
    f3 = Filter("committed", "=", t1)
    f4 = Filter("committed", ">=", t2)

    # repr must be eval-able back into an equal Filter
    self.assertEqual(f1, eval(repr(f1)))
    self.assertEqual(f2, eval(repr(f2)))
    self.assertEqual(f3, eval(repr(f3)))
    self.assertEqual(f4, eval(repr(f4)))

    self.assertEqual(str(f1), "key > /A")
    self.assertEqual(str(f2), "key < /A")
    self.assertEqual(str(f3), "committed = %s" % t1)
    self.assertEqual(str(f4), "committed >= %s" % t2)

    self.assertEqual(f1, Filter("key", ">", "/A"))
    self.assertEqual(f2, Filter("key", "<", "/A"))
    self.assertEqual(f3, Filter("committed", "=", t1))
    self.assertEqual(f4, Filter("committed", ">=", t2))

    self.assertNotEqual(f2, Filter("key", ">", "/A"))
    self.assertNotEqual(f1, Filter("key", "<", "/A"))
    self.assertNotEqual(f4, Filter("committed", "=", t1))
    self.assertNotEqual(f3, Filter("committed", ">=", t2))

    # hashing must agree with equality
    self.assertEqual(hash(f1), hash(Filter("key", ">", "/A")))
    self.assertEqual(hash(f2), hash(Filter("key", "<", "/A")))
    self.assertEqual(hash(f3), hash(Filter("committed", "=", t1)))
    self.assertEqual(hash(f4), hash(Filter("committed", ">=", t2)))

    self.assertNotEqual(hash(f2), hash(Filter("key", ">", "/A")))
    self.assertNotEqual(hash(f1), hash(Filter("key", "<", "/A")))
    self.assertNotEqual(hash(f4), hash(Filter("committed", "=", t1)))
    self.assertNotEqual(hash(f3), hash(Filter("committed", ">=", t2)))
def test_object(self):
    """Verify Filter equality, hashing, str/repr round-tripping for string
    and nanotime operands.
    """
    t1 = nanotime.now()
    t2 = nanotime.now()

    f1 = Filter('key', '>', '/A')
    f2 = Filter('key', '<', '/A')
    f3 = Filter('committed', '=', t1)
    f4 = Filter('committed', '>=', t2)

    # repr must be eval-able back into an equal Filter
    self.assertEqual(f1, eval(repr(f1)))
    self.assertEqual(f2, eval(repr(f2)))
    self.assertEqual(f3, eval(repr(f3)))
    self.assertEqual(f4, eval(repr(f4)))

    self.assertEqual(str(f1), 'key > /A')
    self.assertEqual(str(f2), 'key < /A')
    self.assertEqual(str(f3), 'committed = %s' % t1)
    self.assertEqual(str(f4), 'committed >= %s' % t2)

    self.assertEqual(f1, Filter('key', '>', '/A'))
    self.assertEqual(f2, Filter('key', '<', '/A'))
    self.assertEqual(f3, Filter('committed', '=', t1))
    self.assertEqual(f4, Filter('committed', '>=', t2))

    self.assertNotEqual(f2, Filter('key', '>', '/A'))
    self.assertNotEqual(f1, Filter('key', '<', '/A'))
    self.assertNotEqual(f4, Filter('committed', '=', t1))
    self.assertNotEqual(f3, Filter('committed', '>=', t2))

    # hashing must agree with equality
    self.assertEqual(hash(f1), hash(Filter('key', '>', '/A')))
    self.assertEqual(hash(f2), hash(Filter('key', '<', '/A')))
    self.assertEqual(hash(f3), hash(Filter('committed', '=', t1)))
    self.assertEqual(hash(f4), hash(Filter('committed', '>=', t2)))

    self.assertNotEqual(hash(f2), hash(Filter('key', '>', '/A')))
    self.assertNotEqual(hash(f1), hash(Filter('key', '<', '/A')))
    self.assertNotEqual(hash(f4), hash(Filter('committed', '=', t1)))
    self.assertNotEqual(hash(f3), hash(Filter('committed', '>=', t2)))
def set_headers(self, token='', role='ADMIN', algorithm='sha', type='jwt,mac'):
    """Compose the ``live_drone_map`` auth header from the stored
    credentials, the supplied token/role/algorithm/type, and the current
    time.

    BUG FIX: the timestamp field was emitted as ``×tamp=`` — the literal
    ``&times`` prefix of ``&timestamp`` had been mangled into the ``×``
    character — so the header lost both the field separator and the field
    name. It is restored to ``&timestamp=``.
    """
    current_time = int(nanotime.now())
    headers_content = "user_id=%s" \
                      "&api_key=%s" \
                      "&token=%s" \
                      "&role=%s" \
                      "&algorithm=%s" \
                      "&type=%s" \
                      "&timestamp=%s" % (self.user_id, self.api_key, token,
                                         role, algorithm, type, current_time)
    self.headers = {'live_drone_map': headers_content}
def check_for_new_bpm(): """This queries the DB for a user to see when the last bpm data refresh was for that user, and if the last pull was > 24 hours ago, it calls fetch_data to add recent bpm data for that user. The user is currently hardcoded to me.""" dbsession = connect() result = dbsession.execute( 'select * from "HRDataPoints" order by start_datetime desc limit 1') latest_datapoint = result.first() latest_timestamp = int(latest_datapoint.end_time) now_in_nanotime = nanotime.now() # If the timestamp on the most recent datapoint is more than a day old, call Google for updated data if latest_timestamp < (int(nanotime.now()) - DAY_IN_NANOSECS): endbound = str(int(nanotime.now()) ) # Get now in nanotime for the endbound of the dataset # convert latest_timestamp to int so I can increment it up a second int_latest_timestamp = int(latest_timestamp) int_latest_timestamp += 1000000000 latest_timestamp = str(int_latest_timestamp) new_data = foa.fetch_data(data_type='bpm', startbound=latest_datapoint.end_time, endbound=endbound) try: data_dict = json.loads(new_data) except: print "This is what new_data looks like: ", new_data return "There was an unexpected error." data_point_store.save_to_db(new_data) return True else: return False
def make_points_from_partition(iterator):
    """Turn partition rows into InfluxDB point dicts.

    Each row's first element is the tuple of tag values (paired with
    ``key_field``); the remaining elements supply the field values indexed
    through ``fields_mapping``.

    Improvement: ``dict(map(lambda x, y: (x, y), key_field, t[0]))`` is the
    hand-rolled form of ``dict(zip(key_field, t[0]))``.
    """
    points = []
    for t in iterator:
        tags = dict(zip(key_field, t[0]))
        value = t[1:]
        # map each configured field name to its positional value
        fields = dict(
            map(lambda x: (x, value[fields_mapping[x]]), fields_mapping.keys()))
        points.append({
            "measurement": measurement,
            "fields": fields,
            "time": nanotime.now().nanoseconds(),
            "tags": tags
        })
    return points
def basic(jep): global table, tb_name, key # how do we handle conflicts? # false = raise an exception if a record is overwritten before a write commits if 'overwrite' in jep.props: overwrite = jep.props['overwrite'] else: overwrite = False print "running basic test for dynamodb on table "+tb_name try: item = table.get_item(key=key) except Exception as e: print "failed to get item: "+str(e) item = table.put_item(data={'key':key,'stuff':'new'}) item = table.get_item(key=key) print "made new item" print str(item) jep.history.set_checker(getattr(jep.mod, 'same')) i = 0 while i < jep.props['count']: i = i + 1 try: value = jep.host+" "+str(nanotime.now()) idx = jep.history.add(jep.host,0,value) print "making stuff "+value item['stuff'] = value item.save(overwrite=overwrite) jep.history.update(idx, { "end": nanotime.now() }) item = table.get_item(key=key) jep.history.update(idx, { "rawtime": nanotime.now(), "found": item['stuff'] }) jep.history.printEvt(idx) jep.pause() except Exception as e: print jep.host+" ("+str(i)+"): "+str(e)
def test_raises(self):
    """Version construction must raise ValueError until every required
    field is present and valid; filling them in one by one exercises each
    validation step, and the final fully-populated build must succeed.
    """
    sr = serial.SerialRepresentation()
    self.assertRaises(ValueError, Version, sr)
    sr['key'] = '/A'
    self.assertRaises(ValueError, Version, sr)
    sr['hash'] = 'a'
    self.assertRaises(ValueError, Version, sr)
    sr['parent'] = 'b'
    self.assertRaises(ValueError, Version, sr)
    sr['created'] = nanotime.now().nanoseconds()
    self.assertRaises(ValueError, Version, sr)
    # committed may not be zero / earlier than created
    sr['committed'] = 0
    self.assertRaises(ValueError, Version, sr)
    sr['committed'] = sr['created']
    self.assertRaises(ValueError, Version, sr)
    sr['attributes'] = {'str' : 'derp'}
    self.assertRaises(ValueError, Version, sr)
    sr['type'] = 'Hurr'
    Version(sr)
def split_named_image(self):
    """Split each labelled captcha in source_dir into per-character crops
    and file them into per-label directories under target_dir.

    Filenames encode the labels as ``a_b_c.<ext>``; a captcha whose label
    count doesn't match the number of crops produced is skipped.
    """
    file_names = os.listdir(self.source_dir)
    for file_name in file_names:
        file_path = os.path.join(self.source_dir, file_name)
        print(file_path)
        # labels are underscore-separated in the name before the first dot
        file_dirs = file_name.split('.')[0].split('_')
        source_image = Image.open(file_path)
        images = self.captcha_handler.handle(source_image)
        if len(file_dirs) != len(images):
            print(file_path, 'handle failed.')
        else:
            for index in range(len(images)):
                sub_dir_path = os.path.join(self.target_dir, file_dirs[index])
                if not os.path.exists(sub_dir_path):
                    os.mkdir(sub_dir_path)
                # nanosecond timestamp keeps saved crop names unique
                images[index].save(
                    os.path.join(sub_dir_path, str(int(nanotime.now())) + '.png'))
        source_image.close()
def subscribe(self, fields='LAST_TRADE,BID,ASK'): self.connect() subscriptions = blpapi.SubscriptionList() for security in self.universe: subscriptions.add(security, # any changes to the fields will trigger update fields, "", blpapi.CorrelationId(security)) self.session.subscribe(subscriptions) # Process received events while self.signal != 'stop': # We provide timeout to give the chance to Ctrl+C handling: event = self.session.nextEvent(timeout=500) # 0.5 second timeout if self.signal == 'pause': continue for msg in event: # if event.eventType() == blpapi.Event.SUBSCRIPTION_STATUS: if event.eventType() == blpapi.Event.SUBSCRIPTION_DATA: # get the timestamp first rec_time = nanotime.now().timestamp() t0 = timeflag() dt = parse_BBG_stream(msg, mode='dict') if dt is None: continue dt['quote_time'] = to_timestamp(dt['quote_time']) dt['timestamp'] = rec_time delta = (timeflag() - t0) * 1e6 if self.verbose: print 'STREAM latency: %f %ss' % (delta, miu) self.sending(dt, 'TICK')
def test_basic(self):
    """Exercise Query construction, filter/order mutation, dict and repr
    round-trips, and copying."""
    now = nanotime.now().nanoseconds()

    q1 = Query(Key('/'), limit=100)
    q2 = Query(Key('/'), offset=200)
    q3 = Query(Key('/'), object_getattr=getattr)

    q1.offset = 300
    q3.limit = 1

    q1.filter('key', '>', '/ABC')
    q1.filter('created', '>', now)

    q2.order('key')
    q2.order('-created')

    # Expected serialized forms.
    q1d = {'key': '/',
           'limit': 100,
           'offset': 300,
           'filter': [['key', '>', '/ABC'], ['created', '>', now]]}
    q2d = {'key': '/', 'offset': 200, 'order': ['+key', '-created']}
    q3d = {'key': '/', 'limit': 1}

    self.assertEqual(q1.dict(), q1d)
    self.assertEqual(q2.dict(), q2d)
    self.assertEqual(q3.dict(), q3d)

    # from_dict, repr round-trip, and copy must all preserve equality.
    for query, expected_dict in ((q1, q1d), (q2, q2d), (q3, q3d)):
        self.assertEqual(query, Query.from_dict(expected_dict))
        self.assertEqual(query, eval(repr(query)))
        self.assertEqual(query, query.copy())
def test_basic(self):
    """Check that Query serializes to the expected dicts and survives
    dict, repr, and copy round-trips."""
    created_cutoff = nanotime.now().nanoseconds()

    limited = Query(Key('/'), limit=100)
    offsetted = Query(Key('/'), offset=200)
    custom_getattr = Query(Key('/'), object_getattr=getattr)

    # Mutate after construction to verify attributes are live.
    limited.offset = 300
    custom_getattr.limit = 1

    limited.filter('key', '>', '/ABC')
    limited.filter('created', '>', created_cutoff)

    offsetted.order('key')
    offsetted.order('-created')

    expected_limited = {
        'key': '/',
        'limit': 100,
        'offset': 300,
        'filter': [['key', '>', '/ABC'], ['created', '>', created_cutoff]],
    }
    expected_offsetted = {'key': '/', 'offset': 200,
                          'order': ['+key', '-created']}
    expected_custom = {'key': '/', 'limit': 1}

    self.assertEqual(limited.dict(), expected_limited)
    self.assertEqual(offsetted.dict(), expected_offsetted)
    self.assertEqual(custom_getattr.dict(), expected_custom)

    self.assertEqual(limited, Query.from_dict(expected_limited))
    self.assertEqual(offsetted, Query.from_dict(expected_offsetted))
    self.assertEqual(custom_getattr, Query.from_dict(expected_custom))

    self.assertEqual(limited, eval(repr(limited)))
    self.assertEqual(offsetted, eval(repr(offsetted)))
    self.assertEqual(custom_getattr, eval(repr(custom_getattr)))

    self.assertEqual(limited, limited.copy())
    self.assertEqual(offsetted, offsetted.copy())
    self.assertEqual(custom_getattr, custom_getattr.copy())
# -*- coding: utf-8 -*-
__author__ = 'xujh'

import nanotime
from pylab import *
import pandas as pd
import os

from chan import *

# Matplotlib setup: CJK-capable font and tight subplot margins.
mpl.rcParams['font.sans-serif'] = ['SimHei']
mpl.rcParams['figure.subplot.top'] = 0.96
mpl.rcParams['figure.subplot.bottom'] = 0.03
mpl.rcParams['figure.subplot.left'] = 0.03
mpl.rcParams['figure.subplot.right'] = 0.98

st = nanotime.now()

# Raw string: a Windows path with backslashes must not be subject to escape
# processing (a directory starting with 't' or 'n' would otherwise corrupt it).
hqdatadir = r'D:\TdxW_HuaTai\T0002\export2'
code = '999999'
# code = '399006'
filepath = os.path.join(hqdatadir, code + '.txt')

# Column layout of the TDX export: date, time, OHLC, volume, amount.
# The first two rows are header junk and the last row is a footer, hence
# skiprows/skipfooter; engine='python' is required for skipfooter.
rnames = ['d', 't', 'open', 'high', 'low', 'close', 'volume', 'amt']
df5mKline = pd.read_table(filepath, engine='python', sep=',', encoding='gbk',
                          names=rnames, parse_dates={'time': ['d', 't']},
                          index_col='time', skiprows=2, skipfooter=1)
def now_nano(self):
    """Return the current wall-clock time as integer nanoseconds."""
    current = nanotime.now()
    return current.nanoseconds()
def bg_cb(sess, resp):
    """Completion callback for an async POST: log status and round-trip time.

    `t` is the module-level timestamp taken when the request was sent.
    """
    elapsed_ms = nanotime.now().milliseconds() - t
    print("ID: " + STREAM_ID +
          ", Response: " + str(resp.status_code) +
          ", Delta: " + str(elapsed_ms))
def on_fill(self, order, kwargs):
    """Record order-to-fill latency, in nanoseconds, into the order counter."""
    elapsed = nanotime.now() - self._cache_order_time
    self.count_order(elapsed.nanoseconds())
def create_protocol_struct(info_obj, protocol_head, protocol_type, data_obj, **additions):
    """Build and populate the wire struct for the given protocol head.

    info_obj      -- session/peer state (ids, sequence numbers, last timestamp)
    protocol_head -- selects which struct to build (DCCP head, SYNC, DATA, ...)
    protocol_type -- sub-type recorded inside a DCCP head
    data_obj      -- payload attached to the struct where relevant
    additions     -- optional overrides: 'opt', 'iack', 'delay', 'whyack'

    Returns the populated struct, or None for unknown/unimplemented heads.
    """
    if protocol_head == PROTOCOL_HEAD_DCCP:
        protocol_data = PROTOCOL_DCCP_HEAD_STRUCT()
        protocol_data.protocol_type = protocol_head
        # Optional fields default to 0 when the caller does not supply them
        # (dict.get replaces the previous `in additions.keys()` checks).
        protocol_data.protocol_opt = additions.get("opt", 0)
        protocol_data.protocol_iack = additions.get("iack", 0)
        protocol_data.protocol_rsv = 0
        protocol_data.protocol_sub_type = protocol_type
        protocol_data.protocol_win_size = "ffff"
        # Microsecond timestamp truncated to 32 bits.
        protocol_data.protocol_timestamp = (nanotime.now().nanoseconds() / 1000) & 0xffffffff
        if "delay" in additions:
            protocol_data.protocol_time_delay = additions["delay"]
        else:
            # Default delay: time elapsed since the previous packet's timestamp.
            protocol_data.protocol_time_delay = (protocol_data.protocol_timestamp -
                                                 info_obj.nanotime_number)
        info_obj.nanotime_number = protocol_data.protocol_timestamp
        protocol_data.protocol_seqno = info_obj.session_sequence_number
        protocol_data.protocol_ackno = info_obj.session_ack_number
        protocol_data.protocol_data = data_obj
        return protocol_data
    elif protocol_head == PROTOCOL_DCCP_SYNC:
        protocol_data = PROTOCOL_DCCP_SYNC_STRUCT()
        protocol_data.data_peerid = info_obj.peer_id
        return protocol_data
    elif protocol_head == PROTOCOL_DCCP_SYNACK:
        # Not implemented; falls through to the implicit None return.
        pass
    elif protocol_head == PROTOCOL_DCCP_DATA:
        protocol_data = PROTOCOL_DCCP_DATA_STRUCT()
        protocol_data.data_cccp_obj = data_obj
        return protocol_data
    elif protocol_head == PROTOCOL_DCCP_ACK:
        protocol_data = PROTOCOL_DCCP_ACK_STRUCT()
        # 5 is the default ack reason when the caller gives none.
        protocol_data.data_whyack = additions.get("whyack", 5)
        return protocol_data
    elif protocol_head == PROTOCOL_DCCP_FIN:
        protocol_data = PROTOCOL_DCCP_FIN_STRUCT()
        protocol_data.data_code = 0
        return protocol_data
    elif protocol_head == PROTOCOL_CCCP_PUSH_STREAM_REQ:
        protocol_data = PROTOCOL_CCCP_HEAD_STRURCT()
        protocol_data.protocol_type = PROTOCOL_CCCP_PUSH_STREAM_REQ
        protocol_data.protocol_data = PROTOCOL_CCCP_PUSH_STREAM_REQ_STRUCT()
        protocol_data.protocol_data.request_id = info_obj.request_id
        protocol_data.protocol_data.peer_id = info_obj.peer_id
        protocol_data.protocol_data.file_id = info_obj.file_id
        protocol_data.protocol_data.file_url = info_obj.file_url
        protocol_data.protocol_data.cppc_number = 1
        return protocol_data
    elif protocol_head == PROTOCOL_CCCP_PUSH_STREAM_RSP:
        # Not implemented; falls through to the implicit None return.
        pass
    elif protocol_head == PROTOCOL_CCCP_PUSH_PIECE_DATA:
        # Not implemented; falls through to the implicit None return.
        pass
    elif protocol_head == PROTOCOL_CCCP_PUSH_STREAM_FIN:
        protocol_data = PROTOCOL_CCCP_HEAD_STRURCT()
        protocol_data.protocol_type = PROTOCOL_CCCP_PUSH_STREAM_FIN
        protocol_data.protocol_data = PROTOCOL_CCCP_PUSH_STREAM_FIN_STRUCT()
        protocol_data.protocol_data.request_id = info_obj.request_id
        protocol_data.protocol_data.file_id = info_obj.file_id
        protocol_data.protocol_data.status = 0
        return protocol_data
    else:
        return None
# InfluxDB server details
INFLUX_URL = 'http://172.16.132.86'
INFLUX_PORT = '8086'
INFLUX_DB_NAME = 'ecg_stream_test'

# Number of points batched into each POST.
BATCH_AMOUNT = 2000

# Simulated unique stream id: the host name reduced to alphanumerics.
STREAM_ID = ''.join(ch for ch in socket.gethostname() if ch.isalnum())

# Input file from the data directory.
FILE_NAME = 'mgh001.csv'

session = FuturesSession()
t = nanotime.now().milliseconds()


# Called when a POST completes.
def bg_cb(sess, resp):
    # Print response status and round-trip time for the POST operation.
    print("ID: " + STREAM_ID + ", Response: " + str(resp.status_code) +
          ", Delta: " + str(nanotime.now().milliseconds() - t))


with open('../data/' + FILE_NAME, 'rt') as f:
    reader = csv.reader(f)
    count = 0
    s = ''
    for row in reader:
        count += 1
def on_order(self, order, kwargs):
    """Remember when the order went out so on_fill can compute latency."""
    # TODO this is a temporary method
    self._cache_order_time = nanotime.now()
def get_response_time(query):
    """Time one validate_MAC(query) call and return the elapsed time as int.

    The value is the difference of two nanotime instants coerced to int
    (presumably nanoseconds — confirm against nanotime's __int__).
    """
    start = nanotime.now()
    validate_MAC(query)
    finish = nanotime.now()
    return int(finish - start)
def get_nanotime():
    """Return the current time as integer nanoseconds since the Unix epoch.

    Uses nanotime's native nanosecond accessor rather than multiplying the
    float ``unixtime()`` by 1e9: a float64 cannot represent present-day
    epoch values at nanosecond granularity, so the old form silently lost
    the low-order digits.
    """
    return int(nanotime.now().nanoseconds())
def on_bar_start(self, bar, kwargs):
    """Stamp the bar-processing start time into the shared kwargs dict."""
    start = nanotime.now()
    kwargs["timestamp_bar_start"] = start
def on_bar_end(self, bar, kwargs):
    """Stamp the bar end time and record the bar latency in nanoseconds."""
    end = nanotime.now()
    kwargs["timestamp_bar_end"] = end
    elapsed = end - kwargs["timestamp_bar_start"]
    self.count_bar(elapsed.nanoseconds())
def on_tick_start(self, tick, kwargs):
    """Stamp the tick-processing start time into the shared kwargs dict."""
    start = nanotime.now()
    kwargs["timestamp_tick_start"] = start
def on_tick_end(self, tick, kwargs):
    """Stamp the tick end time and record the tick latency in nanoseconds.

    Fix: the delta is converted with ``.nanoseconds()`` before counting,
    matching the sibling hooks (``on_bar_end``, ``on_fill``) which both
    pass integer nanoseconds to their counters; previously the raw
    nanotime delta object was handed to ``count_tick``.
    """
    kwargs["timestamp_tick_end"] = nanotime.now()
    elapsed = kwargs["timestamp_tick_end"] - kwargs["timestamp_tick_start"]
    self.count_tick(elapsed.nanoseconds())