def calendar_items(self, month, monthrange):
    if month < 10:
        mn = "0%i" % month
    else:
        mn = month
    d1 = datetime.datetime.strptime(
        ("%i-%s-01 00:00:00" % (self.year, mn)), "%Y-%m-%d %H:%M:%S")
    d2 = datetime.datetime.strptime(
        ("%i-%s-%i 23:59:59" % (self.year, mn, monthrange)),
        "%Y-%m-%d %H:%M:%S")
    start = long(time.mktime(d1.timetuple()) + 1e-6 * d1.microsecond) * 1000
    end = long(time.mktime(d2.timetuple()) + 1e-6 * d2.microsecond) * 1000
    all_logs = self.conn.getEventsByPeriod(start, end, self.eid)

    items = dict()
    for d in range(1, monthrange + 1):
        items[d] = list()
    for i in all_logs:
        for d in items:
            if time.gmtime(i.event.time.val / 1000).tm_mday == d:
                items[d].append({
                    "id": i.entityId.val,
                    "type": i.entityType.val,
                    "action": i.action.val,
                })
    return items
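# The method above boils down to "turn a (year, month) pair into the epoch-millisecond
# bounds expected by getEventsByPeriod(), then bucket the returned events by day".
# A minimal, self-contained sketch of just that conversion (calendar.monthrange and
# the helper name are illustrative, not part of the code above):
import calendar
import datetime
import time


def month_bounds_millis(year, month):
    last_day = calendar.monthrange(year, month)[1]
    d1 = datetime.datetime(year, month, 1, 0, 0, 0)
    d2 = datetime.datetime(year, month, last_day, 23, 59, 59)
    # time.mktime() gives seconds since epoch (local time); scale to milliseconds
    return (int(time.mktime(d1.timetuple())) * 1000,
            int(time.mktime(d2.timetuple())) * 1000)

# month_bounds_millis(2023, 2) -> (start_ms, end_ms) covering all of February 2023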
def __init__(self,
             device_id=0,
             stream_resolution=(640, 480),
             video_converter_resolution=(640, 480),
             frame_duration_range=(long(1e9 // 30), long(1e9 // 30)),
             exposure_time_range=(long(0), long(999999999)),
             source_clip_rect=(0.0, 0.0, 1.0, 1.0),
             gain_range=(0., 0.),
             sensor_mode=0):
    self.device_id = device_id
    self.video_converter_resolution = video_converter_resolution
    self.config = DEFAULT_DEVKIT_CONFIG()
    self.config.setDeviceId(device_id)
    self.config.setStreamResolution(stream_resolution)
    self.config.setVideoConverterResolution(video_converter_resolution)
    self.config.setFrameDurationRange(frame_duration_range)
    self.config.setExposureTimeRange(exposure_time_range)
    self.config.setGainRange(gain_range)
    self.config.setSourceClipRect(source_clip_rect)
    self.config.setExposureCompensation(0)
    self.config.setAeLock(True)
    # self.config.setSensorMode(sensor_mode)
    self.channels = 4
    self.camera = IArgusCamera_createArgusCamera(self.config)
def list_tests(self):
    """List currently known test types.

    A new 'test type' is created each time openhtf.Test is instantiated, and
    lasts as long as there are any external references to it (ie, outside the
    internal tracking structures within OpenHTF).

    This means creating large numbers of openhtf.Test instances and keeping
    references to them around can cause memory usage to grow rapidly and
    station_api performance to degrade; don't do that.

    Returns:
      List of RemoteTest tuple values (as a dict). Only currently-executing
      tests are returned.
    """
    retval = [{
        'test_uid': test.uid,
        'test_name': test.get_option('name'),
        'created_time_millis': long(test.created_time_millis),
        'last_run_time_millis':
            test.last_run_time_millis and long(test.last_run_time_millis),
    } for test in openhtf.Test.TEST_INSTANCES.values()
        if test.uid is not None]
    _LOG.debug('RPC:list_tests() -> %s results', len(retval))
    return retval
def data2key(data):
    data = utils._parse_ssh_key(data)
    _, exp, mod = (
        next(data),  # noqa: F841 (_)
        long(utils.encode_hex(next(data)), 16),
        long(utils.encode_hex(next(data)), 16))
    return _rsa_construct(exp, mod)
def closestFloorValueNumpy(val, lst):
    '''
    Return the closest previous ("floor") value to where val should be in lst
    (or val itself if present). Please use numpy.array for lst.

    PERF ANNOUNCEMENT - AFTER TESTING:
    you are better off using numpy.array for lst; a plain list ([]) is ~15x
    slower and array.array ~22x slower. array.array has bad algorithmic
    performance here.
    '''
    # Find indices where elements should be inserted to maintain order.
    if isinstance(lst, list):  # TODO delete
        log.warning('misuse of closestFloorValue')
        try:
            # be positive, it's a small hit compared to searchsorted on a
            # non-numpy array
            return lst.index(val)
        except ValueError as e:
            pass
        return closestFloorValueOld(val, lst)
    indicetab = numpy.searchsorted(lst, [val])
    ind = int(indicetab[0])
    if ind < len(lst):
        if long(lst[ind]) == val:
            return long(lst[ind]), ind
    if ind == 0:
        raise ValueError('Value %0x is under minimum' % val)
    i = ind - 1
    return long(lst[i]), i
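# Hedged usage sketch for the numpy path above: numpy.searchsorted() returns the
# insertion index, so the "floor" value is the element just before it unless the
# value is an exact match. Array contents below are illustrative only.
import numpy

sorted_addresses = numpy.array([0x1000, 0x2000, 0x3000])
ind = int(numpy.searchsorted(sorted_addresses, [0x2500])[0])   # -> 2
# 0x2500 is not an exact match, so the closest previous value is at index ind - 1
floor_value, floor_index = int(sorted_addresses[ind - 1]), ind - 1   # (0x2000, 1)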
def to_python(self, value):
    if self.required and not value:
        raise ValidationError(self.error_messages["required"])
    elif not self.required and not value:
        return []
    if not isinstance(value, (list, tuple)):
        raise ValidationError(self.error_messages["list"])
    final_values = []
    for val in value:
        try:
            long(val)
        except Exception:
            raise ValidationError(self.error_messages["invalid_choice"])
        else:
            res = False
            for q in self.queryset:
                if hasattr(q.id, "val"):
                    if long(val) == q.id.val:
                        res = True
                else:
                    if long(val) == q.id:
                        res = True
            if not res:
                raise ValidationError(
                    self.error_messages["invalid_choice"])
            else:
                final_values.append(val)
    return final_values
def to_timestamp(self, d):
    '''Return an integer that corresponds to the Unix timestamp, i.e.
    number of milliseconds since epoch.

    This method converts both datetime.datetime and numpy.datetime64 objects.
    '''
    if d is None:
        return d

    if six.PY2:
        if isinstance(d, long):
            # compat with python2 long from datetime.datetime
            d = d / 1000000000
            return long(d)

    if isinstance(d, numpy.datetime64):
        if str(d) == "NaT":
            return None
        d = d.astype(datetime)

        if six.PY2:
            if isinstance(d, long):
                return long(round(d / 1000000))

        if isinstance(d, int):
            # sometimes `astype(datetime)` returns an int timestamp in
            # nanoseconds - parse this.
            return round(d / 1000000)

    # Convert `datetime.datetime` and `pandas.Timestamp` to millisecond
    # timestamps
    return int(
        (time.mktime(d.timetuple()) + d.microsecond / 1000000.0) * 1000)
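# Minimal sketch of the final datetime -> millisecond conversion above, assuming a
# timezone-naive local datetime (time.mktime interprets the timetuple as local time):
import time
from datetime import datetime

d = datetime(2020, 1, 1, 12, 0, 0, 500000)
millis = int((time.mktime(d.timetuple()) + d.microsecond / 1000000.0) * 1000)
# millis is the local-time Unix timestamp of 2020-01-01 12:00:00.500 in milliseconds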
def read_url(self, path, offset=0, length=None, bufsize=None):
    """
    read(path, offset, length[, bufsize]) -> data

    Read data from a file.
    """
    path = self.strip_normpath(path)
    params = self._getparams()
    params['op'] = 'OPEN'
    params['offset'] = long(offset)
    if length is not None:
        params['length'] = long(length)
    if bufsize is not None:
        params['bufsize'] = bufsize
    if self._security_enabled:
        token = self.get_delegation_token(self.user)
        if token:
            params['delegation'] = token
            # doas should not be present with delegation token as the token
            # includes the username
            # https://hadoop.apache.org/docs/r1.0.4/webhdfs.html
            if 'doas' in params:
                del params['doas']
            if 'user.name' in params:
                del params['user.name']
    quoted_path = urllib_quote(smart_str(path))
    return self._client._make_url(quoted_path, params)
def testConversionMethod(self):
    assert None == rtype(None)
    assert rlong(1) == rtype(rlong(1))  # Returns self
    assert rbool(True) == rtype(True)
    # Unsupported
    # assert rdouble(0) == rtype(Double.valueOf(0))
    assert rfloat(0) == rtype(float(0))
    if sys.version_info < (3, 0, 0):
        assert rlong(0) == rtype(long(0))
    else:
        assert rint(0) == rtype(long(0))
    assert rint(0) == rtype(int(0))
    assert rstring("string") == rtype("string")
    # Unsupported
    # assert rtime(time) == rtype(new Timestamp(time))
    rtype(omero.model.ImageI())
    rtype(omero.grid.JobParams())
    rtype(set([rlong(1)]))
    rtype(list([rlong(2)]))
    rtype({})
    # Unsupported
    # rtype(array)
    try:
        rtype(())
        assert False, "Shouldn't be able to handle this yet"
    except omero.ClientError:
        pass
def getkeys(self, host):
    for script in self.getscripts(host):
        key = script["script"][self.scriptid]['pubkey']
        yield Key(
            host['addr'], script["port"], "ssl", key['type'], key['bits'],
            _rsa_construct(long(key['exponent']), long(key['modulus'])),
            utils.decode_hex(script["script"][self.scriptid]['md5']))
def getDiskSpace(path='.'):
    """ Get the free disk space in the partition containing the path.
        The disk space is reported in MBytes. Returns -1 in case of any
        error, e.g. path does not exist.
    """
    if not os.path.exists(path):
        return -1
    comm = 'df -P -m %s | tail -1' % path
    resultDF = shellCall(10, comm)
    if resultDF['OK'] and not resultDF['Value'][0]:
        output = resultDF['Value'][1]
        if output.find(' /afs') >= 0:
            # AFS disk space
            comm = 'fs lq | tail -1'
            resultAFS = shellCall(10, comm)
            if resultAFS['OK'] and not resultAFS['Value'][0]:
                output = resultAFS['Value'][1]
                fields = output.split()
                quota = long(fields[1])
                used = long(fields[2])
                space = (quota - used) / 1024
                return int(space)
            else:
                return -1
        else:
            fields = output.split()
            try:
                value = int(fields[3])
            except Exception as error:
                print("Exception during disk space evaluation:", str(error))
                value = -1
            return value
    else:
        return -1
def data2key(data):
    data = utils.parse_ssh_key(data)
    _, exp, mod = (
        next(data),  # noqa: F841 (_)
        long(utils.encode_hex(next(data)), 16),
        long(utils.encode_hex(next(data)), 16))
    return RSA.construct((mod, exp))
def getkeys(self, record):
    yield Key(
        record['addr'], record["port"], "ssh",
        record['infos']['algo'][4:], record['infos']['bits'],
        RSA.construct((long(record['infos']['modulus']),
                       long(record['infos']['exponent']))),
        utils.decode_hex(record['infos']['md5']))
def checkValue(q, value):
    if not hasattr(q, 'id'):
        return False
    if hasattr(q.id, 'val'):
        if long(value) == q.id.val:
            return True
    if long(value) == q.id:
        return True
def test_distanceCalculation(self):
    ridone = hashlib.sha1(os.urandom(32))
    ridtwo = hashlib.sha1(os.urandom(32))

    shouldbe = long(ridone.hexdigest(), 16) ^ long(ridtwo.hexdigest(), 16)
    none = Node(ridone.digest())
    ntwo = Node(ridtwo.digest())
    self.assertEqual(none.distanceTo(ntwo), shouldbe)
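# Illustrative sketch of the XOR distance metric this test exercises: a Kademlia
# node's distance to another node is the bitwise XOR of their 160-bit SHA-1 IDs,
# interpreted as integers.
import binascii
import hashlib
import os

id_a = hashlib.sha1(os.urandom(32)).digest()
id_b = hashlib.sha1(os.urandom(32)).digest()
distance = int(binascii.hexlify(id_a), 16) ^ int(binascii.hexlify(id_b), 16)
# distance == 0 only when the two IDs are identical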
def __removeRequests(self, requestIDs):
    """ This will remove requests from the RMS system -
    """
    rIDs = [int(long(j)) for j in requestIDs if long(j)]
    for reqID in rIDs:
        self.reqClient.cancelRequest(reqID)

    return S_OK()
def testMJD(self):
    ts = DateTime(45205.125, DateTime.MJD, DateTime.UTC)
    self.assertEqual(ts.nsecs(DateTime.UTC), long(399006000000000000))
    self.assertEqual(ts.nsecs(DateTime.TAI), long(399006021000000000))
    self.assertAlmostEqual(ts.get(DateTime.MJD, DateTime.UTC), 45205.125)
    self.assertAlmostEqual(ts.get(DateTime.MJD, DateTime.TAI),
                           45205.125 + 21.0 / 86400.0)
    self.assertTrue(ts.isValid())
def testNsecsDefault(self):
    ts = DateTime(long(1192755506000000000))
    self.assertEqual(ts.nsecs(DateTime.UTC), long(1192755473000000000))
    self.assertEqual(ts.nsecs(DateTime.TAI), long(1192755506000000000))
    self.assertEqual(ts.nsecs(), long(1192755506000000000))
    self.assertAlmostEqual(ts.get(DateTime.MJD, DateTime.UTC),
                           54392.040196759262)
    self.assertTrue(ts.isValid())
def checkValue(q, value):
    if not hasattr(q, "id"):
        return False
    if hasattr(q.id, "val"):
        if long(value) == q.id.val:
            return True
    if long(value) == q.id:
        return True
def getkeys(self, host):
    for script in self.getscripts(host):
        key = script["script"][self.scriptid]['pubkey']
        yield Key(host['addr'], script["port"], "ssl", key['type'],
                  key['bits'],
                  RSA.construct((long(key['modulus']),
                                 long(key['exponent']),)),
                  utils.decode_hex(script["script"][self.scriptid]['md5']))
def total_count(self):
    """Return total count of answers."""
    if 'X-RF-TOTAL-COUNT' in self._req_response.headers:
        return long(self._req_response.headers.get("X-RF-TOTAL-COUNT"))
    elif 'counts' in self.result:
        # Indicates Connect API
        # force long, will be int in py3 otherwise
        if 'total' in self.result['counts']:
            return long(self.result['counts']['total'])
    elif "result" in self.result:
        # term query
        return long(self.result['result']['total_count'])
    return None
def getkeys(self, record):
    certtext = self._der2key(record['value'])
    if certtext is None:
        return

    yield Key(
        record['addr'], record["port"], "ssl", certtext['type'],
        int(certtext['len']),
        _rsa_construct(
            long(certtext['exponent']),
            long(self.modulus_badchars.sub(b"", certtext['modulus']), 16)),
        utils.decode_hex(record['infos']['md5']))
def getkeys(self, record):
    certtext = self._pem2key(record['fullvalue']
                             if 'fullvalue' in record
                             else record['value'])
    if certtext is None:
        return

    yield Key(
        utils.int2ip(record['addr']), record["port"], "ssl",
        certtext['type'], int(certtext['len']),
        RSA.construct(
            (long(self.modulus_badchars.sub("", certtext['modulus']), 16),
             long(certtext['exponent']))),
        utils.decode_hex(record['infos']['md5hash']))
def getkeys(self, record):
    certtext = self._der2key(record['value'])
    if certtext is None:
        return

    yield Key(record['addr'], record["port"], "ssl", certtext['type'],
              int(certtext['len']),
              RSA.construct((
                  long(self.modulus_badchars.sub(
                      b"", certtext['modulus']), 16),
                  long(certtext['exponent']))),
              utils.decode_hex(record['infos']['md5']))
def do_basic(self, klass):
    ds = klass()
    self.failUnlessEqual(ds.len(), 0)
    self.failUnlessEqual(list(ds._dump()), [])
    self.failUnlessEqual(sum([len(d) for (s, d) in ds.get_chunks()]), 0)
    s1 = ds.get_spans()
    self.failUnlessEqual(ds.get(0, 4), None)
    self.failUnlessEqual(ds.pop(0, 4), None)
    ds.remove(0, 4)

    ds.add(2, b"four")
    self.failUnlessEqual(ds.len(), 4)
    self.failUnlessEqual(list(ds._dump()), [2, 3, 4, 5])
    self.failUnlessEqual(sum([len(d) for (s, d) in ds.get_chunks()]), 4)
    s1 = ds.get_spans()
    self.failUnless((2, 2) in s1)
    self.failUnlessEqual(ds.get(0, 4), None)
    self.failUnlessEqual(ds.pop(0, 4), None)
    self.failUnlessEqual(ds.get(4, 4), None)

    ds2 = klass(ds)
    self.failUnlessEqual(ds2.len(), 4)
    self.failUnlessEqual(list(ds2._dump()), [2, 3, 4, 5])
    self.failUnlessEqual(sum([len(d) for (s, d) in ds2.get_chunks()]), 4)
    self.failUnlessEqual(ds2.get(0, 4), None)
    self.failUnlessEqual(ds2.pop(0, 4), None)
    self.failUnlessEqual(ds2.pop(2, 3), b"fou")
    self.failUnlessEqual(sum([len(d) for (s, d) in ds2.get_chunks()]), 1)
    self.failUnlessEqual(ds2.get(2, 3), None)
    self.failUnlessEqual(ds2.get(5, 1), b"r")
    self.failUnlessEqual(ds.get(2, 3), b"fou")
    self.failUnlessEqual(sum([len(d) for (s, d) in ds.get_chunks()]), 4)

    ds.add(0, b"23")
    self.failUnlessEqual(ds.len(), 6)
    self.failUnlessEqual(list(ds._dump()), [0, 1, 2, 3, 4, 5])
    self.failUnlessEqual(sum([len(d) for (s, d) in ds.get_chunks()]), 6)
    self.failUnlessEqual(ds.get(0, 4), b"23fo")
    self.failUnlessEqual(ds.pop(0, 4), b"23fo")
    self.failUnlessEqual(sum([len(d) for (s, d) in ds.get_chunks()]), 2)
    self.failUnlessEqual(ds.get(0, 4), None)
    self.failUnlessEqual(ds.pop(0, 4), None)

    ds = klass()
    ds.add(2, b"four")
    ds.add(3, b"ea")
    self.failUnlessEqual(ds.get(2, 4), b"fear")

    ds = klass()
    ds.add(long(2), b"four")
    ds.add(long(3), b"ea")
    self.failUnlessEqual(ds.get(long(2), long(4)), b"fear")
def getkeys(self, record):
    certtext = self._pem2key(record['fullvalue']
                             if 'fullvalue' in record
                             else record['value'])
    if certtext is None:
        return

    yield Key(utils.int2ip(record['addr']), record["port"], "ssl",
              certtext['type'], int(certtext['len']),
              RSA.construct((
                  long(self.modulus_badchars.sub(
                      "", certtext['modulus']), 16),
                  long(certtext['exponent']))),
              utils.decode_hex(record['infos']['md5hash']))
def test_tokens_to_str(self):
    self.assertEqual(self.sel.tokens_to_str([]), '')
    self.assertEqual(self.sel.tokens_to_str(['a']), '/a')
    self.assertEqual(self.sel.tokens_to_str(['a', 0]), '/a/0')
    self.assertEqual(self.sel.tokens_to_str(('a', 0)), '/a/0')
    self.assertEqual(self.sel.tokens_to_str(('a', long(0))), '/a/0')
    self.assertEqual(self.sel.tokens_to_str(['a', '*']), '/a/*')
    self.assertEqual(self.sel.tokens_to_str(['a', 'b', 0]), '/a/b/0')
    self.assertEqual(self.sel.tokens_to_str(['a', 'b', [0, 1]]), '/a/b[0,1]')
    self.assertEqual(self.sel.tokens_to_str(['a', 'b', (0, 1)]), '/a/b[0,1]')
    self.assertEqual(self.sel.tokens_to_str(['a', 'b', slice(0, 5)]),
                     '/a/b[0:5]')
    self.assertEqual(self.sel.tokens_to_str(['a', 'b',
                                             slice(long(0), long(5))]),
                     '/a/b[0:5]')
    self.assertEqual(self.sel.tokens_to_str(['a', 'b', slice(None, 5)]),
                     '/a/b[:5]')
def to_timestamp(self, obj):
    '''Return an integer that corresponds to the Unix timestamp, i.e.
    number of milliseconds since epoch.

    This method converts both datetime.datetime and numpy.datetime64 objects.
    '''
    if obj is None:
        return obj

    if isinstance(obj, Period):
        # extract the start of the Period
        obj = obj.to_timestamp()

    if six.PY2:
        if isinstance(obj, long):
            # compat with python2 long from datetime.datetime
            obj = obj / 1000000000
            return long(obj)

    if isinstance(obj, numpy.datetime64):
        if str(obj) == "NaT":
            return None

        # astype(datetime) returns an int or a long (in python 2)
        obj = obj.astype(datetime)

        if isinstance(obj, date) and not isinstance(obj, datetime):
            # handle numpy "datetime64[D/W/M/Y]" - mktime output in seconds
            return int((time.mktime(obj.timetuple())) * 1000)

        if six.PY2:
            if isinstance(obj, long):
                return long(round(obj / 1000000))

        if isinstance(obj, int):
            return round(obj / 1000000)

    if isinstance(obj, (int, float)):
        # figure out whether the timestamp is in seconds or milliseconds
        try:
            # if it does not overflow, it's seconds - convert to milliseconds;
            # otherwise it is already in milliseconds.
            datetime.fromtimestamp(obj)
            return int(obj * 1000)
        except (ValueError, OverflowError):
            # milliseconds
            return int(obj)

    # Convert `datetime.datetime` and `pandas.Timestamp` to millisecond
    # timestamps
    return int(
        (time.mktime(obj.timetuple()) + obj.microsecond / 1000000.0) * 1000)
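# Hedged sketch of the seconds-vs-milliseconds heuristic used above: a value that
# datetime.fromtimestamp() accepts is treated as seconds and scaled up, one that
# overflows (the resulting year would be absurd) is assumed to already be in
# milliseconds. The helper name below is illustrative only.
from datetime import datetime


def _normalize_to_millis(ts):
    try:
        datetime.fromtimestamp(ts)
        return int(ts * 1000)          # plausible as seconds -> scale to ms
    except (ValueError, OverflowError):
        return int(ts)                 # too large for seconds -> already ms

# _normalize_to_millis(1600000000)    -> 1600000000000
# _normalize_to_millis(1600000000000) -> 1600000000000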
def singleDiracBenchmark(iterations=1, measuredCopies=None):
    """ Get Normalized Power of one CPU in DIRAC Benchmark 2012 units (DB12)
    """

    # This number of iterations corresponds to 1kHS2k.seconds,
    # i.e. 250 HS06 seconds
    n = int(1000 * 1000 * 12.5)
    calib = 250.0

    m = long(0)
    m2 = long(0)
    p = 0
    p2 = 0

    # Do one iteration extra to allow CPUs with variable speed (we ignore the
    # zeroth iteration)
    # Do one or more extra iterations to avoid tail effects when copies run
    # in parallel
    i = 0
    while (i <= iterations) or \
            (measuredCopies is not None and measuredCopies.value > 0):
        if i == 1:
            start = os.times()

        # Now the iterations
        for _j in xrange(n):
            t = random.normalvariate(10, 1)
            m += t
            m2 += t * t
            p += t
            p2 += t * t

        if i == iterations:
            end = os.times()
            if measuredCopies is not None:
                # Reduce the total of running copies by one
                measuredCopies.value -= 1

        i += 1

    cput = sum(end[:4]) - sum(start[:4])
    wall = end[4] - start[4]

    if not cput:
        return None

    # Return DIRAC-compatible values
    return {'CPU': cput, 'WALL': wall,
            'NORM': calib * iterations / cput, 'UNIT': 'DB12'}
def to_timestamp(self, obj):
    '''Return an integer that corresponds to the Unix timestamp, i.e.
    number of milliseconds since epoch.

    This method converts both datetime.datetime and numpy.datetime64 objects.
    '''
    if obj is None:
        return obj

    if obj.__class__.__name__ == "date":
        # handle updating datetime with date object
        obj = datetime(obj.year, obj.month, obj.day)

    if isinstance(obj, Period):
        # extract the start of the Period
        obj = obj.to_timestamp()

    if six.PY2:
        if isinstance(obj, long):
            # compat with python2 long from datetime.datetime
            obj = obj / 1000000000
            return long(obj)

    if isinstance(obj, numpy.datetime64):
        if str(obj) == "NaT":
            return None

        # astype(datetime) returns an int or a long (in python 2)
        obj = obj.astype(datetime)

        if isinstance(obj, date) and not isinstance(obj, datetime):
            # handle numpy "datetime64[D/W/M/Y]" - mktime output in seconds
            return int((time.mktime(obj.timetuple())) * 1000)

        if six.PY2:
            if isinstance(obj, long):
                return long(round(obj / 1000000))

        if isinstance(obj, int):
            return round(obj / 1000000)

    if isinstance(obj, (int, float)):
        return _normalize_timestamp(obj)

    return int(
        (time.mktime(obj.timetuple()) + obj.microsecond / 1000000.0) * 1000)
def testToDict(self):
    ps = dafBase.PropertySet()
    ps.setBool("bool", True)
    ps.setShort("short", 42)
    ps.setInt("int", 2008)
    ps.setLongLong("int64_t", long(0xfeeddeadbeef))
    ps.setInt("ints", [10, 9, 8])

    ps2 = dafBase.PropertySet()
    ps2.set("ps", ps)
    ps2.setFloat("float", 3.14159)
    ps2.setDouble("double", 2.718281828459045)
    ps2.set("char*", "foo")
    ps2.setString("string", "bar")
    ps2.set("dt",
            dafBase.DateTime("20090402T072639.314159265Z",
                             dafBase.DateTime.UTC))

    d = ps2.toDict()
    self.assertIsInstance(d, dict)

    self.assertIsInstance(d["float"], float)
    self.assertAlmostEqual(d["float"], 3.14159, 6)
    self.assertIsInstance(d["double"], float)
    self.assertEqual(d["double"], 2.718281828459045)

    self.assertIsInstance(d["char*"], basestring)
    self.assertEqual(d["char*"], "foo")
    self.assertIsInstance(d["string"], basestring)
    self.assertEqual(d["string"], "bar")
    self.assertIsInstance(d["dt"], dafBase.DateTime)
    self.assertEqual(d["dt"].nsecs(), long(1238657233314159265))

    d2 = d["ps"]
    self.assertIsInstance(d2, dict)

    self.assertIsInstance(d2["bool"], bool)
    self.assertEqual(d2["bool"], True)
    self.assertIsInstance(d2["short"], (int, long))
    self.assertEqual(d2["short"], 42)
    self.assertIsInstance(d2["int"], (int, long))
    self.assertEqual(d2["int"], 2008)
    self.assertIsInstance(d2["int64_t"], (int, long))
    self.assertEqual(d2["int64_t"], long(0xfeeddeadbeef))
    self.assertIsInstance(d2["ints"], list)
    self.assertIsInstance(d2["ints"][0], (int, long))
    self.assertEqual(d2["ints"], [10, 9, 8])
def _calc_hash(op, args, keywords):
    """
    Calculates the hash of an AST, given the operation, args, and kwargs.

    :param op:       The operation.
    :param args:     The arguments to the operation.
    :param keywords: A dict including the 'symbolic', 'variables', and
                     'length' items.

    :returns: a hash.

    We do it using md5 to avoid hash collisions.
    (hash(-1) == hash(-2), for example)
    """
    args_tup = tuple(
        long(a) if type(a) is int and int is not long else
        (a if type(a) in (long, float) else hash(a))
        for a in args)
    to_hash = (op, args_tup, keywords['symbolic'],
               hash(keywords['variables']),
               str(keywords.get('length', None)),
               hash(keywords.get('annotations', None)))

    # Why do we use md5 when it's broken? Because speed is more important
    # than cryptographic integrity here. Then again, look at all those
    # allocations we're doing here... fast python is painful.
    hd = hashlib.md5(pickle.dumps(to_hash, -1)).digest()
    return md5_unpacker.unpack(hd)[0]  # 64 bits
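# Minimal sketch of the md5-based hashing above: pickle the normalized key material,
# md5 it, and keep the first 64 bits as the hash. `struct.Struct('Q')` here stands in
# for the module-level md5_unpacker assumed by the function above.
import hashlib
import pickle
import struct

_unpacker = struct.Struct('Q')

to_hash = ('__add__', (1, 2), False)
digest = hashlib.md5(pickle.dumps(to_hash, -1)).digest()
hash64 = _unpacker.unpack_from(digest)[0]   # first 8 bytes of the digest as an int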
def RenderValue(self, value):
    # Always render ints as longs - so that there's no ambiguity in the UI
    # renderers when type depends on the value.
    if isinstance(value, int):
        value = long(value)

    return self._IncludeTypeInfo(value, value)
def to_date_components(self, obj):
    '''Return a dictionary of string keys and integer values for `year`,
    `month`, and `day`.

    This method converts both datetime.date and numpy.datetime64 objects
    that contain datetime.date.
    '''
    if obj is None:
        return obj

    if isinstance(obj, (int, float)):
        obj = datetime.fromtimestamp(_normalize_timestamp(obj) / 1000)

    if six.PY2:
        if isinstance(obj, (long)):
            obj = datetime.fromtimestamp(long(obj))

    if isinstance(obj, numpy.datetime64):
        if str(obj) == "NaT":
            return None
        obj = obj.astype(datetime)

    if (six.PY2 and isinstance(obj, long)) or isinstance(obj, int):
        obj = datetime.fromtimestamp(obj / 1000000000)

    return {"year": obj.year, "month": obj.month, "day": obj.day}
def read_word(self, vaddr):
    """Addresses have to be aligned!"""
    laddr = self._vtop(vaddr)
    # word = self._target_platform.get_word_type().from_address(long(laddr)).value
    word = self._ctypes.c_ulong.from_address(long(laddr)).value
    # is non-aligned access a problem? endianness is at risk
    return word
def _getinfos_ssh_hostkey(spec):
    """Parse SSH host keys."""
    infos = {}
    data = utils.nmap_decode_data(spec['value'])
    for hashtype in ['md5', 'sha1', 'sha256']:
        infos[hashtype] = hashlib.new(hashtype, data).hexdigest()
    data = utils.parse_ssh_key(data)
    keytype = infos["algo"] = next(data).decode()
    if keytype == "ssh-rsa":
        try:
            infos["exponent"], infos["modulus"] = (
                long(utils.encode_hex(elt), 16) for elt in data
            )
        except Exception:
            utils.LOGGER.info("Cannot parse SSH host key for record %r",
                              spec, exc_info=True)
        else:
            infos["bits"] = math.ceil(math.log(infos["modulus"], 2))
            # convert integer to strings to prevent overflow errors
            # (e.g., "MongoDB can only handle up to 8-byte ints")
            for val in ["exponent", "modulus"]:
                infos[val] = str(infos[val])
    elif keytype == 'ecdsa-sha2-nistp256':
        infos['bits'] = 256
    elif keytype == 'ssh-ed25519':
        infos['bits'] = len(next(data)) * 8
    return {'infos': infos}
def to_date_components(self, obj):
    '''Return a dictionary of string keys and integer values for `year`,
    `month` (from 0 - 11), and `day`.

    This method converts both datetime.date and numpy.datetime64 objects
    that contain datetime.date.
    '''
    if obj is None:
        return obj

    if isinstance(obj, (int, float)):
        obj = datetime.fromtimestamp(_normalize_timestamp(obj) / 1000)

    if six.PY2:
        if isinstance(obj, (long)):
            obj = datetime.fromtimestamp(long(obj))

    if isinstance(obj, numpy.datetime64):
        if str(obj) == "NaT":
            return None
        obj = obj.astype(datetime)

    if (six.PY2 and isinstance(obj, long)) or isinstance(obj, int):
        obj = datetime.fromtimestamp(obj / 1000000000)

    # Perspective stores month in `t_date` as an integer [0-11],
    # while Python stores month as [1-12], so decrement the Python value
    # before Perspective attempts to use it.
    return {"year": obj.year, "month": obj.month - 1, "day": obj.day}
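# Hedged sketch of the 0-based month convention noted in the comment above: Python's
# date/datetime use months 1-12, while the t_date representation expects 0-11, so the
# month is decremented on the way out.
from datetime import date

d = date(2021, 3, 5)
components = {"year": d.year, "month": d.month - 1, "day": d.day}
# components == {"year": 2021, "month": 2, "day": 5}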
def from_data(self, data, panda_id):
    for attr, val in iteritems(data):
        # skip non attributes
        if attr not in self.attributes:
            continue
        setattr(self, attr, val)
    self.PandaID = long(panda_id)
def Validate(self, value):
    if value is None:
        value = 0

    if not isinstance(value, (int, long)):
        raise TypeValueError("Invalid value %s for Integer" % value)

    return long(value)
def _asdict(self):
    return {
        'name': self.name,
        'codeinfo': self.phase_record.codeinfo,
        'descriptor_id': self.phase_record.descriptor_id,
        'start_time_millis': long(self.phase_record.start_time_millis),
        'options': self.phase_record.options,
        'attachments': self.attachments,
        'measurements': self.measurements,
    }
def test_encode_property_long(self):
    geometry = 'POINT(0 0)'
    properties = {
        'test_int': int(1),
        'test_long': long(1)
    }
    self.assertRoundTrip(
        input_geometry=geometry,
        expected_geometry=[[0, 0]],
        properties=properties)
def test_encode_property_long(self):
    geometry = 'POINT(0 0)'
    properties = {
        'test_int': int(1),
        'test_long': long(1)
    }
    self.assertRoundTrip(
        input_geometry=geometry,
        expected_geometry={'type': 'Point', 'coordinates': [0, 0]},
        properties=properties)
def events_to_update(self, workspec):
    # get logger
    tmpLog = core_utils.make_logger(_logger,
                                    'workerID={0}'.format(workspec.workerID),
                                    method_name='events_to_update')
    # look for the json just under the access point
    retDict = dict()
    for pandaID in workspec.pandaid_list:
        # look for the json just under the access point
        accessPoint = self.get_access_point(workspec, pandaID)
        jsonFilePath = os.path.join(accessPoint, jsonEventsUpdateFileName)
        readJsonPath = jsonFilePath + suffixReadJson
        # first look for json.read which is not yet acknowledged
        tmpLog.debug('looking for event update file {0}'.format(readJsonPath))
        if os.path.exists(readJsonPath):
            pass
        else:
            tmpLog.debug('looking for event update file {0}'.format(jsonFilePath))
            if not os.path.exists(jsonFilePath):
                # not found
                tmpLog.debug('not found')
                continue
            try:
                # rename to prevent from being overwritten
                os.rename(jsonFilePath, readJsonPath)
            except Exception:
                tmpLog.error('failed to rename json')
                continue
        # load json
        nData = 0
        try:
            with open(readJsonPath) as jsonFile:
                tmpOrigDict = json.load(jsonFile)
                newDict = dict()
                # change the key from str to int
                for tmpPandaID, tmpDict in iteritems(tmpOrigDict):
                    tmpPandaID = long(tmpPandaID)
                    retDict[tmpPandaID] = tmpDict
                    nData += len(tmpDict)
        except Exception:
            tmpLog.error('failed to load json')
        # delete empty file
        if nData == 0:
            try:
                os.remove(readJsonPath)
            except Exception:
                pass
        tmpLog.debug('got {0} events for PandaID={1}'.format(nData, pandaID))
    return retDict
def t_INT_CONST(self, t):
    # [Ll] suffix has been deprecated
    r'[+-]?([1-9][0-9]*|0[xX]?[0-9a-fA-F]+|0)'
    # r'[+-]?([1-9][0-9]*|0)[lL]?'    # original regex for decimal integers in ncgen3.l file
    # r'0[xX]?[0-9a-fA-F]+[lL]?'      # original regex for octal or hex integers in ncgen3.l file
    try:
        long_val = long(eval(t.value))
    except SyntaxError:
        try:
            long_val = long(eval(t.value[0] + 'o' + t.value[1:]))
        except TypeError:
            errmsg = "Bad integer constant: %s" % t.value
            raise CDLContentError(errmsg)
    except TypeError:
        errmsg = "Bad integer constant: %s" % t.value
        raise CDLContentError(errmsg)
    if long_val < XDR_INT_MIN or long_val > XDR_INT_MAX:
        errmsg = "Integer constant outside valid range (%d -> %d): %s" \
            % (XDR_INT_MIN, XDR_INT_MAX, long_val)
        raise CDLContentError(errmsg)
    else:
        t.value = np.int32(long_val)
    return t
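# Hedged sketch of the octal fallback above: Python 3 rejects legacy leading-zero
# octal literals such as "017", so the lexer retries with an explicit "0o" prefix
# before reporting a bad constant.
value = "017"
try:
    parsed = int(eval(value))                         # SyntaxError on Python 3
except SyntaxError:
    parsed = int(eval(value[0] + 'o' + value[1:]))    # "0o17" -> 15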
def get_input_file_attributes(self, skip_ready=False):
    lfnToSkip = set()
    attemptNrMap = dict()
    pathMap = dict()
    for fileSpec in self.inFiles:
        if skip_ready and fileSpec.status == 'ready':
            lfnToSkip.add(fileSpec.lfn)
        attemptNrMap[fileSpec.lfn] = fileSpec.attemptNr
        pathMap[fileSpec.lfn] = fileSpec.path
    inFiles = {}
    lfns = self.jobParams['inFiles'].split(',')
    guids = self.jobParams['GUID'].split(',')
    fsizes = self.jobParams['fsize'].split(',')
    chksums = self.jobParams['checksum'].split(',')
    scopes = self.jobParams['scopeIn'].split(',')
    datasets = self.jobParams['realDatasetsIn'].split(',')
    endpoints = self.jobParams['ddmEndPointIn'].split(',')
    for lfn, guid, fsize, chksum, scope, dataset, endpoint in \
            zip(lfns, guids, fsizes, chksums, scopes, datasets, endpoints):
        try:
            fsize = long(fsize)
        except Exception:
            fsize = None
        if lfn in lfnToSkip:
            continue
        if lfn in attemptNrMap:
            attemptNr = attemptNrMap[lfn]
        else:
            attemptNr = 0
        inFiles[lfn] = {'fsize': fsize,
                        'guid': guid,
                        'checksum': chksum,
                        'scope': scope,
                        'dataset': dataset,
                        'endpoint': endpoint,
                        'attemptNr': attemptNr}
    # add path
    if 'inFilePaths' in self.jobParams:
        for lfn in lfns:
            if lfn not in inFiles or lfn not in pathMap:
                continue
            inFiles[lfn]['path'] = pathMap[lfn]
    # delete empty file
    if '' in inFiles:
        del inFiles['']
    if 'NULL' in inFiles:
        del inFiles['NULL']
    return inFiles
def record_timing_context(self):
    """Context manager for the execution of a single phase.

    This method performs some pre-phase setup on self (for measurements), and
    records the start and end time based on when the context is
    entered/exited.

    Yields:
      None
    """
    self._cached['start_time_millis'] = long(
        self.phase_record.record_start_time())

    try:
        yield
    finally:
        self._finalize_measurements()
        self._set_phase_outcome()
        self.phase_record.finalize_phase(self.options)
def ctypes_to_python_array(self, array):
    """Converts an array of an undetermined basic self.__ctypes class to a
    python array, by guessing its type from its class name.

    This is a bad example of introspection.
    """
    if isinstance(array, str) or isinstance(array, bytes):
        # special case for c_char[]
        return array
    if not self._ctypes.is_array_of_basic_instance(array):
        raise TypeError('NOT-AN-Basic-Type-ARRAY')
    if array._type_ in [self._ctypes.c_int, self._ctypes.c_uint,
                        self._ctypes.c_long, self._ctypes.c_ulong,
                        self._ctypes.c_ubyte, self._ctypes.c_byte]:
        return [long(el) for el in array]
    if array._type_ in [self._ctypes.c_float, self._ctypes.c_double,
                        self._ctypes.c_longdouble]:
        return [float(el) for el in array]
    sb = ''.join([struct.pack(array._type_._type_, el) for el in array])
    return sb
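# Minimal usage sketch for the conversion above, using plain ctypes (the method goes
# through a wrapped self._ctypes module, which is assumed here to mirror ctypes):
import ctypes

arr = (ctypes.c_int * 3)(1, 2, 3)
as_python = [int(el) for el in arr]          # [1, 2, 3]
floats = (ctypes.c_double * 2)(1.5, 2.5)
as_floats = [float(el) for el in floats]     # [1.5, 2.5]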
def history(self):
    """Get a history.History instance for this remote test.

    Note that accessing this attribute triggers an RPC to check for any new
    history entries since the most recent known one.  This means saving a
    reference to the return value and reusing it will not trigger an update
    from the remote end.  Alternatively, accessing the 'cached_history'
    attribute will reference only local already-known history.
    """
    last_start_time = self._cached_history.last_start_time(self.test_uid)
    new_history = self.shared_proxy.get_history_after(
        self.test_uid, long(last_start_time))
    _LOG.debug('Requested history update for %s after %s, got %s results.',
               self.test_uid, last_start_time, len(new_history))

    for record in new_history:
        self._cached_history.append_record(self.test_uid, record)
    return self.cached_history
def FromString(self, string):
    try:
        return long(string)
    except ValueError:
        raise TypeValueError("Invalid value %s for Integer" % string)
def getkeys(self, record):
    yield Key(record['addr'], record["port"], "ssh",
              record['infos']['algo'][4:],
              record['infos']['bits'],
              RSA.construct((long(record['infos']['modulus']),
                             long(record['infos']['exponent']))),
              utils.decode_hex(record['infos']['md5']))
def pem2key(cls, pem):
    certtext = cls._pem2key(pem)
    return None if certtext is None else RSA.construct((
        long(cls.modulus_badchars.sub(b"", certtext['modulus']), 16),
        long(certtext['exponent']),
    ))
def _get_number(s):
    try:
        return long(s)
    except (ValueError, TypeError):
        return 0
def now(self):
    return long(time.time() * 1000)
def rpc_find_node(self, sender, nodeid, key):
    self.log.info("finding neighbors of %i in local table"
                  % long(binascii.hexlify(nodeid), 16))
    source = Node(nodeid, sender[0], sender[1])
    self.welcomeIfNewNode(source)
    node = Node(key)
    return list(map(tuple, self.router.findNeighbors(node, exclude=source)))
def get_files_to_stage_out(self, workspec):
    # get logger
    tmpLog = core_utils.make_logger(_logger,
                                    'workerID={0}'.format(workspec.workerID),
                                    method_name='get_files_to_stage_out')
    fileDict = dict()
    # look for the json just under the access point
    for pandaID in workspec.pandaid_list:
        # look for the json just under the access point
        accessPoint = self.get_access_point(workspec, pandaID)
        jsonFilePath = os.path.join(accessPoint, jsonOutputsFileName)
        readJsonPath = jsonFilePath + suffixReadJson
        # first look for json.read which is not yet acknowledged
        tmpLog.debug('looking for output file {0}'.format(readJsonPath))
        if os.path.exists(readJsonPath):
            pass
        else:
            tmpLog.debug('looking for output file {0}'.format(jsonFilePath))
            if not os.path.exists(jsonFilePath):
                # not found
                tmpLog.debug('not found')
                continue
            try:
                tmpLog.debug('found')
                # rename to prevent from being overwritten
                os.rename(jsonFilePath, readJsonPath)
            except Exception:
                tmpLog.error('failed to rename json')
                continue
        # load json
        toSkip = False
        loadDict = None
        try:
            with open(readJsonPath) as jsonFile:
                loadDict = json.load(jsonFile)
        except Exception:
            tmpLog.error('failed to load json')
            toSkip = True
        # test validity of data format (ie it should be a Dictionary)
        if not toSkip:
            if not isinstance(loadDict, dict):
                tmpLog.error('loaded data is not a dictionary')
                toSkip = True
        # collect files and events
        nData = 0
        if not toSkip:
            sizeMap = dict()
            chksumMap = dict()
            eventsList = dict()
            for tmpPandaID, tmpEventMapList in iteritems(loadDict):
                tmpPandaID = long(tmpPandaID)
                # test if tmpEventMapList is a list
                if not isinstance(tmpEventMapList, list):
                    tmpLog.error('loaded data item is not a list')
                    toSkip = True
                    break
                for tmpEventInfo in tmpEventMapList:
                    try:
                        nData += 1
                        if 'eventRangeID' in tmpEventInfo:
                            tmpEventRangeID = tmpEventInfo['eventRangeID']
                        else:
                            tmpEventRangeID = None
                        tmpFileDict = dict()
                        pfn = tmpEventInfo['path']
                        lfn = os.path.basename(pfn)
                        tmpFileDict['path'] = pfn
                        if pfn not in sizeMap:
                            if 'fsize' in tmpEventInfo:
                                sizeMap[pfn] = tmpEventInfo['fsize']
                            else:
                                sizeMap[pfn] = os.stat(pfn).st_size
                        tmpFileDict['fsize'] = sizeMap[pfn]
                        tmpFileDict['type'] = tmpEventInfo['type']
                        if tmpEventInfo['type'] in ['log', 'output']:
                            # disable zipping
                            tmpFileDict['isZip'] = 0
                        elif tmpEventInfo['type'] == 'zip_output':
                            # already zipped
                            tmpFileDict['isZip'] = 1
                        elif 'isZip' in tmpEventInfo:
                            tmpFileDict['isZip'] = tmpEventInfo['isZip']
                        # guid
                        if 'guid' in tmpEventInfo:
                            tmpFileDict['guid'] = tmpEventInfo['guid']
                        else:
                            tmpFileDict['guid'] = str(uuid.uuid4())
                        # get checksum
                        if pfn not in chksumMap:
                            if 'chksum' in tmpEventInfo:
                                chksumMap[pfn] = tmpEventInfo['chksum']
                            else:
                                chksumMap[pfn] = core_utils.calc_adler32(pfn)
                        tmpFileDict['chksum'] = chksumMap[pfn]
                        if tmpPandaID not in fileDict:
                            fileDict[tmpPandaID] = dict()
                        if lfn not in fileDict[tmpPandaID]:
                            fileDict[tmpPandaID][lfn] = []
                        fileDict[tmpPandaID][lfn].append(tmpFileDict)
                        # skip if unrelated to events
                        if tmpFileDict['type'] not in ['es_output', 'zip_output']:
                            continue
                        tmpFileDict['eventRangeID'] = tmpEventRangeID
                        if tmpPandaID not in eventsList:
                            eventsList[tmpPandaID] = list()
                        eventsList[tmpPandaID].append({'eventRangeID': tmpEventRangeID,
                                                       'eventStatus': tmpEventInfo['eventStatus']})
                    except Exception:
                        core_utils.dump_error_message(tmpLog)
            # dump events
            if not toSkip:
                if len(eventsList) > 0:
                    curName = os.path.join(accessPoint, jsonEventsUpdateFileName)
                    newName = curName + '.new'
                    f = open(newName, 'w')
                    json.dump(eventsList, f)
                    f.close()
                    os.rename(newName, curName)
        # remove empty file
        if toSkip or nData == 0:
            try:
                os.remove(readJsonPath)
            except Exception:
                pass
        tmpLog.debug('got {0} files for PandaID={1}'.format(nData, pandaID))
    return fileDict
def test_longID(self):
    rid = hashlib.sha1(os.urandom(32)).digest()
    n = Node(rid)
    self.assertEqual(n.long_id, long(binascii.hexlify(rid), 16))
def data2key(data):
    data = utils.parse_ssh_key(data)
    _, exp, mod = (next(data),  # noqa: F841 (_)
                   long(utils.encode_hex(next(data)), 16),
                   long(utils.encode_hex(next(data)), 16))
    return RSA.construct((mod, exp))
def convert_to_base_types(obj, ignore_keys=tuple(), tuple_type=tuple,
                          json_safe=True):
    """Recursively convert objects into base types.

    This is used to convert some special types of objects used internally into
    base types for more friendly output via mechanisms such as JSON.  It is
    used for sending internal objects via the network and outputting test
    records.  Specifically, the conversions that are performed:

      - If an object has an as_base_types() method, immediately return the
        result without any recursion; this can be used with caching in the
        object to prevent unnecessary conversions.
      - If an object has an _asdict() method, use that to convert it to a dict
        and recursively convert its contents.
      - mutablerecords Record instances are converted to dicts that map
        attribute name to value.  Optional attributes with a value of None are
        skipped.
      - Enum instances are converted to strings via their .name attribute.
      - Real and integral numbers are converted to built-in types.
      - Byte and unicode strings are left alone (instances of
        six.string_types).
      - Other non-None values are converted to strings via str().

    The return value contains only the Python built-in types: dict, list,
    tuple, str, unicode, int, float, long, bool, and NoneType (unless
    tuple_type is set to something else).  If tuples should be converted to
    lists (e.g. for an encoding that does not differentiate between the two),
    pass 'tuple_type=list' as an argument.

    If `json_safe` is True, then the float 'inf', '-inf', and 'nan' values
    will be converted to strings.  This ensures that the returned dictionary
    can be passed to json.dumps to create valid JSON.  Otherwise, json.dumps
    may return values such as NaN which are not valid JSON.
    """
    # Because it's *really* annoying to pass a single string accidentally.
    assert not isinstance(ignore_keys, six.string_types), 'Pass a real iterable!'

    if hasattr(obj, 'as_base_types'):
        return obj.as_base_types()
    if hasattr(obj, '_asdict'):
        obj = obj._asdict()
    elif isinstance(obj, records.RecordClass):
        obj = {attr: getattr(obj, attr)
               for attr in type(obj).all_attribute_names
               if (getattr(obj, attr, None) is not None
                   or attr in type(obj).required_attributes)}
    elif isinstance(obj, Enum):
        obj = obj.name

    if type(obj) in PASSTHROUGH_TYPES:
        return obj

    # Recursively convert values in dicts, lists, and tuples.
    if isinstance(obj, dict):
        return {convert_to_base_types(k, ignore_keys, tuple_type):
                convert_to_base_types(v, ignore_keys, tuple_type)
                for k, v in six.iteritems(obj) if k not in ignore_keys}
    elif isinstance(obj, list):
        return [convert_to_base_types(val, ignore_keys, tuple_type, json_safe)
                for val in obj]
    elif isinstance(obj, tuple):
        return tuple_type(
            convert_to_base_types(value, ignore_keys, tuple_type, json_safe)
            for value in obj)
    # Convert numeric types (e.g. numpy ints and floats) into built-in types.
    elif isinstance(obj, numbers.Integral):
        return long(obj)
    elif isinstance(obj, numbers.Real):
        as_float = float(obj)
        if json_safe and (math.isinf(as_float) or math.isnan(as_float)):
            return str(as_float)
        return as_float

    # Convert all other types to strings.
    try:
        return str(obj)
    except:
        logging.warning('Problem casting object of type %s to str.',
                        type(obj))
        raise
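# Hypothetical usage sketch (assuming the module-level imports and PASSTHROUGH_TYPES
# that the function above relies on): a namedtuple containing an Enum flattens into
# plain built-in types via its _asdict() method.
import collections
import enum


class Color(enum.Enum):
    RED = 1


Sample = collections.namedtuple('Sample', ['name', 'color', 'count'])
flat = convert_to_base_types(Sample(name='x', color=Color.RED, count=3))
# flat == {'name': 'x', 'color': 'RED', 'count': 3}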
def __init__(self, id, ip=None, port=None, can_test=0):
    self.id = id
    self.ip = ip
    self.port = port
    self.can_test = can_test
    self.long_id = long(binascii.hexlify(id), 16)