def __init__(self, arraysize=1, executed=None):
    """Initialize the fake cursor used by the database tests.

    :param arraysize: stored as-is on the cursor (DB-API attribute).
    :param executed: optional shared list used to record executed
        statements; a fresh list is created when not given.
    """
    self.arraysize = arraysize
    self.executed = [] if executed is None else executed
    # Canned result sets returned by the fetch* methods.
    self._fetchone_data = [("fetchone%d" % n,) for n in iter_range(3)]
    self._fetchall_data = [("fetchall%d" % n,) for n in iter_range(2)]
    self._fetchmany_data = [("fetchmany%d" % n,) for n in iter_range(5)]
def convert_param_marks(statement, from_param_mark, to_param_mark):
    """Translate parameter markers in a SQL statement.

    Occurrences of from_param_mark outside single-quoted string
    literals are replaced with to_param_mark; quoted content is
    left untouched.
    """
    # TODO: Add support for $foo$bar$foo$ literals.
    if from_param_mark == to_param_mark or from_param_mark not in statement:
        return statement
    # Splitting on "'" puts unquoted text at even indexes and quoted
    # literal content at odd indexes.
    fragments = statement.split("'")
    converted = [
        fragment.replace(from_param_mark, to_param_mark)
        if index % 2 == 0 else fragment
        for index, fragment in enumerate(fragments)
    ]
    return "'".join(converted)
def test_fit_size(Cache):
    """A cache of size n can hold at least n objects."""
    capacity = 10
    cache = Cache(capacity)
    for obj_id in iter_range(capacity):
        cache.add(StubObjectInfo(obj_id))
    assert len(cache.get_cached()) == capacity
def test_generational_cache_set_size_limit():
    """Setting the size limits the cache's size just like passing an
    initial size would.
    """
    limit = 10
    # Start with a much larger cache, then shrink it via set_size().
    cache = GenerationalCache(limit * 100)
    cache.set_size(limit)
    for value in iter_range(limit * 10):
        cache.add(StubObjectInfo(value))
    assert len(cache.get_cached()) == limit * 2
def test_generational_cache_size_limit():
    """A cache will never hold more than twice its size in objects.

    The generational system is what prevents it from holding exactly
    the requested number of objects.
    """
    limit = 10
    cache = GenerationalCache(limit)
    for value in iter_range(5 * limit):
        cache.add(StubObjectInfo(value))
    assert len(cache.get_cached()) == limit * 2
def test_generational_cache_set_size_smaller_than_current_size():
    """Setting the size to a smaller size than the number of objects
    currently cached will drop some of the extra content.

    Note that because of the generation system, it can actually hold
    two times the size requested in edge cases.
    """
    cache = GenerationalCache(150)
    for obj_id in iter_range(250):
        cache.add(StubObjectInfo(obj_id))
    cache.set_size(100)
    assert len(cache.get_cached()) == 100
    # Only the most recently added objects should survive the shrink.
    assert all(obj_info.id >= 100 for obj_info in cache.get_cached())
def __init__(self, fileobj):
    """Parse a binary tzfile(5) timezone information file.

    :param fileobj: a filename (opened here) or a readable file-like
        object positioned at the start of the data.
    :raises ValueError: if the "TZif" magic header is missing.
    """
    if isinstance(fileobj, basestring):
        self._filename = fileobj
        fileobj = open(fileobj)
    elif hasattr(fileobj, "name"):
        self._filename = fileobj.name
    else:
        self._filename = repr(fileobj)

    # From tzfile(5):
    #
    # The time zone information files used by tzset(3)
    # begin with the magic characters "TZif" to identify
    # them as time zone information files, followed by
    # sixteen bytes reserved for future use, followed by
    # six four-byte values of type long, written in a
    # ``standard'' byte order (the high-order byte
    # of the value is written first).

    if fileobj.read(4) != "TZif":
        raise ValueError("magic not found")

    fileobj.read(16)

    (
        # The number of UTC/local indicators stored in the file.
        ttisgmtcnt,

        # The number of standard/wall indicators stored in the file.
        ttisstdcnt,

        # The number of leap seconds for which data is
        # stored in the file.
        leapcnt,

        # The number of "transition times" for which data
        # is stored in the file.
        timecnt,

        # The number of "local time types" for which data
        # is stored in the file (must not be zero).
        typecnt,

        # The number of characters of "time zone
        # abbreviation strings" stored in the file.
        charcnt,

    ) = struct.unpack(">6l", fileobj.read(24))

    # The above header is followed by tzh_timecnt four-byte
    # values of type long, sorted in ascending order.
    # These values are written in ``standard'' byte order.
    # Each is used as a transition time (as returned by
    # time(2)) at which the rules for computing local time
    # change.

    if timecnt:
        self._trans_list = struct.unpack(">%dl" % timecnt,
                                         fileobj.read(timecnt * 4))
    else:
        self._trans_list = []

    # Next come tzh_timecnt one-byte values of type unsigned
    # char; each one tells which of the different types of
    # ``local time'' types described in the file is associated
    # with the same-indexed transition time. These values
    # serve as indices into an array of ttinfo structures that
    # appears next in the file.

    if timecnt:
        self._trans_idx = struct.unpack(">%dB" % timecnt,
                                        fileobj.read(timecnt))
    else:
        self._trans_idx = []

    # Each ttinfo structure is written as a four-byte value
    # for tt_gmtoff of type long, in a standard byte
    # order, followed by a one-byte value for tt_isdst
    # and a one-byte value for tt_abbrind. In each
    # structure, tt_gmtoff gives the number of
    # seconds to be added to UTC, tt_isdst tells whether
    # tm_isdst should be set by localtime(3), and
    # tt_abbrind serves as an index into the array of
    # time zone abbreviation characters that follow the
    # ttinfo structure(s) in the file.

    ttinfo = []
    for i in iter_range(typecnt):
        ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

    abbr = fileobj.read(charcnt)

    # Then there are tzh_leapcnt pairs of four-byte
    # values, written in standard byte order; the
    # first value of each pair gives the time (as
    # returned by time(2)) at which a leap second
    # occurs; the second gives the total number of
    # leap seconds to be applied after the given time.
    # The pairs of values are sorted in ascending order
    # by time.

    # Not used, for now
    if leapcnt:
        # Bug fix: the count must be applied to the integer
        # (leapcnt * 2 values), not to the format string.
        # ">%dl" % leapcnt * 2 built an invalid format such as
        # ">2l>2l" and struct.unpack raised on any file that
        # actually contained leap-second records.
        leap = struct.unpack(">%dl" % (leapcnt * 2),
                             fileobj.read(leapcnt * 8))

    # Then there are tzh_ttisstdcnt standard/wall
    # indicators, each stored as a one-byte value; they tell
    # whether the transition times associated with local time
    # types were specified as standard time or wall clock
    # time, and are used when a time zone file is used in
    # handling POSIX-style time zone environment variables.

    if ttisstdcnt:
        isstd = struct.unpack(">%db" % ttisstdcnt,
                              fileobj.read(ttisstdcnt))

    # Finally, there are tzh_ttisgmtcnt UTC/local
    # indicators, each stored as a one-byte value; they tell
    # whether the transition times associated with local time
    # types were specified as UTC or local time, and are used
    # when a time zone file is used in handling POSIX-style
    # time zone environment variables.

    if ttisgmtcnt:
        isgmt = struct.unpack(">%db" % ttisgmtcnt,
                              fileobj.read(ttisgmtcnt))

    # ** Everything has been read **

    # Build ttinfo list
    self._ttinfo_list = []
    for i in iter_range(typecnt):
        gmtoff, isdst, abbrind = ttinfo[i]
        # Round to full-minutes if that's not the case. Python's
        # datetime doesn't accept sub-minute timezones. Check
        # http://python.org/sf/1447945 for some information.
        gmtoff = (gmtoff + 30) // 60 * 60
        tti = _ttinfo()
        tti.offset = gmtoff
        tti.delta = datetime.timedelta(seconds=gmtoff)
        tti.isdst = isdst
        # Abbreviations are NUL-terminated strings inside abbr.
        tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
        # Guard with the count so missing indicator arrays (count 0)
        # short-circuit before isstd/isgmt are referenced.
        tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
        tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
        self._ttinfo_list.append(tti)

    # Replace ttinfo indexes for ttinfo objects.
    trans_idx = []
    for idx in self._trans_idx:
        trans_idx.append(self._ttinfo_list[idx])
    self._trans_idx = tuple(trans_idx)

    # Set standard, dst, and before ttinfos. before will be
    # used when a given time is before any transitions,
    # and will be set to the first non-dst ttinfo, or to
    # the first dst, if all of them are dst.
    self._ttinfo_std = None
    self._ttinfo_dst = None
    self._ttinfo_before = None
    if self._ttinfo_list:
        if not self._trans_list:
            self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
        else:
            # Walk transitions backwards to find the most recent
            # standard and DST ttinfos.
            for i in iter_range(timecnt - 1, -1, -1):
                tti = self._trans_idx[i]
                if not self._ttinfo_std and not tti.isdst:
                    self._ttinfo_std = tti
                elif not self._ttinfo_dst and tti.isdst:
                    self._ttinfo_dst = tti
                if self._ttinfo_std and self._ttinfo_dst:
                    break
            else:
                if self._ttinfo_dst and not self._ttinfo_std:
                    self._ttinfo_std = self._ttinfo_dst

        # For before-first-transition times, prefer the first
        # non-dst ttinfo, falling back to the first entry.
        for tti in self._ttinfo_list:
            if not tti.isdst:
                self._ttinfo_before = tti
                break
        else:
            self._ttinfo_before = self._ttinfo_list[0]

    # Now fix transition times to become relative to wall time.
    #
    # I'm not sure about this. In my tests, the tz source file
    # is setup to wall time, and in the binary file isstd and
    # isgmt are off, so it should be in wall time. OTOH, it's
    # always in gmt time. Let me know if you have comments
    # about this.
    laststdoffset = 0
    self._trans_list = list(self._trans_list)
    for i in iter_range(len(self._trans_list)):
        tti = self._trans_idx[i]
        if not tti.isdst:
            # This is std time.
            self._trans_list[i] += tti.offset
            laststdoffset = tti.offset
        else:
            # This is dst time. Convert to std.
            self._trans_list[i] += laststdoffset
    self._trans_list = tuple(self._trans_list)
def obj_infos():
    """Return ten stub object infos with ids 0 through 9."""
    return list(map(StubObjectInfo, iter_range(10)))
def track_contexts(n):
    """Return a list of n freshly created TrackContext instances."""
    return [TrackContext() for _ in iter_range(n)]
    FromExpr,
)

from storm.uri import URI

# We need the info to register the 'type' compiler. In normal
# circumstances this is naturally imported.
import storm.info

from tests import has_fixtures, has_subunit
from tests.databases.base import (DatabaseTest, DatabaseDisconnectionTest,
                                  UnsupportedDatabaseTest, TwoPhaseCommitTest,
                                  TwoPhaseCommitDisconnectionTest)
from tests.helper import assert_variables_equal, TestHelper


# Create columnN, tableN, and elemN variables.
# NOTE(review): exec() builds module-level fixtures column0..column9,
# elem0..elem9 (SQLToken instances) and table0..table9 (plain strings)
# for use throughout the test module.
for i in iter_range(10):
    for name in ["column", "elem"]:
        exec("%s%d = SQLToken('%s%d')" % (name, i, name, i))
    for name in ["table"]:
        exec("%s%d = '%s %d'" % (name, i, name, i))


class TrackContext(FromExpr):
    """Expression that records the compile context it was compiled in."""

    # Set by compile_track_context when the expression is compiled.
    context = None


@compile.when(TrackContext)
def compile_track_context(compile, expr, state):
    """Stash the current compile context on the expression and emit
    no SQL for it."""
    expr.context = state.context
    return ""