def get(self, key, default=None, version=None):
    key = self.make_key(key, version=version)
    self.validate_key(key)
    db = router.db_for_read(self.cache_model_class)
    table = connections[db].ops.quote_name(self._table)
    cursor = connections[db].cursor()

    cursor.execute("SELECT cache_key, value, expires FROM %s "
                   "WHERE cache_key = %%s" % table, [key])
    row = cursor.fetchone()
    if row is None:
        return default
    now = timezone.now()
    expires = row[2]
    if connections[db].features.needs_datetime_string_cast and not isinstance(expires, datetime):
        # Note: typecasting is needed by some 3rd party database backends.
        # All core backends work without typecasting, so be careful about
        # changes here - the test suite will NOT pick up regressions here.
        expires = typecast_timestamp(str(expires))
    if expires < now:
        db = router.db_for_write(self.cache_model_class)
        cursor = connections[db].cursor()
        cursor.execute("DELETE FROM %s "
                       "WHERE cache_key = %%s" % table, [key])
        return default
    value = connections[db].ops.process_clob(row[1])
    return pickle.loads(base64.b64decode(force_bytes(value)))
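# A minimal, stdlib-only sketch of what the needs_datetime_string_cast branch
# above compensates for: some backends hand back timestamps as strings in the
# form "YYYY-MM-DD HH:MM:SS[.ffffff]", and typecast_timestamp() parses them
# back into datetime objects. This parser is modeled on Django's
# django.db.backends.util helper, not a verbatim copy (the date-only fallback
# is omitted here).
from datetime import datetime

def parse_db_timestamp(value):
    """Parse a DB-style timestamp string into a naive datetime."""
    if value is None or not str(value).strip():
        return None
    value = str(value)
    for fmt in ("%Y-%m-%d %H:%M:%S.%f", "%Y-%m-%d %H:%M:%S"):
        try:
            return datetime.strptime(value, fmt)
        except ValueError:
            continue
    raise ValueError("unrecognized timestamp: %r" % value)

assert parse_db_timestamp("2013-05-01 12:30:45") == datetime(2013, 5, 1, 12, 30, 45)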
def results_iter(self):
    """
    Returns an iterator over the results from executing this query.
    """
    resolve_columns = hasattr(self, 'resolve_columns')
    if resolve_columns:
        from django.db.models.fields import DateTimeField
        fields = [DateTimeField()]
    else:
        from django.db.backends.util import typecast_timestamp
        needs_string_cast = self.connection.features.needs_datetime_string_cast

    offset = len(self.query.extra_select)
    for rows in self.execute_sql(MULTI):
        for row in rows:
            datetime = row[offset]
            if resolve_columns:
                datetime = self.resolve_columns(row, fields)[offset]
            elif needs_string_cast:
                datetime = typecast_timestamp(str(datetime))
            # Datetimes are artificially returned in UTC on databases that
            # don't support time zone. Restore the zone used in the query.
            if settings.USE_TZ:
                datetime = datetime.replace(tzinfo=None)
                datetime = timezone.make_aware(datetime, self.query.tzinfo)
            yield datetime
def results_iter(self):
    """
    Returns an iterator over the results from executing this query.
    """
    resolve_columns = hasattr(self, 'resolve_columns')
    if resolve_columns:
        from django.db.models.fields import DateTimeField
        fields = [DateTimeField()]
    else:
        from django.db.backends.util import typecast_timestamp
        needs_string_cast = self.connection.features.needs_datetime_string_cast

    offset = len(self.query.extra_select)
    for rows in self.execute_sql(MULTI):
        for row in rows:
            datetime = row[offset]
            if resolve_columns:
                datetime = self.resolve_columns(row, fields)[offset]
            elif needs_string_cast:
                datetime = typecast_timestamp(str(datetime))
            # Datetimes are artificially returned in UTC on databases that
            # don't support time zone. Restore the zone used in the query.
            if settings.USE_TZ:
                if datetime is None:
                    raise ValueError("Database returned an invalid value "
                                     "in QuerySet.dates(). Are time zone "
                                     "definitions and pytz installed?")
                datetime = datetime.replace(tzinfo=None)
                datetime = timezone.make_aware(datetime, self.query.tzinfo)
            yield datetime
def _base_set(self, mode, key, value, timeout=DEFAULT_TIMEOUT):
    if timeout == DEFAULT_TIMEOUT:
        timeout = self.default_timeout
    db = router.db_for_write(self.cache_model_class)
    table = connections[db].ops.quote_name(self._table)
    cursor = connections[db].cursor()

    cursor.execute("SELECT COUNT(*) FROM %s" % table)
    num = cursor.fetchone()[0]
    now = timezone.now()
    now = now.replace(microsecond=0)
    if timeout is None:
        exp = datetime.max
    elif settings.USE_TZ:
        exp = datetime.utcfromtimestamp(time.time() + timeout)
    else:
        exp = datetime.fromtimestamp(time.time() + timeout)
    exp = exp.replace(microsecond=0)
    if num > self._max_entries:
        self._cull(db, cursor, now)
    pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
    b64encoded = base64.b64encode(pickled)
    # The DB column is expecting a string, so make sure the value is a
    # string, not bytes. Refs #19274.
    if six.PY3:
        b64encoded = b64encoded.decode('latin1')
    try:
        # Note: typecasting for datetimes is needed by some 3rd party
        # database backends. All core backends work without typecasting,
        # so be careful about changes here - the test suite will NOT pick
        # up regressions.
        with transaction.atomic(using=db):
            cursor.execute("SELECT cache_key, expires FROM %s "
                           "WHERE cache_key = %%s" % table, [key])
            result = cursor.fetchone()
            if result:
                current_expires = result[1]
                if (connections[db].features.needs_datetime_string_cast and
                        not isinstance(current_expires, datetime)):
                    current_expires = typecast_timestamp(str(current_expires))
            exp = connections[db].ops.value_to_db_datetime(exp)
            if result and (mode == 'set' or
                           (mode == 'add' and current_expires < now)):
                cursor.execute("UPDATE %s SET value = %%s, expires = %%s "
                               "WHERE cache_key = %%s" % table,
                               [b64encoded, exp, key])
            else:
                cursor.execute("INSERT INTO %s (cache_key, value, expires) "
                               "VALUES (%%s, %%s, %%s)" % table,
                               [key, b64encoded, exp])
    except DatabaseError:
        # To be threadsafe, updates/inserts are allowed to fail silently
        return False
    else:
        return True
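# Hedged sketch of the value encoding that _base_set() and get() above agree
# on: pickle the Python value, base64 it so it fits in a text column, and
# reverse both steps on read. The helper names (encode_for_db/decode_from_db)
# are illustrative, not Django API; the latin1 decode mirrors the
# "Refs #19274" comment above.
import base64
import pickle

def encode_for_db(value):
    pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
    # The text column expects str, not bytes, so decode the base64 output.
    return base64.b64encode(pickled).decode('latin1')

def decode_from_db(stored):
    return pickle.loads(base64.b64decode(stored.encode('latin1')))

assert decode_from_db(encode_for_db({'spam': 1})) == {'spam': 1}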
def results_iter(self):
    needs_string_cast = self.connection.features.needs_datetime_string_cast
    for rows in self.execute_sql(MULTI):
        for row in rows:
            if needs_string_cast:
                vals = [typecast_timestamp(str(row[0])), row[1]]
            else:
                vals = row
            yield vals
def _sqlite_date_extract(lookup_type, dt):
    if dt is None:
        return None
    try:
        dt = util.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    if lookup_type == 'week_day':
        return (dt.isoweekday() % 7) + 1
    else:
        return getattr(dt, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
    try:
        dt = util.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    if lookup_type == 'year':
        return "%i-01-01 00:00:00" % dt.year
    elif lookup_type == 'month':
        return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
    elif lookup_type == 'day':
        return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
def _sqlite_date_trunc(lookup_type, dt):
    try:
        dt = util.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    if lookup_type == 'year':
        return "%i-01-01" % dt.year
    elif lookup_type == 'month':
        return "%i-%02i-01" % (dt.year, dt.month)
    elif lookup_type == 'day':
        return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
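# The _sqlite_date_* helpers above only do anything because SQLite lets a
# Python callable be registered as a SQL function. A runnable sketch with the
# stdlib sqlite3 module; the local date_trunc_month stand-in and the function
# name used here are illustrative, not Django's actual backend setup.
import sqlite3
from datetime import datetime

def date_trunc_month(value):
    # Stand-in for _sqlite_date_trunc('month', ...) above.
    dt = datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
    return "%i-%02i-01" % (dt.year, dt.month)

conn = sqlite3.connect(":memory:")
conn.create_function("django_date_trunc_month", 1, date_trunc_month)
row = conn.execute(
    "SELECT django_date_trunc_month('2013-05-17 10:45:00')").fetchone()
assert row[0] == "2013-05-01"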
def combined_message_log_row(row):
    # order of fields output by combined_message_log:
    #   [0] direction     [1] message_id
    #   [2] message_date  [3] message_text
    return {
        "direction": row[0],
        "pk": row[1],
        "date": typecast_timestamp(row[2]),
        "text": row[3]
    }
def _sqlite_format_dtdelta(dt, conn, days, secs, usecs):
    try:
        dt = util.typecast_timestamp(dt)
        delta = datetime.timedelta(int(days), int(secs), int(usecs))
        if conn.strip() == '+':
            dt = dt + delta
        else:
            dt = dt - delta
    except (ValueError, TypeError):
        return None
    # typecast_timestamp returns a date or a datetime without timezone.
    # It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
    return str(dt)
def _sqlite_datetime_extract(lookup_type, dt, tzname):
    if dt is None:
        return None
    try:
        dt = util.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    if tzname is not None:
        dt = timezone.localtime(dt, pytz.timezone(tzname))
    if lookup_type == 'week_day':
        return (dt.isoweekday() % 7) + 1
    else:
        return getattr(dt, lookup_type)
def results_iter(self):
    if self.connection.ops.oracle:
        from django.db.models.fields import DateTimeField
        fields = [DateTimeField()]
    else:
        needs_string_cast = self.connection.features.needs_datetime_string_cast

    offset = len(self.query.extra_select)
    for rows in self.execute_sql(MULTI):
        for row in rows:
            date = row[offset]
            if self.connection.ops.oracle:
                date = self.resolve_columns(row, fields)[offset]
            elif needs_string_cast:
                date = typecast_timestamp(str(date))
            yield date
def __combined_message_log_row(row):
    reporter = None
    connection = None

    # order of fields output by combined_message_log:
    #   [0] direction           [1] message_id      [2] message_date
    #   [3] message_text        [4] reporter_id     [5] reporter_first_name
    #   [6] reporter_last_name  [7] backend_id      [8] backend_title
    #   [9] backend_slug        [10] connection_id  [11] connection_identity

    # if this message is linked to a reporter, create a Reporter object
    # (so we can call methods like full_name) without hitting the
    # database each time. note that not all fields were fetched, so the
    # object won't work fully, but enough to display it
    if row[4] is not None:
        reporter = Reporter(
            first_name=row[5],
            last_name=row[6],
            pk=row[4])

    # likewise for a backend+connection, if this message isn't
    # linked to a reporter. combined_message_log can't filter
    # by connections (yet), but they must be displayed
    if row[7] is not None:
        backend = PersistantBackend(
            title=row[8],
            slug=row[9],
            id=row[7])
        connection = PersistantConnection(
            backend=backend,
            identity=row[11],
            id=row[10])

    # If the date object is already a datetime, don't bother
    # casting it. Otherwise, do.
    casted_date = row[2]
    if not isinstance(casted_date, datetime):
        casted_date = typecast_timestamp(row[2])

    return {
        "direction": row[0],
        "pk": row[1],
        "date": casted_date,
        "text": row[3],
        "reporter": reporter,
        "connection": connection
    }
def iterator(self):
    from django.db.backends.util import typecast_timestamp
    from django.db.models.fields import DateTimeField

    qn = connection.ops.quote_name
    self._order_by = ()  # Clear this because it'll mess things up otherwise.
    if self._field.null:
        self._where.append('%s.%s IS NOT NULL' %
                           (qn(self.model._meta.db_table),
                            qn(self._field.column)))
    try:
        select, sql, params = self._get_sql_clause()
    except EmptyResultSet:
        raise StopIteration

    table_name = qn(self.model._meta.db_table)
    field_name = qn(self._field.column)

    if connection.features.allows_group_by_ordinal:
        group_by = '1'
    else:
        group_by = connection.ops.date_trunc_sql(
            self._kind, '%s.%s' % (table_name, field_name))

    sql = 'SELECT %s %s GROUP BY %s ORDER BY 1 %s' % \
        (connection.ops.date_trunc_sql(
            self._kind, '%s.%s' % (qn(self.model._meta.db_table),
                                   qn(self._field.column))),
         sql, group_by, self._order)
    cursor = connection.cursor()
    cursor.execute(sql, params)

    has_resolve_columns = hasattr(self, 'resolve_columns')
    needs_datetime_string_cast = connection.features.needs_datetime_string_cast
    dates = []
    # It would be better to use self._field here instead of DateTimeField(),
    # but in Oracle that will result in a list of datetime.date instead of
    # datetime.datetime.
    fields = [DateTimeField()]
    while True:
        rows = cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)
        if not rows:
            return dates
        for row in rows:
            date = row[0]
            if has_resolve_columns:
                date = self.resolve_columns([date], fields)[0]
            elif needs_datetime_string_cast:
                date = typecast_timestamp(str(date))
            dates.append(date)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
    try:
        dt = util.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    if tzname is not None:
        dt = timezone.localtime(dt, pytz.timezone(tzname))
    if lookup_type == 'year':
        return "%i-01-01 00:00:00" % dt.year
    elif lookup_type == 'month':
        return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
    elif lookup_type == 'day':
        return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
    elif lookup_type == 'hour':
        return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
    elif lookup_type == 'minute':
        return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day,
                                              dt.hour, dt.minute)
    elif lookup_type == 'second':
        return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day,
                                                dt.hour, dt.minute, dt.second)
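# Hedged sketch of the tzname branch above: convert a naive-UTC value to the
# queried time zone with pytz before truncating/extracting. Shown with pytz
# directly so it runs without Django settings; timezone.localtime() performs
# roughly this conversion for aware datetimes.
import pytz
from datetime import datetime

utc_dt = pytz.utc.localize(datetime(2013, 5, 17, 22, 30))
paris = pytz.timezone('Europe/Paris')
local_dt = paris.normalize(utc_dt.astimezone(paris))
# 22:30 UTC is 00:30 the next day in CEST (UTC+2).
assert (local_dt.day, local_dt.hour) == (18, 0)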
def convert_values_sqlite(self, value, field):
    """SQLite returns floats when it should be returning decimals,
    and gets dates and datetimes wrong.
    For consistency with other backends, coerce when required.
    """
    internal_type = field.get_internal_type()
    if internal_type == 'DecimalField':
        return util.typecast_decimal(field.format_number(value))
    elif internal_type and internal_type.endswith('IntegerField') or internal_type.endswith('AutoField'):
        return int(value)
    elif internal_type == 'DateField':
        return util.typecast_date(value)
    elif internal_type == 'DateTimeField':
        return util.typecast_timestamp(value)
    elif internal_type == 'TimeField':
        return util.typecast_time(value)

    # No field, or the field isn't known to be a decimal or integer
    return value
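# A small runnable demo of why convert_values_sqlite() exists: SQLite has no
# native DATE storage class, so a date round-trips as text and needs the
# Python-side cast that util.typecast_date/typecast_timestamp provide. The
# manual cast below approximates those helpers; it is not Django's code.
import sqlite3
from datetime import date

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE t (d DATE)")
conn.execute("INSERT INTO t VALUES (?)", ("2013-05-17",))
raw = conn.execute("SELECT d FROM t").fetchone()[0]
assert isinstance(raw, str)  # comes back as text, not datetime.date
casted = date(*map(int, raw.split("-")))  # roughly what typecast_date does
assert casted == date(2013, 5, 17)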
def iterator(self):
    from django.db.backends.util import typecast_timestamp

    self._order_by = ()  # Clear this because it'll mess things up otherwise.
    if self._field.null:
        self._where.append('%s.%s IS NOT NULL' %
                           (backend.quote_name(self.model._meta.db_table),
                            backend.quote_name(self._field.column)))
    try:
        select, sql, params = self._get_sql_clause()
    except EmptyResultSet:
        raise StopIteration

    sql = 'SELECT %s %s GROUP BY 1 ORDER BY 1 %s' % \
        (backend.get_date_trunc_sql(
            self._kind, '%s.%s' % (backend.quote_name(self.model._meta.db_table),
                                   backend.quote_name(self._field.column))),
         sql, self._order)
    cursor = connection.cursor()
    cursor.execute(sql, params)
    # We have to manually run typecast_timestamp(str()) on the results, because
    # MySQL doesn't automatically cast the result of date functions as datetime
    # objects -- MySQL returns the values as strings, instead.
    return [typecast_timestamp(str(row[0])) for row in cursor.fetchall()]
def results_iter(self):
    """
    Returns an iterator over the results from executing this query.
    """
    resolve_columns = hasattr(self, 'resolve_columns')
    if resolve_columns:
        from django.db.models.fields import DateTimeField
        fields = [DateTimeField()]
    else:
        from django.db.backends.util import typecast_timestamp
        needs_string_cast = self.connection.features.needs_datetime_string_cast

    offset = len(self.query.extra_select)
    for rows in self.execute_sql(MULTI):
        for row in rows:
            date = row[offset]
            if resolve_columns:
                date = self.resolve_columns(row, fields)[offset]
            elif needs_string_cast:
                date = typecast_timestamp(str(date))
            yield date
def results_iter(self):
    if self.connection.ops.oracle:
        from django.db.models.fields import DateTimeField
        fields = [DateTimeField()]
    else:
        needs_string_cast = self.connection.features.needs_datetime_string_cast

    offset = len(self.query.extra_select)
    for rows in self.execute_sql(MULTI):
        for row in rows:
            datetime = row[offset]
            if self.connection.ops.oracle:
                datetime = self.resolve_columns(row, fields)[offset]
            elif needs_string_cast:
                datetime = typecast_timestamp(str(datetime))
            # Datetimes are artificially returned in UTC on databases that
            # don't support time zone. Restore the zone used in the query.
            if settings.USE_TZ:
                datetime = datetime.replace(tzinfo=None)
                datetime = timezone.make_aware(datetime, self.query.tzinfo)
            yield datetime
def results_iter(self):
    """
    Returns an iterator over the results from executing this query.
    """
    resolve_columns = hasattr(self, 'resolve_columns')
    if resolve_columns:
        from django.db.models.fields import DateTimeField
        fields = [DateTimeField()]
    else:
        from django.db.backends.util import typecast_timestamp
        needs_string_cast = self.connection.features.needs_datetime_string_cast

    offset = len(self.extra_select)
    for rows in self.execute_sql(MULTI):
        for row in rows:
            date = row[offset]
            if resolve_columns:
                date = self.resolve_columns([date], fields)[0]
            elif needs_string_cast:
                date = typecast_timestamp(str(date))
            yield date
def prepare_client_lists(request, timestamp="now"):
    # I suggest we implement "expiration" here.
    timestamp = timestamp.replace("@", " ")
    # client_list = Client.objects.all().order_by('name')
    # (change this to order by the interaction's state)
    client_interaction_dict = {}
    clean_client_list = []
    bad_client_list = []
    extra_client_list = []
    modified_client_list = []
    stale_up_client_list = []
    # stale_all_client_list = []
    down_client_list = []
    cursor = connection.cursor()

    # You can also specify a time like this: '2007-01-01 00:00:00'
    interact_queryset = Interaction.objects.interaction_per_client(timestamp)
    for x in interact_queryset:
        client_interaction_dict[x.client_id] = x
    client_list = Client.objects.active(timestamp).filter(
        id__in=client_interaction_dict.keys()).order_by("name")

    clean_client_list = list(Client.objects.active(timestamp).filter(
        id__in=[y.client_id for y in interact_queryset.filter(state="clean")]))
    bad_client_list = list(Client.objects.active(timestamp).filter(
        id__in=[y.client_id for y in interact_queryset.filter(state="dirty")]))

    # Assume clients are up unless we know otherwise...
    client_ping_dict = dict((x, "Y") for x in client_interaction_dict.keys())
    try:
        cursor.execute(
            "select reports_ping.status, x.client_id from "
            "(select client_id, MAX(endtime) as timer from reports_ping "
            "GROUP BY client_id) x, reports_ping where "
            "reports_ping.client_id = x.client_id AND "
            "reports_ping.endtime = x.timer")
        for status, client_id in cursor.fetchall():
            client_ping_dict[client_id] = status
    except Exception:
        # This is to fix problems when zero records are returned.
        pass

    client_down_ids = [y for y in client_ping_dict.keys()
                       if client_ping_dict[y] == "N"]
    if client_down_ids:
        down_client_list = list(Client.objects.active(timestamp).filter(
            id__in=client_down_ids))

    if timestamp == "now" or timestamp is None:
        cursor.execute("select client_id, MAX(timestamp) as timestamp "
                       "from reports_interaction GROUP BY client_id")
        results = []
        for client_id, ts in cursor.fetchall():
            # Rows may come back with string timestamps; cast them.
            if isinstance(ts, (str, unicode)):
                ts = util.typecast_timestamp(ts)
            results.append((client_id, ts))
        stale_all_client_list = Client.objects.active(timestamp).filter(
            id__in=[client_id for client_id, ts in results
                    if datetime.now() - ts > timedelta(days=1)])
    else:
        cursor.execute("select client_id, timestamp, MAX(timestamp) as timestamp "
                       "from reports_interaction WHERE timestamp < %s "
                       "GROUP BY client_id", [timestamp])
        t = strptime(timestamp, "%Y-%m-%d %H:%M:%S")
        datetimestamp = datetime(t[0], t[1], t[2], t[3], t[4], t[5])
        results = []
        for row in cursor.fetchall():
            ts = row[1]
            if isinstance(ts, (str, unicode)):
                ts = util.typecast_timestamp(ts)
            results.append((row[0], ts))
        stale_all_client_list = Client.objects.active(timestamp).filter(
            id__in=[client_id for client_id, ts in results
                    if datetimestamp - ts > timedelta(days=1)])

    stale_up_client_list = [x for x in stale_all_client_list
                            if client_ping_dict[x.id] != "N"]

    cursor.execute(
        "SELECT reports_client.id FROM reports_client, reports_interaction, "
        "reports_entries_interactions WHERE "
        "reports_client.id = reports_interaction.client_id AND "
        "reports_client.current_interaction_id = "
        "reports_entries_interactions.interaction_id and "
        "reports_entries_interactions.type=%s "
        "GROUP BY reports_client.id", [TYPE_MODIFIED])
    modified_client_list = Client.objects.active(timestamp).filter(
        id__in=[x[0] for x in cursor.fetchall()])
    cursor.execute(
        "SELECT reports_client.id FROM reports_client, reports_interaction, "
        "reports_entries_interactions WHERE "
        "reports_client.id = reports_interaction.client_id AND "
        "reports_client.current_interaction_id = "
        "reports_entries_interactions.interaction_id and "
        "reports_entries_interactions.type=%s "
        "GROUP BY reports_client.id", [TYPE_EXTRA])
    extra_client_list = Client.objects.active(timestamp).filter(
        id__in=[x[0] for x in cursor.fetchall()])

    if timestamp == "now":
        timestamp = datetime.now().isoformat("@")
    return {
        "client_list": client_list,
        "client_interaction_dict": client_interaction_dict,
        "clean_client_list": clean_client_list,
        "bad_client_list": bad_client_list,
        "extra_client_list": extra_client_list,
        "modified_client_list": modified_client_list,
        "stale_up_client_list": stale_up_client_list,
        "stale_all_client_list": stale_all_client_list,
        "down_client_list": down_client_list,
        "timestamp": timestamp,
        "timestamp_date": timestamp[:10],
        "timestamp_time": timestamp[11:19],
    }
def results_iter(self):
    offset = len(self.query.extra_select)
    for rows in self.execute_sql(MULTI):
        for row in rows:
            date = typecast_timestamp(str(row[offset]))
            yield date
def _sqlite_extract(lookup_type, dt):
    try:
        dt = util.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    return str(getattr(dt, lookup_type))