def serialize(v):
    """Encode *v* as a big-endian signed 64-bit timestamp in milliseconds.

    Accepts either a datetime (converted through its UTC time tuple, with
    microsecond precision folded in) or a bare number, which is multiplied
    by 1000 — the deprecated behavior described in the warning below.
    Raises TypeError for anything else.
    """
    global _have_warned_about_timestamps
    try:
        epoch_seconds = calendar.timegm(v.utctimetuple())
    except AttributeError:
        # Not a datetime. Ints and floats are valid timestamps too.
        if type(v) not in _number_types:
            raise TypeError('DateType arguments must be a datetime or timestamp')
        if not _have_warned_about_timestamps:
            # Warn exactly once per process about the multiply-by-1000 behavior.
            _have_warned_about_timestamps = True
            warnings.warn("timestamp columns in Cassandra hold a number of "
                          "milliseconds since the unix epoch. Currently, when executing "
                          "prepared statements, this driver multiplies timestamp "
                          "values by 1000 so that the result of time.time() "
                          "can be used directly. However, the driver cannot "
                          "match this behavior for non-prepared statements, "
                          "so the 2.0 version of the driver will no longer multiply "
                          "timestamps by 1000. It is suggested that you simply use "
                          "datetime.datetime objects for 'timestamp' values to avoid "
                          "any ambiguity and to guarantee a smooth upgrade of the "
                          "driver.")
        millis = v * 1e3
    else:
        millis = epoch_seconds * 1e3 + getattr(v, 'microsecond', 0) / 1e3
    return int64_pack(long(millis))
def execute_graph_async(self, query, parameters=None, trace=False, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT):
    """
    Execute the graph query and return a `ResponseFuture
    <http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.ResponseFuture.result>`_ object
    which callbacks may be attached to for asynchronous response delivery. You may also
    call ``ResponseFuture.result()`` to synchronously block for results at any time.

    :param query: a graph query string or a ``SimpleGraphStatement``
    :param parameters: optional graph parameters; serialized separately from CQL parameters
    :param trace: whether to enable query tracing on the request
    :param execution_profile: profile name or instance; must provide ``graph_options``
    :raises ValueError: if the resolved execution profile has no ``graph_options``
    """
    # Normalize plain query strings into a graph statement.
    if not isinstance(query, SimpleGraphStatement):
        query = SimpleGraphStatement(query)

    # Graph parameters are transformed and attached to the message below,
    # NOT passed through _create_response_future (which gets parameters=None).
    graph_parameters = None
    if parameters:
        graph_parameters = self._transform_params(parameters)

    execution_profile = self._get_execution_profile(execution_profile)  # look up instance here so we can apply the extended attributes

    try:
        options = execution_profile.graph_options.copy()
    except AttributeError:
        raise ValueError("Execution profile for graph queries must derive from GraphExecutionProfile, and provide graph_options")

    # Graph options travel to the server as a custom payload; the request
    # timeout is packed as an int64 (x1000 — presumably seconds -> milliseconds).
    custom_payload = options.get_options_map()
    custom_payload[_request_timeout_key] = int64_pack(long(execution_profile.request_timeout * 1000))

    future = self._create_response_future(query, parameters=None, trace=trace, custom_payload=custom_payload, timeout=_NOT_SET, execution_profile=execution_profile)
    future.message._query_params = graph_parameters
    future._protocol_handler = self.client_protocol_handler

    # Analytics-sourced queries with DSELoadBalancingPolicy are routed to the
    # analytics master first; all other requests are sent immediately.
    if options.is_analytics_source and isinstance(execution_profile.load_balancing_policy, DSELoadBalancingPolicy):
        self._target_analytics_master(future)
    else:
        future.send_request()
    return future
def serialize(v):
    """Encode *v* as a big-endian signed 64-bit millisecond timestamp.

    A datetime is converted through its UTC time tuple (microseconds folded
    in at millisecond resolution); a plain number is multiplied by 1000.
    Raises TypeError for any other type.
    """
    try:
        epoch_seconds = calendar.timegm(v.utctimetuple())
    except AttributeError:
        # Ints and floats are valid timestamps too
        if type(v) not in _number_types:
            raise TypeError('DateType arguments must be a datetime or timestamp')
        millis = v * 1e3
    else:
        millis = epoch_seconds * 1e3 + getattr(v, 'microsecond', 0) / 1e3
    return int64_pack(long(millis))
def serialize(v, protocol_version):
    """Encode *v* as a big-endian signed 64-bit timestamp.

    A datetime is converted to milliseconds since the epoch via its UTC time
    tuple; a plain number is packed as-is (no scaling — the caller supplies
    milliseconds directly). Raises TypeError for any other type.
    """
    try:
        # v is datetime
        seconds_since_epoch = calendar.timegm(v.utctimetuple())
    except AttributeError:
        # Ints and floats are valid timestamps too
        if type(v) not in _number_types:
            raise TypeError('DateType arguments must be a datetime or timestamp')
        timestamp = v
    else:
        timestamp = seconds_since_epoch * 1e3 + getattr(v, 'microsecond', 0) / 1e3
    return int64_pack(long(timestamp))
def execute_graph_async(self, query, parameters=None, trace=False, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT):
    """
    Execute the graph query and return a `ResponseFuture
    <http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.ResponseFuture.result>`_ object
    which callbacks may be attached to for asynchronous response delivery. You may also
    call ``ResponseFuture.result()`` to synchronously block for results at any time.

    :param query: a graph query string or a ``SimpleGraphStatement``
    :param parameters: optional graph parameters; serialized separately from CQL parameters
    :param trace: whether to enable query tracing on the request
    :param execution_profile: profile name or instance; must provide ``graph_options``
    :raises ValueError: if the resolved execution profile has no ``graph_options``
    """
    # Wrap bare query strings in a graph statement.
    if not isinstance(query, SimpleGraphStatement):
        query = SimpleGraphStatement(query)

    # Graph parameters are attached directly to the message below; the
    # response future itself is created with parameters=None.
    graph_parameters = None
    if parameters:
        graph_parameters = self._transform_params(parameters)

    execution_profile = self._get_execution_profile(
        execution_profile
    )  # look up instance here so we can apply the extended attributes
    try:
        options = execution_profile.graph_options.copy()
    except AttributeError:
        raise ValueError(
            "Execution profile for graph queries must derive from GraphExecutionProfile, and provide graph_options"
        )

    # Graph options are shipped as a custom payload; the request timeout is
    # packed as int64 (x1000 — presumably seconds -> milliseconds).
    custom_payload = options.get_options_map()
    custom_payload[_request_timeout_key] = int64_pack(
        long(execution_profile.request_timeout * 1000))
    future = self._create_response_future(
        query, parameters=None, trace=trace, custom_payload=custom_payload,
        timeout=_NOT_SET, execution_profile=execution_profile)
    future.message._query_params = graph_parameters
    future._protocol_handler = self.client_protocol_handler

    # Analytics-sourced queries under DSELoadBalancingPolicy are targeted at
    # the analytics master; everything else is sent right away.
    if options.is_analytics_source and isinstance(
            execution_profile.load_balancing_policy, DSELoadBalancingPolicy):
        self._target_analytics_master(future)
    else:
        future.send_request()
    return future
def serialize(cls, v, protocol_version):
    """Serialize a date-range value to its binary wire format.

    Writes one int8 bound-kind code, followed by (milliseconds: int64,
    precision: int8) for each concrete bound. An object with a non-None
    ``value`` encodes a single date (open or fixed); an object with
    ``value is None`` must expose ``lower_bound``/``upper_bound`` and
    encodes a range, with ``util.OPEN_BOUND`` marking an open end.

    :raises ValueError: if *v* lacks the required attributes.
    """
    buf = io.BytesIO()
    bound_kind, bounds = None, ()

    try:
        value = v.value
    except AttributeError:
        # Fixed: the two adjacent literals previously concatenated to
        # "...; got%r" (missing space before the repr).
        raise ValueError(
            '%s.serialize expects an object with a value attribute; got '
            '%r' % (cls.__name__, v))

    if value is None:
        # Range form: classify by which ends are open.
        try:
            lower_bound, upper_bound = v.lower_bound, v.upper_bound
        except AttributeError:
            raise ValueError(
                '%s.serialize expects an object with lower_bound and '
                'upper_bound attributes; got %r' % (cls.__name__, v))
        if lower_bound == util.OPEN_BOUND and upper_bound == util.OPEN_BOUND:
            bound_kind = BoundKind.BOTH_OPEN_RANGE
        elif lower_bound == util.OPEN_BOUND:
            bound_kind = BoundKind.OPEN_RANGE_LOW
            bounds = (upper_bound,)
        elif upper_bound == util.OPEN_BOUND:
            bound_kind = BoundKind.OPEN_RANGE_HIGH
            bounds = (lower_bound,)
        else:
            bound_kind = BoundKind.CLOSED_RANGE
            bounds = lower_bound, upper_bound
    else:  # value is not None
        if value == util.OPEN_BOUND:
            bound_kind = BoundKind.SINGLE_DATE_OPEN
        else:
            bound_kind = BoundKind.SINGLE_DATE
            bounds = (value,)

    # Defensive check: every branch above assigns bound_kind, but fail
    # loudly rather than write a corrupt payload if that ever changes.
    if bound_kind is None:
        raise ValueError('Cannot serialize %r; could not find bound kind' % (v,))

    buf.write(int8_pack(BoundKind.to_int(bound_kind)))
    for bound in bounds:
        buf.write(int64_pack(bound.milliseconds))
        buf.write(int8_pack(cls._encode_precision(bound.precision)))
    return buf.getvalue()
def serialize(val, protocol_version):
    """Encode a time value as a big-endian signed 64-bit nanosecond count.

    Values that already carry a ``nanosecond_time`` attribute are used
    directly; anything else is first coerced through ``util.Time``.
    """
    try:
        nanos = val.nanosecond_time
    except AttributeError:
        # Not Time-like yet; let util.Time handle the conversion.
        nanos = util.Time(val).nanosecond_time
    return int64_pack(nanos)
def serialize(byts, protocol_version):
    """Encode *byts* by delegating to ``int64_pack`` (big-endian int64)."""
    packed = int64_pack(byts)
    return packed