Code Example #1
File: handlers_backend.py  Project: nodirt/luci-py
  def post(self, namespace, timestamp):
    digests = []
    now = utils.timestamp_to_datetime(long(timestamp))
    expiration = config.settings().default_expiration
    try:
      digests = payload_to_hashes(self, namespace)
      # Requests all the entities at once.
      futures = ndb.get_multi_async(
          model.entry_key(namespace, binascii.hexlify(d)) for d in digests)

      to_save = []
      while futures:
        # Return opportunistically the first entity that can be retrieved.
        future = ndb.Future.wait_any(futures)
        futures.remove(future)
        item = future.get_result()
        if item and item.next_tag_ts < now:
          # Update the timestamp. Add a bit of pseudo randomness.
          item.expiration_ts, item.next_tag_ts = model.expiration_jitter(
              now, expiration)
          to_save.append(item)
      if to_save:
        ndb.put_multi(to_save)
      logging.info(
          'Timestamped %d entries out of %s', len(to_save), len(digests))
    except Exception as e:
      logging.error('Failed to stamp entries: %s\n%d entries', e, len(digests))
      raise
Code Example #2
File: replication.py  Project: mellowdistrict/luci-py
def push_auth_db(revision, auth_db):
  """Accepts AuthDB push from Primary and applies it to replica.

  Args:
    revision: replication_pb2.AuthDBRevision describing revision of pushed DB.
    auth_db: replication_pb2.AuthDB with pushed DB.

  Returns:
    Tuple (True if update was applied, stored or updated AuthReplicationState).
  """
  # Already up-to-date? Check it first before doing heavy calls.
  state = model.get_replication_state()
  if (state.primary_id == revision.primary_id and
      state.auth_db_rev >= revision.auth_db_rev):
    return False, state

  # Try to apply it, retry until success (or until some other task applies
  # an even newer version of auth_db).
  snapshot = proto_to_auth_db_snapshot(auth_db)
  while True:
    applied, current_state = replace_auth_db(
        revision.auth_db_rev,
        utils.timestamp_to_datetime(revision.modified_ts),
        snapshot)

    # Update was successfully applied.
    if applied:
      return True, current_state

    # Some other task managed to apply the update already.
    if current_state.auth_db_rev >= revision.auth_db_rev:
      return False, current_state

    # Need to retry. Try until success or deadline.
    assert current_state.auth_db_rev < revision.auth_db_rev
Code Example #3
File: impl_test.py  Project: eunchong/infra
  def test_fetch(self):
    service = impl.CASService(
        '/bucket/real', '/bucket/temp',
        auth.ServiceAccountKey('account@email.com', 'PEM private key', 'id'))

    # Actual _rsa_sign implementation depends on PyCrypto, that for some reason
    # is not importable in unit tests. _rsa_sign is small enough to be "tested"
    # manually on the dev server.
    calls = []
    def fake_sign(pkey, data):
      calls.append((pkey, data))
      return '+signature+'
    self.mock(service, '_rsa_sign', fake_sign)
    self.mock_now(utils.timestamp_to_datetime(1416444987 * 1000000.))

    # Signature and email should be urlencoded.
    url = service.generate_fetch_url('SHA1', 'a' * 40)
    self.assertEqual(
        'https://storage.googleapis.com/bucket/real/SHA1/'
        'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?'
        'GoogleAccessId=account%40email.com&'
        'Expires=1416448587&'
        'Signature=%2Bsignature%2B', url)

    # Since _rsa_sign is mocked out, at least verify it is called as expected.
    self.assertEqual([(
      'PEM private key',
      'GET\n\n\n1416448587\n/bucket/real/SHA1/' + 'a'*40
    )], calls)
Code Example #4
File: api.py  Project: eunchong/infra
def parse_datetime(timestamp):
  if timestamp is None:
    return None
  try:
    return utils.timestamp_to_datetime(timestamp)
  except OverflowError:
    raise errors.InvalidInputError(
      'Could not parse timestamp: %s' % timestamp)
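A quick usage sketch for the validator above. The calls and values are illustrative, not taken from the original project's tests; the point is that an OverflowError raised inside utils.timestamp_to_datetime is surfaced to callers as errors.InvalidInputError:

assert parse_datetime(None) is None
assert parse_datetime(1416444987 * 1000000) is not None  # microseconds since epoch, as in example #3
try:
  parse_datetime(10 ** 30)  # far beyond datetime's range, triggers OverflowError internally
except errors.InvalidInputError:
  pass  # expected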
Code Example #5
  def get(self):
    min_ts = utils.timestamp_to_datetime(0)
    now = utils.utcnow()

    for machine_key in models.CatalogMachineEntry.query(
        models.CatalogMachineEntry.lease_expiration_ts < now,
        # Also filter out unassigned machines, i.e. CatalogMachineEntries
        # where lease_expiration_ts is None. None sorts before min_ts.
        models.CatalogMachineEntry.lease_expiration_ts > min_ts,
    ).fetch(keys_only=True):
      reclaim_machine(machine_key, now)
Code Example #6
def epoch_to_datetime(value):
  """Converts a messages.FloatField that represents a timestamp since epoch in
  seconds to a datetime.datetime.

  Returns None when input is 0 or None.
  """
  if not value:
    return None
  try:
    return utils.timestamp_to_datetime(value*1000000.)
  except OverflowError as e:
    raise ValueError(e)
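Every example on this page treats the value passed to utils.timestamp_to_datetime as microseconds since the Unix epoch: note the * 1000000. scaling above and in examples #3 and #8, and the division by 1e6 back to seconds in example #11. For orientation, here is a minimal sketch of what such a helper pair could look like; this is an assumption for illustration, not the actual luci-py utils implementation:

import datetime

_EPOCH = datetime.datetime(1970, 1, 1)  # naive UTC, matching the naive datetimes used above

def timestamp_to_datetime(value):
  # Sketch: interprets 'value' as microseconds since the epoch.
  # datetime/timedelta raise OverflowError for out-of-range values, which is
  # exactly what examples #4 and #6 catch.
  return _EPOCH + datetime.timedelta(microseconds=value)

def datetime_to_timestamp(value):
  # Sketch: inverse conversion, back to integer microseconds since the epoch.
  return int((value - _EPOCH).total_seconds() * 1000000)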
Code Example #7
File: metrics.py  Project: misscache/luci-py
 def make_point_dict(p):
   # See https://cloud.google.com/monitoring/v2beta2/timeseries/write.
   desc = descriptors[p['desc']]
   labels = {l[0]: v for l, v in zip(desc.labels, p['labels'])}
   point = p['point']
   desc.validate_value(point['value'])
   value_key = '%sValue' % desc.value_type
   return {
     'timeseriesDesc': {
       'metric': 'custom.cloudmonitoring.googleapis.com/%s' % desc.name,
       'labels': {
         'custom.cloudmonitoring.googleapis.com/%s' % k: v
         for k, v in labels.iteritems()
       },
     },
     'point': {
       'start': utils.timestamp_to_datetime(
           point['start']).strftime(_TS_FORMAT),
       'end': utils.timestamp_to_datetime(
           point['end']).strftime(_TS_FORMAT),
       value_key: point['value'],
     },
   }
Code Example #8
File: impl_test.py  Project: eunchong/infra
  def test_create_upload_session_and_fetch_upload_session(self):
    service = impl.CASService('/bucket/real', '/bucket/temp')

    mocked_time = utils.timestamp_to_datetime(1416444987 * 1000000.)
    self.mock_now(mocked_time)

    def mocked_open(filename, mode, retry_params):
      self.assertEqual(filename, '/bucket/temp/1416444987_1')
      self.assertEqual(mode, 'w')
      self.assertEqual(retry_params, service._retry_params)
      # Mock guts of ReadingBuffer :(
      return common.Mock(
          _path_with_token='/bucket/temp/1416444987_1?upload_id=abc',
          _api=common.Mock(api_url='https://fake.com'))
    self.mock(impl.cloudstorage, 'open', mocked_open)

    obj, signed_id = service.create_upload_session(
        'SHA1', 'a' * 40, auth_testing.DEFAULT_MOCKED_IDENTITY)
    self.assertEqual(obj.key.id(), 1)
    self.assertEqual(obj.to_dict(), {
      'created_by': auth_testing.DEFAULT_MOCKED_IDENTITY,
      'created_ts': mocked_time,
      'error_message': None,
      'final_gs_location': '/bucket/real/SHA1/' + 'a' * 40,
      'hash_algo': 'SHA1',
      'hash_digest': 'a' * 40,
      'status': impl.UploadSession.STATUS_UPLOADING,
      'temp_gs_location': '/bucket/temp/1416444987_1',
      'upload_url': 'https://fake.com/bucket/temp/1416444987_1?upload_id=abc',
    })

    # Token should be readable.
    embedded = impl.UploadIdSignature.validate(
        signed_id, [auth_testing.DEFAULT_MOCKED_IDENTITY.to_bytes()])
    self.assertEqual(embedded, {'id': '1'})

    # Verify fetch_upload_session can use it too.
    fetched = service.fetch_upload_session(
        signed_id, auth_testing.DEFAULT_MOCKED_IDENTITY)
    self.assertIsNotNone(fetched)
    self.assertEqual(fetched.to_dict(), obj.to_dict())
Code Example #9
File: replication.py  Project: misscache/luci-py
def _update_state_on_success(
    key, started_ts, finished_ts, current_revision, auth_code_version):
  """Updates AuthReplicaState after a successful push.

  Args:
    key: key of AuthReplicaState entity to update.
    started_ts: datetime timestamp of when push was initiated.
    finished_ts: datetime timestamp of when push was completed.
    current_revision: an instance of AuthDBRevision as reported by Replica.
    auth_code_version: components.auth.version.__version__ on replica.

  Returns:
    Auth DB revision of replica as it is stored in DB after the update. May be
    different from current_revision.auth_db_rev (in case some other task
    already managed to update the replica).
  """
  # Currently stored state. May be ahead of the state initially fetched in
  # 'update_replicas_task'. If missing, the replica was removed from
  # replication list (and shouldn't be added back).
  state = key.get()
  if not state:
    return None

  # The state was updated by some other task already?
  if state.auth_db_rev >= current_revision.auth_db_rev:
    return state.auth_db_rev

  # Update stored revision, mark last push as success.
  state.auth_db_rev = current_revision.auth_db_rev
  state.rev_modified_ts = utils.timestamp_to_datetime(
      current_revision.modified_ts)
  state.auth_code_version = auth_code_version
  state.push_started_ts = started_ts
  state.push_finished_ts = finished_ts
  state.push_status = PUSH_STATUS_SUCCESS
  state.push_error = ''
  state.put()

  return state.auth_db_rev
Code Example #10
File: serializable.py  Project: misscache/luci-py
  _simple_to_rich_converters.append(
      (property_cls, include_subclasses, simple_to_rich))


### Function calls.


_register_simple_converters()


# TODO(vadimsh): Add ndb.DateProperty if needed.
register_converter(
    property_cls=ndb.DateTimeProperty,
    include_subclasses=False,
    rich_to_simple=lambda _prop, x: utils.datetime_to_timestamp(x),
    simple_to_rich=lambda _prop, x: utils.timestamp_to_datetime(x))


# Handles all property classes inherited from JsonSerializableProperty.
register_converter(
    property_cls=JsonSerializableProperty,
    include_subclasses=True,
    rich_to_simple=lambda prop, value: value.to_jsonish(),
    simple_to_rich=lambda prop, value: prop._value_type.from_jsonish(value))


# Handles all property classes inherited from BytesSerializableProperty.
register_converter(
    property_cls=BytesSerializableProperty,
    include_subclasses=True,
    rich_to_simple=lambda prop, value: value.to_bytes(),
    simple_to_rich=lambda prop, value: prop._value_type.from_bytes(value))
Code Example #11
def get_oauth_token_grant(service_account, validity_duration):
    """Returns "OAuth token grant" that allows usage of the service account.

  OAuth token grant is a signed assertion that basically says "the token server
  approves the usage of <service_account> by the <end-user>, and this assertion
  is valid for <validity_duration>".

  This function is called when the task is posted, while the end-user is still
  present. The grant is either generated by contacting the token server or
  fetched from the cache (if the cached one lives long enough).

  This function must not be used if 'has_token_server()' returns False. It will
  raise an assertion error.

  The grant is later passed back to the token server to generate an actual OAuth
  access token. When this happens, the token server rechecks the ACLs, so it's
  fine to have a large 'validity_duration' here. It basically defines how long
  to cache a "positive" ACL check.

  Args:
    service_account: a service account email to use.
    validity_duration: timedelta with how long the returned grant should live.

  Returns:
    Base64-encoded string with the grant body.

  Raises:
    PermissionError if the token server forbids the usage.
    MisconfigurationError if the service account is misconfigured.
    InternalError if the RPC fails unexpectedly.
  """
    assert has_token_server()
    assert is_service_account(service_account), service_account

    end_user = auth.get_current_identity()

    existing_grant = None
    existing_exp_ts = None

    # Try to find a cached token first.
    cache_key = _oauth_token_grant_cache_key(service_account, end_user)
    cached = memcache.get(cache_key, namespace=_OAUTH_TOKEN_GRANT_CACHE_NS)
    if cached:
        try:
            existing_grant = cached['oauth_token_grant']
            existing_exp_ts = utils.timestamp_to_datetime(cached['exp_ts'])
            if not isinstance(existing_grant, str):
                raise TypeError('"oauth_token_grant" should be str')
        except (KeyError, ValueError, TypeError):
            # Treat malformed data as a cache miss. This should not happen generally.
            logging.exception(
                'Failed to parse oauth token grant cache entry')
            existing_grant = None
            existing_exp_ts = None

    # Randomly "expire" a cached token a bit prematurely to avoid a storm of
    # refresh requests when it expires for everyone for real. With a randomization
    # only few unlucky requests (most likely one) will hit the token refresh
    # procedure.
    now = utils.utcnow()
    if existing_exp_ts:
        rnd = datetime.timedelta(seconds=random.randint(0, 600))
        if now > existing_exp_ts - rnd:
            existing_grant = None
            existing_exp_ts = None

    # Does the cached token live long enough to be useful for the caller?
    if existing_exp_ts and existing_exp_ts > now + validity_duration:
        _log_token_grant('Using cached', existing_grant, existing_exp_ts)
        return existing_grant

    # Need to make a new token either because the cached one has expired or it
    # doesn't live long enough.
    #
    # We give the new token 1h of extra lifetime to make sure it can be reused by
    # next ~1h worth of tasks (assuming all tasks request exact same lifetime).
    # Without this trick each new task will attempt to generate new token, seeing
    # that the cached one expired just a few moments ago. With 1h extra lifetime
    # we effectively cache the token for 1h (minus 0-10 min due to the expiration
    # randomization above).
    #
    # Note: this call raises auth.AuthorizationError if the current caller is not
    # allowed to use the service account.
    new_grant, new_exp_ts = _mint_oauth_token_grant(
        service_account, end_user,
        validity_duration + datetime.timedelta(hours=1))

    # Verify the token server produces a token that lives long enough. The expiry
    # of new token must surely be above validity_duration, since we request 1h of
    # extra life.
    if new_exp_ts < now + validity_duration:
        _log_token_grant('Got unexpectedly short-lived',
                         new_grant,
                         new_exp_ts,
                         log_call=logging.error)
        raise InternalError(
            'Got unexpectedly short-lived grant, see server logs')

    # New token is good.
    memcache.set(key=cache_key,
                 value={
                     'oauth_token_grant': new_grant,
                     'exp_ts': utils.datetime_to_timestamp(new_exp_ts),
                 },
                 time=utils.datetime_to_timestamp(new_exp_ts) / 1e6,
                 namespace=_OAUTH_TOKEN_GRANT_CACHE_NS)

    _log_token_grant('Generated new', new_grant, new_exp_ts)
    return new_grant
Code Example #12
def proto_to_auth_db_snapshot(auth_db_proto):
  """Given replication_pb2.AuthDB message returns AuthDBSnapshot."""
  # Explicit conversion to 'list' is needed here since protobuf magic doesn't
  # stack with NDB magic.
  global_config = model.AuthGlobalConfig(
      key=model.root_key(),
      oauth_client_id=auth_db_proto.oauth_client_id,
      oauth_client_secret=auth_db_proto.oauth_client_secret,
      oauth_additional_client_ids=list(
          auth_db_proto.oauth_additional_client_ids))

  groups = [
    model.AuthGroup(
        key=model.group_key(msg.name),
        members=[model.Identity.from_bytes(x) for x in msg.members],
        globs=[model.IdentityGlob.from_bytes(x) for x in msg.globs],
        nested=list(msg.nested),
        description=msg.description,
        owners=msg.owners or model.ADMIN_GROUP,
        created_ts=utils.timestamp_to_datetime(msg.created_ts),
        created_by=model.Identity.from_bytes(msg.created_by),
        modified_ts=utils.timestamp_to_datetime(msg.modified_ts),
        modified_by=model.Identity.from_bytes(msg.modified_by))
    for msg in auth_db_proto.groups
  ]

  secrets = [
    model.AuthSecret(
        id=msg.name,
        parent=model.secret_scope_key('global'),
        values=list(msg.values),
        modified_ts=utils.timestamp_to_datetime(msg.modified_ts),
        modified_by=model.Identity.from_bytes(msg.modified_by))
    for msg in auth_db_proto.secrets
  ]

  ip_whitelists = [
    model.AuthIPWhitelist(
        key=model.ip_whitelist_key(msg.name),
        subnets=list(msg.subnets),
        description=msg.description,
        created_ts=utils.timestamp_to_datetime(msg.created_ts),
        created_by=model.Identity.from_bytes(msg.created_by),
        modified_ts=utils.timestamp_to_datetime(msg.modified_ts),
        modified_by=model.Identity.from_bytes(msg.modified_by))
    for msg in auth_db_proto.ip_whitelists
  ]

  ip_whitelist_assignments = model.AuthIPWhitelistAssignments(
      key=model.ip_whitelist_assignments_key(),
      assignments=[
        model.AuthIPWhitelistAssignments.Assignment(
            identity=model.Identity.from_bytes(msg.identity),
            ip_whitelist=msg.ip_whitelist,
            comment=msg.comment,
            created_ts=utils.timestamp_to_datetime(msg.created_ts),
            created_by=model.Identity.from_bytes(msg.created_by))
        for msg in auth_db_proto.ip_whitelist_assignments
      ],
  )

  return AuthDBSnapshot(
      global_config, groups, secrets, ip_whitelists, ip_whitelist_assignments)
Code Example #13
File: handlers.py  Project: mcgreevy/chromium-infra
    def segment(self, payload):
        """Processes a chunk of builds in a segment.

    When finished, enqueues a flush task to persist new tag index entries.
    If there are more builds in the segment to process, enqueues itself with a
    new query cursor.

    Payload properties:
      tag: tag to reindex. Required.
      job_id: id of this backfill job. Required.
      iteration: segment task iteration. Required.
      seg_index: index of this shard. Required.
      seg_start: id of the first build in this segment. Required.
      seg_end: id of the first build in the next segment. Required.
      start_from: start from this build towards seg_end. Defaults to seg_start.
      started_ts: timestamp when we started to process this segment.
    """
        attempt = int(
            self.request.headers.get('X-AppEngine-TaskExecutionCount', 0))

        logging.info('range %d-%d', payload['seg_start'], payload['seg_end'])
        if 'start_from' in payload:
            logging.info('starting from %s', payload['start_from'])

        if attempt > 0:
            logging.warning('attempt %d', attempt)

        start_from = payload.get('start_from', payload['seg_start'])
        q = model.Build.query(
            model.Build.key >= ndb.Key(model.Build, start_from),
            model.Build.key < ndb.Key(model.Build, payload['seg_end']))
        iterator = q.iter()

        entry_count = 0
        build_count = 0
        new_entries = collections.defaultdict(list)

        # Datastore query timeout is 60 sec. Limit it to 50 sec.
        deadline = utils.utcnow() + datetime.timedelta(seconds=50)
        while (utils.utcnow() < deadline and entry_count < self.ENTRY_LIMIT
               and build_count < self.BUILD_LIMIT and iterator.has_next()):
            b = iterator.next()
            build_count += 1
            for t in b.tags:
                k, v = t.split(':', 1)
                if k == payload['tag']:
                    new_entries[v].append([b.bucket, b.key.id()])
                    entry_count += 1
        logging.info('collected %d entries from %d builds', entry_count,
                     build_count)

        if new_entries:  # pragma: no branch
            logging.info(
                'enqueuing a task to flush %d tag entries in %d TagIndex entities...',
                entry_count, len(new_entries))
            flush_payload = {
                'action': 'flush',
                'tag': payload['tag'],
                'new_entries': new_entries,
            }
            self._recurse([(None, 'tag:{tag}-flush', flush_payload)])
        if iterator.has_next():
            logging.info('enqueuing a task for the next iteration...')

            p = payload.copy()
            p['iteration'] += 1
            p['start_from'] = iterator.next().key.id()

            seg_len = payload['seg_end'] - payload['seg_start']
            percent = 100 * (p['start_from'] - payload['seg_start']) / seg_len

            try:
                self._recurse([(
                    '{job_id}-{seg_index}-{iteration}',
                    'tag:{tag}-seg:{seg_index}-percent:%d' % percent,
                    p,
                )])
            except taskqueue.TaskAlreadyExistsError:  # pragma: no cover
                pass
            return

        started_time = utils.timestamp_to_datetime(payload['started_ts'])
        logging.info('segment %d is done in %s', payload['seg_index'],
                     utils.utcnow() - started_time)
Code Example #14
    _rich_to_simple_converters.append(
        (property_cls, include_subclasses, rich_to_simple))
    _simple_to_rich_converters.append(
        (property_cls, include_subclasses, simple_to_rich))


### Function calls.

_register_simple_converters()

# TODO(vadimsh): Add ndb.DateProperty if needed.
register_converter(
    property_cls=ndb.DateTimeProperty,
    include_subclasses=False,
    rich_to_simple=lambda _prop, x: utils.datetime_to_timestamp(x),
    simple_to_rich=lambda _prop, x: utils.timestamp_to_datetime(x))

# Handles all property classes inherited from JsonSerializableProperty.
register_converter(
    property_cls=JsonSerializableProperty,
    include_subclasses=True,
    rich_to_simple=lambda prop, value: value.to_jsonish(),
    simple_to_rich=lambda prop, value: prop._value_type.from_jsonish(value))

# Handles all property classes inherited from BytesSerializableProperty.
register_converter(
    property_cls=BytesSerializableProperty,
    include_subclasses=True,
    rich_to_simple=lambda prop, value: value.to_bytes(),
    simple_to_rich=lambda prop, value: prop._value_type.from_bytes(value))
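The register_converter calls above populate lookup tables mapping NDB property classes to conversion callbacks; include_subclasses controls whether an entry also matches subclasses of property_cls (False for ndb.DateTimeProperty, True for the serializable property base classes). A hedged sketch of how such a registry might be consulted when a property value needs converting; the helper name and exact matching rules are assumptions, not code from serializable.py:

def _find_converter(converters, prop):
  # Sketch: scan registered (property_cls, include_subclasses, fn) tuples in
  # registration order and return the first callback matching the property.
  for property_cls, include_subclasses, fn in converters:
    if include_subclasses and isinstance(prop, property_cls):
      return fn
    if type(prop) is property_cls:
      return fn
  raise TypeError('No converter registered for %r' % prop)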
Code Example #15
File: replication.py  Project: mellowdistrict/luci-py
def proto_to_auth_db_snapshot(auth_db_proto):
  """Given replication_pb2.AuthDB message returns AuthDBSnapshot."""
  # Explicit conversion to 'list' is needed here since protobuf magic doesn't
  # stack with NDB magic.
  global_config = model.AuthGlobalConfig(
      key=model.root_key(),
      oauth_client_id=auth_db_proto.oauth_client_id,
      oauth_client_secret=auth_db_proto.oauth_client_secret,
      oauth_additional_client_ids=list(
          auth_db_proto.oauth_additional_client_ids))

  groups = [
    model.AuthGroup(
        key=model.group_key(msg.name),
        members=[model.Identity.from_bytes(x) for x in msg.members],
        globs=[model.IdentityGlob.from_bytes(x) for x in msg.globs],
        nested=list(msg.nested),
        description=msg.description,
        owners=msg.owners or model.ADMIN_GROUP,
        created_ts=utils.timestamp_to_datetime(msg.created_ts),
        created_by=model.Identity.from_bytes(msg.created_by),
        modified_ts=utils.timestamp_to_datetime(msg.modified_ts),
        modified_by=model.Identity.from_bytes(msg.modified_by))
    for msg in auth_db_proto.groups
  ]

  secrets = [
    model.AuthSecret(
        id=msg.name,
        parent=model.secret_scope_key('global'),
        values=list(msg.values),
        modified_ts=utils.timestamp_to_datetime(msg.modified_ts),
        modified_by=model.Identity.from_bytes(msg.modified_by))
    for msg in auth_db_proto.secrets
  ]

  ip_whitelists = [
    model.AuthIPWhitelist(
        key=model.ip_whitelist_key(msg.name),
        subnets=list(msg.subnets),
        description=msg.description,
        created_ts=utils.timestamp_to_datetime(msg.created_ts),
        created_by=model.Identity.from_bytes(msg.created_by),
        modified_ts=utils.timestamp_to_datetime(msg.modified_ts),
        modified_by=model.Identity.from_bytes(msg.modified_by))
    for msg in auth_db_proto.ip_whitelists
  ]

  ip_whitelist_assignments = model.AuthIPWhitelistAssignments(
      key=model.ip_whitelist_assignments_key(),
      assignments=[
        model.AuthIPWhitelistAssignments.Assignment(
            identity=model.Identity.from_bytes(msg.identity),
            ip_whitelist=msg.ip_whitelist,
            comment=msg.comment,
            created_ts=utils.timestamp_to_datetime(msg.created_ts),
            created_by=model.Identity.from_bytes(msg.created_by))
        for msg in auth_db_proto.ip_whitelist_assignments
      ],
  )

  return AuthDBSnapshot(
      global_config, groups, secrets, ip_whitelists, ip_whitelist_assignments)
Code Example #16
File: template.py  Project: misscache/luci-py
def _epochformat(value, f='%Y-%m-%d %H:%M:%S'):
  """Formats a float representing epoch to datetime."""
  if not value:
    return NON_BREAKING_HYPHEN + NON_BREAKING_HYPHEN
  return _datetimeformat(utils.timestamp_to_datetime(value * 1000000), f)
Code Example #17
File: template.py  Project: stefb965/luci-py
def _epochformat(value, f='%Y-%m-%d %H:%M:%S'):
    """Formats a float representing epoch to datetime."""
    if not value:
        return NON_BREAKING_HYPHEN + NON_BREAKING_HYPHEN
    return _datetimeformat(utils.timestamp_to_datetime(value * 1000000), f)
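A brief usage sketch for the template filter above, assuming _datetimeformat is a plain strftime wrapper and NON_BREAKING_HYPHEN is a non-breaking hyphen constant (neither is shown on this page):

# 'value' is seconds since the epoch; the filter scales it to the microseconds
# expected by utils.timestamp_to_datetime before formatting.
print _epochformat(1416444987)              # e.g. '2014-11-20 00:56:27' under the assumptions above
print _epochformat(1416444987, '%Y-%m-%d')  # e.g. '2014-11-20'
print _epochformat(0)                       # two non-breaking hyphens, the "no value" marker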