Example #1
  def read(self, ll=None):
    if self._force_download:
      return self.read_aux(ll=ll)

    file_begin = self._pos
    file_end = self._pos + ll if ll is not None else self.get_length()
    #  Align with the chunks we store: position is the beginning of the last chunk that starts at or before file_begin
    position = (file_begin // CHUNK_SIZE) * CHUNK_SIZE
    response = b""
    while True:
      self._pos = position
      chunk_number = self._pos // CHUNK_SIZE  # integer chunk index; position is CHUNK_SIZE-aligned
      file_name = hash_256(self._url) + "_" + str(chunk_number)
      full_path = os.path.join(CACHE_DIR, file_name)
      data = None
      #  If this chunk isn't cached yet, download it and write it to the cache
      if not os.path.exists(full_path):
        data = self.read_aux(ll=CHUNK_SIZE)
        with atomic_write_in_dir(full_path, mode="wb") as new_cached_file:
          new_cached_file.write(data)
      else:
        with open(full_path, "rb") as cached_file:
          data = cached_file.read()

      #  Keep only the part of this chunk that falls inside [file_begin, file_end)
      response += data[max(0, file_begin - position): min(CHUNK_SIZE, file_end - position)]

      position += CHUNK_SIZE
      if position >= file_end:
        self._pos = file_end
        return response
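
Example #1 (and the cache code below) relies on an atomic_write_in_dir helper that is not shown. A minimal sketch of such a context manager, assuming all it must guarantee is that readers never observe a partially written cache file; the temp-file-plus-rename approach and the exact signature are assumptions, not the project's actual implementation:

import os
import tempfile
from contextlib import contextmanager

@contextmanager
def atomic_write_in_dir(path, mode="w", overwrite=False):
  # Write to a temp file in the destination directory, then rename it into
  # place. os.replace is atomic for same-filesystem renames on POSIX, so a
  # concurrent reader sees either the old file or the complete new one.
  fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(path) or ".")
  try:
    with os.fdopen(fd, mode) as f:
      yield f
    if not overwrite and os.path.exists(path):
      raise FileExistsError(path)
    os.replace(tmp_path, path)
  except Exception:
    os.unlink(tmp_path)
    raise
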
Example #2
  def get_length(self):
    if self._length is not None:
      return self._length
    file_length_path = os.path.join(CACHE_DIR, hash_256(self._url) + "_length")
    if os.path.exists(file_length_path) and not self._force_download:
      with open(file_length_path, "r") as file_length:
        content = file_length.read()
        self._length = int(content)
        return self._length

    self._length = self.get_length_online()
    if not self._force_download:
      with atomic_write_in_dir(file_length_path, mode="w") as file_length:
        file_length.write(str(self._length))
    return self._length
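
The cache file names in Examples #1 and #2 come from a hash_256 helper that is also not shown; presumably it is a hex digest of the URL. A plausible stand-in, assuming SHA-256 over the UTF-8 bytes (the real helper may normalize the URL first):

import hashlib

def hash_256(link):
  # Stable, filesystem-safe cache key derived from the URL
  return hashlib.sha256(link.encode("utf-8")).hexdigest()
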
Example #3
  def cache_inner(fn, *args, **kwargs):
    if kwargs.pop('no_cache', None):
      cache_path = None
    else:
      cache_prefix = kwargs.pop('cache_prefix', None)
      cache_path = cache_path_for_file_path(fn, cache_prefix)

    if cache_path and os.path.exists(cache_path):
      with open(cache_path, "rb") as cache_file:
        cache_value = pickle.load(cache_file)
    else:
      # 'func' is the wrapped function, captured from the enclosing decorator scope
      cache_value = func(fn, *args, **kwargs)

      if cache_path:
        with atomic_write_in_dir(cache_path, mode="wb", overwrite=True) as cache_file:
          pickle.dump(cache_value, cache_file, -1)  # -1 = highest pickle protocol

    return cache_value
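
Since cache_inner references func from an enclosing scope, it reads like the inner function of a caching decorator. A sketch of what that outer wrapper presumably looks like; the name cached, the use of functools.wraps, and the stand-in function are assumptions, and the elided body is exactly Example #3:

import functools

def cached(func):
  # Hypothetical outer decorator: the 'func' referenced inside cache_inner
  # in Example #3 would be supplied here, and only called on a cache miss.
  @functools.wraps(func)
  def cache_inner(fn, *args, **kwargs):
    # ... caching body from Example #3 goes here ...
    return func(fn, *args, **kwargs)
  return cache_inner

@cached
def parse_log(fn):
  # stand-in for an expensive, file-path-keyed computation
  with open(fn, "rb") as f:
    return len(f.read())
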
Example #4
def main():
  def get_influxdb_line(measurement: str, value: float, timestamp: datetime, tags: dict):
    res = f"{measurement}"
    for k, v in tags.items():
      res += f",{k}={str(v)}"
    res += f" value={value} {int(timestamp.timestamp() * 1e9)}\n"
    return res

  # open statistics socket
  ctx = zmq.Context().instance()
  sock = ctx.socket(zmq.PULL)
  sock.bind(STATS_SOCKET)

  # initialize stats directory
  Path(STATS_DIR).mkdir(parents=True, exist_ok=True)

  # initialize tags
  tags = {
    'dongleId': Params().get("DongleId", encoding='utf-8'),
    'started': False,
    'version': get_short_version(),
    'branch': get_short_branch(),
    'dirty': is_dirty(),
    'origin': get_normalized_origin(),
    'deviceType': HARDWARE.get_device_type(),
  }

  # subscribe to deviceState for started state
  sm = SubMaster(['deviceState'])

  last_flush_time = time.monotonic()
  gauges = {}
  while True:
    started_prev = sm['deviceState'].started
    sm.update()

    # Update metrics
    while True:
      try:
        metric = sock.recv_string(zmq.NOBLOCK)
        try:
          # statsd-style wire format: "<name>:<value>|<type>"
          metric_type = metric.split('|')[1]
          metric_name = metric.split(':')[0]
          metric_value = metric.split('|')[0].split(':')[1]

          if metric_type == METRIC_TYPE.GAUGE:
            gauges[metric_name] = metric_value
          else:
            cloudlog.event("unknown metric type", metric_type=metric_type)
        except Exception:
          cloudlog.event("malformed metric", metric=metric)
      except zmq.error.Again:
        break

    # flush when started state changes or after STATS_FLUSH_TIME_S
    if (time.monotonic() > last_flush_time + STATS_FLUSH_TIME_S) or (sm['deviceState'].started != started_prev):
      result = ""
      current_time = datetime.utcnow().replace(tzinfo=timezone.utc)
      tags['started'] = sm['deviceState'].started

      for key, value in gauges.items():
        result += get_influxdb_line(f"gauge.{key}", value, current_time, tags)

      # clear intermediate data
      gauges = {}
      last_flush_time = time.monotonic()

      # check that we aren't filling up the drive
      if len(os.listdir(STATS_DIR)) < STATS_DIR_FILE_LIMIT:
        if len(result) > 0:
          stats_path = os.path.join(STATS_DIR, str(int(current_time.timestamp())))
          with atomic_write_in_dir(stats_path) as f:
            f.write(result)
      else:
        cloudlog.error("stats dir full")
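
For reference, get_influxdb_line emits InfluxDB line protocol: the measurement name, comma-separated tags, a space, the field set, and a nanosecond timestamp. A hypothetical call, sketched as if the helper were top-level and with invented tag values:

from datetime import datetime, timezone

ts = datetime(2022, 1, 1, tzinfo=timezone.utc)
line = get_influxdb_line("gauge.cpu_temp", 61.5, ts, {'dongleId': 'abc123', 'started': True})
# gauge.cpu_temp,dongleId=abc123,started=True value=61.5 1640995200000000000
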
Example #5
def main() -> NoReturn:
  dongle_id = Params().get("DongleId", encoding='utf-8')
  def get_influxdb_line(measurement: str, value: Union[float, Dict[str, float]], timestamp: datetime, tags: dict) -> str:
    res = f"{measurement}"
    for k, v in tags.items():
      res += f",{k}={str(v)}"
    res += " "

    if isinstance(value, float):
      value = {'value': value}

    for k, v in value.items():
      res += f"{k}={v},"

    res += f"dongle_id=\"{dongle_id}\" {int(timestamp.timestamp() * 1e9)}\n"
    return res

  # open statistics socket
  ctx = zmq.Context().instance()
  sock = ctx.socket(zmq.PULL)
  sock.bind(STATS_SOCKET)

  # initialize stats directory
  Path(STATS_DIR).mkdir(parents=True, exist_ok=True)

  # initialize tags
  tags = {
    'started': False,
    'version': get_short_version(),
    'branch': get_short_branch(),
    'dirty': is_dirty(),
    'origin': get_normalized_origin(),
    'deviceType': HARDWARE.get_device_type(),
  }

  # subscribe to deviceState for started state
  sm = SubMaster(['deviceState'])

  idx = 0
  last_flush_time = time.monotonic()
  gauges = {}
  samples: Dict[str, List[float]] = defaultdict(list)
  while True:
    started_prev = sm['deviceState'].started
    sm.update()

    # Update metrics
    while True:
      try:
        metric = sock.recv_string(zmq.NOBLOCK)
        try:
          metric_type = metric.split('|')[1]
          metric_name = metric.split(':')[0]
          metric_value = float(metric.split('|')[0].split(':')[1])

          if metric_type == METRIC_TYPE.GAUGE:
            gauges[metric_name] = metric_value
          elif metric_type == METRIC_TYPE.SAMPLE:
            samples[metric_name].append(metric_value)
          else:
            cloudlog.event("unknown metric type", metric_type=metric_type)
        except Exception:
          cloudlog.event("malformed metric", metric=metric)
      except zmq.error.Again:
        break

    # flush when started state changes or after STATS_FLUSH_TIME_S
    if (time.monotonic() > last_flush_time + STATS_FLUSH_TIME_S) or (sm['deviceState'].started != started_prev):
      result = ""
      current_time = datetime.utcnow().replace(tzinfo=timezone.utc)
      tags['started'] = sm['deviceState'].started

      for key, value in gauges.items():
        result += get_influxdb_line(f"gauge.{key}", value, current_time, tags)

      for key, values in samples.items():
        values.sort()
        sample_count = len(values)
        sample_sum = sum(values)

        stats = {
          'count': sample_count,
          'min': values[0],
          'max': values[-1],
          'mean': sample_sum / sample_count,
        }
        for percentile in [0.05, 0.5, 0.95]:
          value = values[int(round(percentile * (sample_count - 1)))]
          stats[f"p{int(percentile * 100)}"] = value

        result += get_influxdb_line(f"sample.{key}", stats, current_time, tags)

      # clear intermediate data
      gauges.clear()
      samples.clear()
      last_flush_time = time.monotonic()

      # check that we aren't filling up the drive
      if len(os.listdir(STATS_DIR)) < STATS_DIR_FILE_LIMIT:
        if len(result) > 0:
          stats_path = os.path.join(STATS_DIR, f"{current_time.timestamp():.0f}_{idx}")
          with atomic_write_in_dir(stats_path) as f:
            f.write(result)
          idx += 1
      else:
        cloudlog.error("stats dir full")
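
The percentile lookup in Example #5 is nearest-rank selection on the sorted sample list: index round(p * (n - 1)). The same computation in isolation, as a quick self-contained check (function name and test data are illustrative):

def percentiles(values, ps=(0.05, 0.5, 0.95)):
  # Nearest-rank percentile over the sorted samples, as in Example #5
  values = sorted(values)
  n = len(values)
  return {f"p{int(p * 100)}": values[int(round(p * (n - 1)))] for p in ps}

print(percentiles([10.0, 20.0, 30.0, 40.0, 50.0]))
# {'p5': 10.0, 'p50': 30.0, 'p95': 50.0}
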