Example #1
def _setup_collector(self):
    self.sm_collector = SubMaster(['liveTracks'])
    self.log_auto_df = self.op_params.get('log_auto_df')
    if not isinstance(self.log_auto_df, bool):
        self.log_auto_df = False
    self.data_collector = DataCollector(file_path='/data/df_data',
                                        keys=['v_ego', 'a_ego', 'a_lead', 'v_lead',
                                              'x_lead', 'profile', 'time'],
                                        log_data=self.log_auto_df)
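For context, a minimal standalone sketch of reading the same 'liveTracks' stream this collector subscribes to. The field names follow cereal's liveTracks schema, and the 20 Hz poll rate is an arbitrary choice, not something taken from the example above:

import time
from cereal.messaging import SubMaster

sm = SubMaster(['liveTracks'])
while True:
    sm.update(0)  # non-blocking poll
    if sm.updated['liveTracks']:
        for track in sm['liveTracks']:
            # dRel/vRel are the relative distance and speed of each radar track
            print(f"track {track.trackId}: dRel={track.dRel:.1f} m, vRel={track.vRel:.1f} m/s")
    time.sleep(1 / 20)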
Example #2
    def __init__(self):
        self.sm = SubMaster(['laneSpeed'])
        self.pm = PubMaster(['dynamicCameraOffset'])
        self.op_params = opParams()
        self.camera_offset = self.op_params.get('camera_offset')

        self.left_lane_oncoming = False  # mutable oncoming-lane state, updated from laneSpeed
        self.right_lane_oncoming = False
        self.last_left_lane_oncoming = False
        self.last_right_lane_oncoming = False
        self.last_oncoming_time = 0
        self.i = 0.0

        self._setup_static()
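Publishing the computed offset is not shown in this excerpt. A hypothetical sketch of what the publish side could look like, assuming the fork's cereal schema defines a dynamicCameraOffset message with a cameraOffset field (both names are assumptions, not confirmed API):

from cereal import messaging

def publish_offset(pm, offset):
    # assumes 'dynamicCameraOffset' exists in the fork's cereal schema
    msg = messaging.new_message('dynamicCameraOffset')
    msg.dynamicCameraOffset.cameraOffset = float(offset)  # assumed field name
    pm.send('dynamicCameraOffset', msg)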
Example #3
def main():
  def get_influxdb_line(measurement: str, value: float, timestamp: datetime, tags: dict):
    res = f"{measurement}"
    for k, v in tags.items():
      res += f",{k}={str(v)}"
    res += f" value={value} {int(timestamp.timestamp() * 1e9)}\n"
    return res

  # open statistics socket
  ctx = zmq.Context().instance()
  sock = ctx.socket(zmq.PULL)
  sock.bind(STATS_SOCKET)

  # initialize stats directory
  Path(STATS_DIR).mkdir(parents=True, exist_ok=True)

  # initialize tags
  tags = {
    'dongleId': Params().get("DongleId", encoding='utf-8'),
    'started': False,
    'version': get_short_version(),
    'branch': get_short_branch(),
    'dirty': is_dirty(),
    'origin': get_normalized_origin(),
    'deviceType': HARDWARE.get_device_type(),
  }

  # subscribe to deviceState for started state
  sm = SubMaster(['deviceState'])

  last_flush_time = time.monotonic()
  gauges = {}
  while True:
    started_prev = sm['deviceState'].started
    sm.update()

    # Update metrics
    while True:
      try:
        metric = sock.recv_string(zmq.NOBLOCK)
        try:
          metric_type = metric.split('|')[1]
          metric_name = metric.split(':')[0]
          metric_value = metric.split('|')[0].split(':')[1]

          if metric_type == METRIC_TYPE.GAUGE:
            gauges[metric_name] = metric_value
          else:
            cloudlog.event("unknown metric type", metric_type=metric_type)
        except Exception:
          cloudlog.event("malformed metric", metric=metric)
      except zmq.error.Again:
        break

    # flush when the started state changes or every STATS_FLUSH_TIME_S seconds
    if (time.monotonic() > last_flush_time + STATS_FLUSH_TIME_S) or (sm['deviceState'].started != started_prev):
      result = ""
      current_time = datetime.utcnow().replace(tzinfo=timezone.utc)
      tags['started'] = sm['deviceState'].started

      for gauge_key in gauges:
        result += get_influxdb_line(f"gauge.{gauge_key}", gauges[gauge_key], current_time, tags)

      # clear intermediate data
      gauges = {}
      last_flush_time = time.monotonic()

      # check that we aren't filling up the drive
      if len(os.listdir(STATS_DIR)) < STATS_DIR_FILE_LIMIT:
        if len(result) > 0:
          stats_path = os.path.join(STATS_DIR, str(int(current_time.timestamp())))
          with atomic_write_in_dir(stats_path) as f:
            f.write(result)
      else:
        cloudlog.error("stats dir full")
Example #4

def cputime_total(ct):
    return ct.user + ct.nice + ct.system + ct.idle + ct.iowait + ct.irq + ct.softirq


def cputime_busy(ct):
    return ct.user + ct.nice + ct.system + ct.irq + ct.softirq


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--mem', action='store_true')
    args = parser.parse_args()

    sm = SubMaster(['thermal', 'procLog'])

    last_temp = 0.0
    last_mem = 0.0
    total_times = [0., 0., 0., 0.]
    busy_times = [0., 0., 0., 0.]

    while True:
        sm.update()

        if sm.updated['thermal']:
            t = sm['thermal']
            last_temp = np.mean([t.cpu0, t.cpu1, t.cpu2, t.cpu3]) / 10.
            last_mem = t.memUsedPercent

        if sm.updated['procLog']:
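The procLog branch is cut off in this excerpt. A standalone sketch (not the elided code itself) of how the cputime helpers above are typically combined to get per-core usage over an interval:

def core_usage_percent(prev_ct, cur_ct) -> float:
    # usage = change in busy time / change in total time since the last sample
    busy = cputime_busy(cur_ct) - cputime_busy(prev_ct)
    total = cputime_total(cur_ct) - cputime_total(prev_ct)
    return 100.0 * busy / total if total > 0 else 0.0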
Example #5
def main() -> NoReturn:
  dongle_id = Params().get("DongleId", encoding='utf-8')
  def get_influxdb_line(measurement: str, value: Union[float, Dict[str, float]], timestamp: datetime, tags: dict) -> str:
    res = f"{measurement}"
    for k, v in tags.items():
      res += f",{k}={str(v)}"
    res += " "

    if isinstance(value, float):
      value = {'value': value}

    for k, v in value.items():
      res += f"{k}={v},"

    res += f"dongle_id=\"{dongle_id}\" {int(timestamp.timestamp() * 1e9)}\n"
    return res

  # open statistics socket
  ctx = zmq.Context().instance()
  sock = ctx.socket(zmq.PULL)
  sock.bind(STATS_SOCKET)

  # initialize stats directory
  Path(STATS_DIR).mkdir(parents=True, exist_ok=True)

  # initialize tags
  tags = {
    'started': False,
    'version': get_short_version(),
    'branch': get_short_branch(),
    'dirty': is_dirty(),
    'origin': get_normalized_origin(),
    'deviceType': HARDWARE.get_device_type(),
  }

  # subscribe to deviceState for started state
  sm = SubMaster(['deviceState'])

  idx = 0
  last_flush_time = time.monotonic()
  gauges = {}
  samples: Dict[str, List[float]] = defaultdict(list)
  while True:
    started_prev = sm['deviceState'].started
    sm.update()

    # Update metrics
    while True:
      try:
        metric = sock.recv_string(zmq.NOBLOCK)
        try:
          metric_type = metric.split('|')[1]
          metric_name = metric.split(':')[0]
          metric_value = float(metric.split('|')[0].split(':')[1])

          if metric_type == METRIC_TYPE.GAUGE:
            gauges[metric_name] = metric_value
          elif metric_type == METRIC_TYPE.SAMPLE:
            samples[metric_name].append(metric_value)
          else:
            cloudlog.event("unknown metric type", metric_type=metric_type)
        except Exception:
          cloudlog.event("malformed metric", metric=metric)
      except zmq.error.Again:
        break

    # flush when the started state changes or every STATS_FLUSH_TIME_S seconds
    if (time.monotonic() > last_flush_time + STATS_FLUSH_TIME_S) or (sm['deviceState'].started != started_prev):
      result = ""
      current_time = datetime.utcnow().replace(tzinfo=timezone.utc)
      tags['started'] = sm['deviceState'].started

      for key, value in gauges.items():
        result += get_influxdb_line(f"gauge.{key}", value, current_time, tags)

      for key, values in samples.items():
        values.sort()
        sample_count = len(values)
        sample_sum = sum(values)

        stats = {
          'count': sample_count,
          'min': values[0],
          'max': values[-1],
          'mean': sample_sum / sample_count,
        }
        for percentile in [0.05, 0.5, 0.95]:
          value = values[int(round(percentile * (sample_count - 1)))]
          stats[f"p{int(percentile * 100)}"] = value

        result += get_influxdb_line(f"sample.{key}", stats, current_time, tags)

      # clear intermediate data
      gauges.clear()
      samples.clear()
      last_flush_time = time.monotonic()

      # check that we aren't filling up the drive
      if len(os.listdir(STATS_DIR)) < STATS_DIR_FILE_LIMIT:
        if len(result) > 0:
          stats_path = os.path.join(STATS_DIR, f"{current_time.timestamp():.0f}_{idx}")
          with atomic_write_in_dir(stats_path) as f:
            f.write(result)
          idx += 1
      else:
        cloudlog.error("stats dir full")
Example #6
    name = proc.name
    if len(proc.cmdline):
        name = proc.cmdline[0]
    if len(proc.exe):
        name = proc.exe + " - " + name

    return name


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--mem', action='store_true')
    parser.add_argument('--cpu', action='store_true')
    args = parser.parse_args()

    sm = SubMaster(['deviceState', 'procLog'])

    last_temp = 0.0
    last_mem = 0.0
    total_times = [0.] * 8
    busy_times = [0.] * 8

    prev_proclog: Optional[capnp._DynamicStructReader] = None
    prev_proclog_t: Optional[int] = None

    while True:
        sm.update()

        if sm.updated['deviceState']:
            t = sm['deviceState']
            last_temp = mean(t.cpuTempC)
Example #7
def _setup_collector(self):
    self.sm = SubMaster(['liveTracks'])
Example #8
from cereal.messaging import SubMaster
import time

sm = SubMaster(['dynamicFollowData'])

while True:
    sm.update(0)
    print('mpc_TR: {}'.format(sm['dynamicFollowData'].mpcTR))
    time.sleep(1 / 20)
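Since sm.update(0) is a non-blocking poll, the loop above prints the last known value every cycle even when nothing new arrived. A small variant that only prints on fresh data, using SubMaster's updated flag:

from cereal.messaging import SubMaster
import time

sm = SubMaster(['dynamicFollowData'])

while True:
    sm.update(0)  # non-blocking poll
    if sm.updated['dynamicFollowData']:  # only act on freshly received messages
        print(f"mpc_TR: {sm['dynamicFollowData'].mpcTR}")
    time.sleep(1 / 20)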
Example #9
from cereal.messaging import SubMaster

sm = SubMaster(['modelV2'], poll=['modelV2'])
model_t = [
    0, 0.009765625, 0.0390625, 0.087890625, 0.15625, 0.24414062, 0.3515625,
    0.47851562, 0.625, 0.79101562, 0.9765625, 1.1816406, 1.40625, 1.6503906,
    1.9140625, 2.1972656, 2.5, 2.8222656, 3.1640625, 3.5253906, 3.90625,
    4.3066406, 4.7265625, 5.1660156, 5.625, 6.1035156, 6.6015625, 7.1191406,
    7.65625, 8.2128906, 8.7890625, 9.3847656, 10
]
mpc_idxs = list(range(10))

# map each whole second of the MPC horizon (0-9 s) to the index of the closest model_t value
model_t_idx = [min(range(len(model_t)), key=lambda i: abs(model_t[i] - idx)) for idx in mpc_idxs]
# speed_curr_idx = min(range(len(model_t)), key=lambda i: abs(model_t[i] - 0.1))  # idx used for current speed; position still uses model_t_idx

while 1:
    sm.update()

    modelV2 = sm['modelV2']
    if not sm.updated['modelV2'] or len(modelV2.position.x) == 0:
        continue

    # everything is derived from x position since velocity is outputting weird values
    distances, speeds, accelerations = [], [], []
    for t in model_t_idx:
        speeds.append(modelV2.velocity.x[t])
        distances.append(modelV2.position.x[t])