Example 1
  def test_cluster(self):
    """Test the '/cluster' endpoint."""
    start_time = utilities.now()
    # Exercise the collector. Read data from golden files and compute
    # a context graph.
    ret_value = self.app.get('/cluster')
    end_time = utilities.now()
    result = json.loads(ret_value.data)
    self.verify_resources(result, start_time, end_time)

    self.assertEqual(23, self.count_relations(result, 'contains'))
    self.assertEqual(3, self.count_relations(result, 'createdFrom'))
    self.assertEqual(7, self.count_relations(result, 'loadBalances'))
    self.assertEqual(6, self.count_relations(result, 'monitors'))
    self.assertEqual(10, self.count_relations(result, 'runs'))

    # Verify that all relations contain a timestamp in the range
    # [start_time, end_time].
    self.assertTrue(isinstance(result.get('relations'), types.ListType))
    for r in result['relations']:
      self.assertTrue(isinstance(r, types.DictType))
      timestamp = r.get('timestamp')
      self.assertTrue(utilities.valid_string(timestamp))
      self.assertTrue(start_time <= timestamp <= end_time)

    # The overall timestamp must be in the expected range.
    self.assertTrue(utilities.valid_string(result.get('timestamp')))
    self.assertTrue(start_time <= result['timestamp'] <= end_time)

    json_output = json.dumps(result, sort_keys=True)
    self.assertEqual(2, json_output.count('"alternateLabel": '))
    self.assertEqual(85, json_output.count('"createdBy": '))
Example 2
def compile_light_time(device_id):
    """Process time and light."""
    config = get_config_for_device(device_id)
    status = get_status_for_device(device_id)
    if config and status:
        _now = now()
        timestamp = get_timestamp(status)
        light = get_light_threshold(config)
        time_diff = round(get_time_diff(timestamp, _now))
        analysis = analyze_time_diff(time_diff, device_id)
        light_reading = get_current_light_reading(status)
    else:
        _now = now()
        timestamp = 'I got a 401 or some other error'
        light = 'I got a 401 or some other error'
        time_diff = 'I got a 401 or some other error'
        analysis = 'I got a 401 or some other error'
        light_reading = 'I got a 401 or some other error'

    # print("time: {}, light:{}, 'time_diff': {}, 'analysis': {}".format(
    #     timestamp,
    #     light,
    #     time_diff,
    #     analysis)
    # )

    return ({
        'now': _now,
        'time': timestamp,
        'light': light,
        'time difference': time_diff,
        'analysis': analysis,
        'light_reading': light_reading
    })
Example 3
    def test_cluster(self):
        """Test the '/cluster' endpoint."""
        start_time = utilities.now()
        # Exercise the collector. Read data from golden files and compute
        # a context graph.
        ret_value = self.app.get('/cluster')
        end_time = utilities.now()
        result = json.loads(ret_value.data)
        self.verify_resources(result, start_time, end_time)

        self.assertEqual(23, self.count_relations(result, 'contains'))
        self.assertEqual(3, self.count_relations(result, 'createdFrom'))
        self.assertEqual(7, self.count_relations(result, 'loadBalances'))
        self.assertEqual(6, self.count_relations(result, 'monitors'))
        self.assertEqual(10, self.count_relations(result, 'runs'))

        # Verify that all relations contain a timestamp in the range
        # [start_time, end_time].
        self.assertTrue(isinstance(result.get('relations'), types.ListType))
        for r in result['relations']:
            self.assertTrue(isinstance(r, types.DictType))
            timestamp = r.get('timestamp')
            self.assertTrue(utilities.valid_string(timestamp))
            self.assertTrue(start_time <= timestamp <= end_time)

        # The overall timestamp must be in the expected range.
        self.assertTrue(utilities.valid_string(result.get('timestamp')))
        self.assertTrue(start_time <= result['timestamp'] <= end_time)

        json_output = json.dumps(result, sort_keys=True)
        self.assertEqual(2, json_output.count('"alternateLabel": '))
        self.assertEqual(85, json_output.count('"createdBy": '))
Example 4
  def add_relation(self, source, target, kind, label=None, metadata=None):
    """Adds a relation to the context graph."""
    assert utilities.valid_string(source) and utilities.valid_string(target)
    assert utilities.valid_string(kind)
    assert utilities.valid_optional_string(label)
    assert (metadata is None) or isinstance(metadata, dict)

    with self._lock:
      # The timestamp of the relation should be inherited from the previous
      # context graph.
      key = (source, target, kind)
      timestamp = self._previous_relations_to_timestamps.get(key)
      if not utilities.valid_string(timestamp):
        timestamp = utilities.now()

      # Add the relation to the context graph data structure.
      relation = {
          'source': source,
          'target': target,
          'type': kind,
          'timestamp': timestamp
      }
      self._current_relations_to_timestamps[key] = timestamp

      # Add annotations as needed.
      relation['annotations'] = {}
      if metadata is not None:
        relation['annotations']['metadata'] = copy.deepcopy(metadata)
      relation['annotations']['label'] = label if label is not None else kind

      self._context_relations.append(relation)
Example 5
  def add_relation(self, source, target, kind, label=None, metadata=None):
    """Adds a relation to the context graph."""
    assert utilities.valid_string(source) and utilities.valid_string(target)
    assert utilities.valid_string(kind)
    assert utilities.valid_optional_string(label)
    assert (metadata is None) or isinstance(metadata, types.DictType)

    with self._lock:
      # The timestamp of the relation should be inherited from the previous
      # context graph.
      key = (source, target, kind)
      timestamp = self._previous_relations_to_timestamps.get(key)
      if not utilities.valid_string(timestamp):
        timestamp = utilities.now()

      # Add the relation to the context graph data structure.
      relation = {
          'source': source,
          'target': target,
          'type': kind,
          'timestamp': timestamp
      }
      self._current_relations_to_timestamps[key] = timestamp

      # Add annotations as needed.
      relation['annotations'] = {}
      if metadata is not None:
        relation['annotations']['metadata'] = copy.deepcopy(metadata)

      relation['annotations']['label'] = label if label is not None else kind
      if self._version is not None:
        relation['annotations']['createdBy'] = self._version

      self._context_relations.append(relation)
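
The previous/current timestamp maps above implement a simple inheritance rule: a relation keeps its first-seen timestamp for as long as it keeps reappearing in consecutive graph snapshots, and only genuinely new relations are stamped with the current time. A minimal standalone sketch of that rule (the `now()` helper and the map handling here are illustrative stand-ins, not the collector's own API):

import datetime

def now():
    return datetime.datetime.utcnow().isoformat()

def relation_timestamp(previous, current, source, target, kind):
    # Inherit the timestamp from the previous snapshot when possible.
    key = (source, target, kind)
    timestamp = previous.get(key) or now()
    current[key] = timestamp
    return timestamp

previous, current = {}, {}
t1 = relation_timestamp(previous, current, 'Pod:a', 'Container:b', 'contains')
previous, current = current, {}              # roll the snapshots forward
t2 = relation_timestamp(previous, current, 'Pod:a', 'Container:b', 'contains')
assert t1 == t2                              # the relation kept its timestamp
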
Example 6
 def monitor_handler(self, exception=None):
     """ monitor handler, defines what happens when a new value arrives;
     Currently a new values is added to the `values` list """
     try:
         if exception is None:
             #        if self.status == pyca.NO_ALARM:
             self.last_update = self.timestr()
             if (self.monitor_append): self.values.append(self.value)
             if (is_debug_on()):
                 print utilities.now() + " ",
                 print "monitoring %s %s" % (self.name, self.timestr()),
                 print self.value
         else:
             print "%-30s " % (self.name), exception
     except Exception, e:
         print e
Example 7
class User(db.Document):
    username = db.StringField(db_field="u", required=True)
    password = db.StringField(db_field="p", required=True)
    email = db.EmailField(db_field="e", required=True)
    firstname = db.StringField(db_field="fn", max_length=50)
    lastname = db.StringField(db_field="ln", max_length=50)
    # Pass the callable itself, not its result, so the timestamp is computed
    # when each document is created rather than once at import time.
    created = db.IntField(db_field="c", default=now)
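
A note on the `created` default above: the callable `now` is passed rather than its result `now()`. Calling it at class-definition time would freeze a single timestamp that every new document would then share. A minimal plain-Python sketch of the difference, using `time.time` as a stand-in for the example's `now` helper:

import time

def frozen_default(created=time.time()):      # default computed once, at import time
    return {'created': created}

def fresh_default(created=None):              # default computed at call time
    if created is None:
        created = time.time()
    return {'created': created}

time.sleep(0.01)
assert frozen_default() == frozen_default()   # always the same frozen value
assert fresh_default()['created'] <= fresh_default()['created']   # fresh each call
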
Example 8
  def test_resources(self):
    """Test the '/resources' endpoint."""
    start_time = utilities.now()
    ret_value = self.app.get('/cluster/resources')
    end_time = utilities.now()
    result = json.loads(ret_value.data)
    self.verify_resources(result, start_time, end_time)

    self.assertEqual(0, self.count_relations(result, 'contains'))
    self.assertEqual(0, self.count_relations(result, 'createdFrom'))
    self.assertEqual(0, self.count_relations(result, 'loadBalances'))
    self.assertEqual(0, self.count_relations(result, 'monitors'))
    self.assertEqual(0, self.count_relations(result, 'runs'))

    # The overall timestamp must be in the expected range.
    self.assertTrue(utilities.valid_string(result.get('timestamp')))
    self.assertTrue(start_time <= result['timestamp'] <= end_time)
Example 9
 def to_context_resources(self):
     """Returns just the resources in Cluster-Insight context graph format."""
     with self._lock:
         resources = {
             'success': True,
             'timestamp': self._max_resources_timestamp(utilities.now()),
             'resources': self._context_resources,
         }
         return resources
Example 10
 def to_context_resources(self):
   """Returns just the resources in Cluster-Insight context graph format."""
   with self._lock:
     resources = {
         'success': True,
         'timestamp': self._max_resources_timestamp(utilities.now()),
         'resources': self._context_resources,
     }
     return resources
Example 11
 def to_context_graph(self):
   """Returns the context graph in cluster-insight context graph format."""
   # return graph in Cluster-Insight context graph format.
   with self._lock:
     context_graph = {
         'success': True,
         'timestamp': self._max_relations_timestamp(
             self._max_resources_timestamp(utilities.now())),
         'resources': self._context_resources,
         'relations': self._context_relations
     }
     return context_graph
Example 12
  def test_make_response(self):
    """Tests make_response()."""
    # The timestamp of the first response is the current time.
    start_time = utilities.now()
    resp = utilities.make_response([], 'resources')
    end_time = utilities.now()
    # Note that timeless_json_hash() ignores the value of the timestamp.
    self.assertEqual(
        utilities.timeless_json_hash(
            {'success': True, 'timestamp': utilities.now(), 'resources': []}),
        utilities.timeless_json_hash(resp))
    self.assertTrue(start_time <= resp.get('timestamp') <= end_time)

    # The timestamp of the second response is the timestamp of the container.
    resp = utilities.make_response([CONTAINER], 'resources')
    self.assertEqual(
        utilities.timeless_json_hash(
            {'success': True, 'timestamp': utilities.now(), 'resources':
             [CONTAINER]}),
        utilities.timeless_json_hash(resp))
    self.assertEqual(CONTAINER['timestamp'], resp['timestamp'])
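
The test pins down the contract of make_response(): an empty resource list yields the current time as the overall timestamp, while a non-empty list inherits the timestamp of its resources. A minimal sketch consistent with that contract (not the actual utilities implementation; `now_iso` stands in for `utilities.now()`):

import datetime

def now_iso():
    return datetime.datetime.utcnow().isoformat()

def make_response(resources, key):
    """Wrap 'resources' under 'key' with a success flag and a timestamp."""
    if resources:
        timestamp = max(r['timestamp'] for r in resources)
    else:
        timestamp = now_iso()
    return {'success': True, 'timestamp': timestamp, key: resources}
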
Example 13
def _make_error(error_message):
  """Returns an error response in the Cluster-Insight context graph format.

  Args:
    error_message: the error message describing the failure.

  Returns:
    An error response in the cluster-insight context graph format.
  """
  assert isinstance(error_message, types.StringTypes) and error_message
  return {'success': False,
          'timestamp': utilities.now(),
          'error_message': error_message}
Example 14
  def test_timeless_json_hash(self):
    """Tests timeless_json_hash() with multiple similar and dissimilar objects.
    """
    a = {'uid': 'A', 'creationTimestamp': '2015-02-20T21:39:34Z'}

    # 'b1' and 'b2' differ only in the value of the 'lastHeartbeatTime'
    # attribute.
    b1 = {'uid': 'B', 'lastHeartbeatTime': '2015-03-13T22:32:15Z'}
    b2 = {'uid': 'B', 'lastHeartbeatTime': utilities.now()}

    # 'c1' and 'c2' differ only in the value of the 'resourceVersion'
    # attribute.
    c1 = {'uid': 'C', 'resourceVersion': '13'}
    c2 = {'uid': 'C', 'resourceVersion': '42'}

    # 'wrapped_xxx' objects look like the objects we normally keep in the cache.
    # The difference between 'wrapped_a1' and 'wrapped_a2' is the value of the
    # 'timestamp' attribute.
    wrapped_a1 = utilities.wrap_object(a, 'Node', 'aaa', time.time())
    wrapped_a2 = utilities.wrap_object(a, 'Node', 'aaa', time.time() + 100)

    # The differences between the 'wrapped_b1', 'wrapped_b2' and 'wrapped_b3'
    # objects are the values of the 'timestamp' and 'lastHeartbeatTime'
    # attributes.
    now = time.time()
    wrapped_b1 = utilities.wrap_object(b1, 'Node', 'bbb', now)
    wrapped_b2 = utilities.wrap_object(b2, 'Node', 'bbb', now)
    wrapped_b3 = utilities.wrap_object(b2, 'Node', 'bbb', now + 100)

    # The differences between the 'wrapped_c1' and 'wrapped_c2' objects are
    # the values of the 'timestamp' and 'resourceVersion' attributes.
    wrapped_c1 = utilities.wrap_object(c1, 'Node', 'bbb', now)
    wrapped_c2 = utilities.wrap_object(c2, 'Node', 'bbb', now + 100)

    self.assertEqual(utilities.timeless_json_hash(wrapped_a1),
                     utilities.timeless_json_hash(wrapped_a2))
    self.assertEqual(utilities.timeless_json_hash(wrapped_b1),
                     utilities.timeless_json_hash(wrapped_b2))
    self.assertEqual(utilities.timeless_json_hash(wrapped_b1),
                     utilities.timeless_json_hash(wrapped_b3))
    self.assertEqual(utilities.timeless_json_hash(wrapped_c1),
                     utilities.timeless_json_hash(wrapped_c2))

    # Verify that the hash values of lists of objects behave as expected.
    self.assertEqual(utilities.timeless_json_hash([wrapped_a1, wrapped_b3]),
                     utilities.timeless_json_hash([wrapped_a2, wrapped_b1]))

    # Verify that the hash values of dissimilar objects are not equal.
    self.assertTrue(utilities.timeless_json_hash(wrapped_a1) !=
                    utilities.timeless_json_hash(wrapped_b1))
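
The property being exercised here is that the hash ignores volatile, time-like attributes while remaining sensitive to everything else. A minimal sketch of such a hash; the exact set of ignored keys is an assumption for illustration, and the real utilities.timeless_json_hash() may differ:

import hashlib
import json

# Keys whose values are volatile and must not affect the hash (assumed set).
IGNORED_KEYS = frozenset(['timestamp', 'creationTimestamp', 'lastHeartbeatTime',
                          'lastProbeTime', 'resourceVersion'])

def _strip_times(obj):
    if isinstance(obj, dict):
        return {k: _strip_times(v) for k, v in obj.items()
                if k not in IGNORED_KEYS}
    if isinstance(obj, list):
        return [_strip_times(v) for v in obj]
    return obj

def timeless_json_hash(obj):
    canonical = json.dumps(_strip_times(obj), sort_keys=True)
    return hashlib.md5(canonical.encode('utf-8')).hexdigest()
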
Example 15
def _make_error(error_message):
    """Returns an error response in the Cluster-Insight context graph format.

    Args:
      error_message: the error message describing the failure.

    Returns:
      An error response in the cluster-insight context graph format.
    """
    assert isinstance(error_message, types.StringTypes) and error_message
    return {
        'success': False,
        'timestamp': utilities.now(),
        'error_message': error_message
    }
Example 16
def logprint(text,date=True,print_screen=None,newline=True):
  """ appends `text` to the logfile.
  Optional (booleans):
  date: put time stamp (with millisecond resolution in front of the string)
  print_screen: write also to screen
  newline: terminate with newline the string"""
  if ( (gLogf is None) or (gFilename.startswith(utilities.today()) != True) ): guessFilename()
  if (config.PRINT_DATE & date):
    text = "%s %s" % (utilities.now(),text)
  if (newline): text += "\n"
  if (print_screen is None): print_screen = config.LOGFILE_print_screen
  if (print_screen):
    sys.stdout.write(text)
    sys.stdout.flush()
  gLogf.write(text)
  gLogf.flush()
Example 17
 def to_context_graph(self):
     """Returns the context graph in cluster-insight context graph format."""
     # return graph in Cluster-Insight context graph format.
     with self._lock:
          context_graph = {
              'success': True,
              'timestamp': self._max_relations_timestamp(
                  self._max_resources_timestamp(utilities.now())),
              'resources': self._context_resources,
              'relations': self._context_relations
          }
         return context_graph
Example 18
  def max_resources_and_relations_timestamp(self):
    """Computes the maximal timestamp of all resources and relations.

    Must be called while holding self._lock.
    If there are no resources and no relations, return the current time.

    Returns:
      The maximum timestamp of all resources and relations.
    """
    max_timestamp = None
    for r in self._context_resources:
      if (max_timestamp is None) or (r['timestamp'] > max_timestamp):
        max_timestamp = r['timestamp']

    for r in self._context_relations:
      if (max_timestamp is None) or (r['timestamp'] > max_timestamp):
        max_timestamp = r['timestamp']

    return utilities.now() if max_timestamp is None else max_timestamp
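
The explicit comparison loop above relies on the fact that uniformly formatted ISO-8601 timestamp strings sort chronologically under plain lexicographic comparison, so no date parsing is needed. A short self-contained check of that assumption:

timestamps = [
    '2015-03-13T22:32:15Z',
    '2015-02-20T21:39:34Z',
    '2015-03-13T22:32:16Z',
]
assert max(timestamps) == '2015-03-13T22:32:16Z'   # lexicographic == chronological
assert '2015-02-20T21:39:34Z' < '2015-03-13T22:32:15Z'
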
Example 19
def update_unit_status(status, config, time_diff, iterator):
    """Combine values with cell positions in a dictionary."""
    s = types.SimpleNamespace()
    s.light_threshold_status = {}
    s.now_status = {}
    s.report_status = {}
    s.diff_status = {}
    s._now = now()
    s.timestamp = get_timestamp(status)
    s.light_threshold = get_light_threshold(config)
    s.light_threshold_status['value'] = s.light_threshold
    # light_threshold_status['cell'] = "C{}".format(str(i))
    s.light_threshold_status['cell'] = light_threshold_status_cell(iterator)

    s.now_status['value'] = s._now
    s.now_status['cell'] = time_checked_cell(iterator)

    s.report_status['value'] = s.timestamp
    s.report_status['cell'] = report_status_cell(iterator)

    try:
        s.time_diff = round(get_time_diff(s.timestamp, s._now))
    except TypeError:
        s.time_diff = -1

    check_battery = s.time_diff > 32
    s.check_battery = check_battery
    s.pretty_time = "CHECK BATTERY" if check_battery else get_pretty_time(
        s.time_diff)

    s.diff_status['value'] = "{}".format(s.pretty_time)
    s.diff_status['cell'] = time_since_last_report_cell(iterator)

    return s
Example 20
  def test_timeless_json_hash(self):
    """Tests timeless_json_hash() with multiple similar and dissimilar objects.
    """
    a = {'uid': 'A', 'creationTimestamp': '2015-02-20T21:39:34Z'}

    # 'b1' and 'b2' differ only in the value of the 'lastProbeTime' attribute.
    b1 = {'uid': 'B', 'lastProbeTime': '2015-03-13T22:32:15Z'}
    b2 = {'uid': 'B', 'lastProbeTime': utilities.now()}

    # 'wrapped_xxx' objects look like the objects we normally keep in the cache.
    # The difference between 'wrapped_a1' and 'wrapped_a2' is the value of the
    # 'timestamp' attribute.
    wrapped_a1 = utilities.wrap_object(a, 'Node', 'aaa', time.time())
    wrapped_a2 = utilities.wrap_object(a, 'Node', 'aaa', time.time() + 100)

    # The differences between the 'wrapped_b1', 'wrapped_b2' and 'wrapped_b3'
    # objects are the values of the 'timestamp' and 'lastProbeTime' attributes.
    now = time.time()
    wrapped_b1 = utilities.wrap_object(b1, 'Node', 'bbb', now)
    wrapped_b2 = utilities.wrap_object(b2, 'Node', 'bbb', now)
    wrapped_b3 = utilities.wrap_object(b2, 'Node', 'bbb', now + 100)

    self.assertEqual(utilities.timeless_json_hash(wrapped_a1),
                     utilities.timeless_json_hash(wrapped_a2))
    self.assertEqual(utilities.timeless_json_hash(wrapped_b1),
                     utilities.timeless_json_hash(wrapped_b2))
    self.assertEqual(utilities.timeless_json_hash(wrapped_b1),
                     utilities.timeless_json_hash(wrapped_b3))

    # Verify that the hash values of lists of objects behave as expected.
    self.assertEqual(utilities.timeless_json_hash([wrapped_a1, wrapped_b3]),
                     utilities.timeless_json_hash([wrapped_a2, wrapped_b1]))

    # Verify that the hash values of dissimilar objects are not equal.
    self.assertTrue(utilities.timeless_json_hash(wrapped_a1) !=
                    utilities.timeless_json_hash(wrapped_b1))
Example 21
def _do_compute_graph(gs, output_format):
  """Returns the context graph in the specified format.

  Args:
    gs: the global state.
    output_format: one of 'dot', 'context_graph', or 'resources'.

  Returns:
    A successful response in the specified format.

  Raises:
    CollectorError: inconsistent or invalid graph data.
  """
  assert isinstance(gs, global_state.GlobalState)
  assert utilities.valid_string(output_format)

  g = ContextGraph()
  g.set_relations_to_timestamps(gs.get_relations_to_timestamps())

  # Nodes
  nodes_list = kubernetes.get_nodes_with_metrics(gs)
  if not nodes_list:
    return g.dump(output_format)

  # Find the timestamp of the oldest node. This will be the timestamp of
  # the cluster.
  oldest_timestamp = utilities.now()
  for node in nodes_list:
    assert utilities.is_wrapped_object(node, 'Node')
    # note: we cannot call min(oldest_timestamp, node['timestamp']) here
    # because min(string) returns the smallest character in the string.
    if node['timestamp'] < oldest_timestamp:
      oldest_timestamp = node['timestamp']

  # The cluster name may be available through the Kubernetes API someday.
  # TODO(rimey): Determine the cluster name.
  cluster_name = '_unknown_'
  cluster_guid = 'Cluster:' + cluster_name
  g.set_title(cluster_name)
  g.add_resource(cluster_guid, {'label': cluster_name}, 'Cluster',
                 oldest_timestamp, {})

  # Nodes
  for node in nodes_list:
    _do_compute_node(cluster_guid, node, g)

  # Pods
  for pod in kubernetes.get_pods(gs):
    _do_compute_pod(cluster_guid, pod, g)

  # Services
  for service in kubernetes.get_services(gs):
    _do_compute_service(gs, cluster_guid, service, g)

  # ReplicationControllers
  for rcontroller in kubernetes.get_rcontrollers(gs):
    _do_compute_rcontroller(gs, cluster_guid, rcontroller, g)

  # Other nodes, not on the list, such as the Kubernetes master.
  _do_compute_other_nodes(gs, cluster_guid, nodes_list, oldest_timestamp, g)

  # Keep the relations_to_timestamps mapping for next call.
  gs.set_relations_to_timestamps(g.get_relations_to_timestamps())
  g.set_metadata({'timestamp': g.max_resources_and_relations_timestamp()})

  # Dump the resulting graph
  return g.dump(output_format)
Example 22
  def test_cluster(self):
    """Test the '/cluster' endpoint."""
    start_time = utilities.now()
    end_time = None
    for _ in range(2):
      # Exercise the collector. Read data from golden files and compute
      # a context graph.
      # The second iteration should read from the cache.
      ret_value = self.app.get('/cluster')
      if end_time is None:
        end_time = utilities.now()
      result = json.loads(ret_value.data)
      # The timestamps of the second iteration should be the same as in the
      # first iteration, because the data of the 2nd iteration should be
      # fetched from the cache, and it did not change.
      # Even if fetching the data caused an explicit reading from the files
      # in the second iteration, the data did not change, so it should keep
      # its original timestamp.
      self.verify_resources(result, start_time, end_time)

      self.assertEqual(5, self.count_relations(
          result, 'contains', 'Cluster', 'Node'))
      self.assertEqual(6, self.count_relations(
          result, 'contains', 'Cluster', 'Service'))
      self.assertEqual(3, self.count_relations(
          result, 'contains', 'Cluster', 'ReplicationController'))
      self.assertEqual(16, self.count_relations(
          result, 'contains', 'Pod', 'Container'))

      self.assertEqual(30, self.count_relations(result, 'contains'))
      self.assertEqual(16, self.count_relations(result, 'createdFrom'))
      self.assertEqual(7, self.count_relations(result, 'loadBalances'))
      self.assertEqual(6, self.count_relations(result, 'monitors'))
      self.assertEqual(14, self.count_relations(result, 'runs'))

      # Verify that all relations contain a timestamp in the range
      # [start_time, end_time].
      self.assertTrue(isinstance(result.get('relations'), list))
      for r in result['relations']:
        self.assertTrue(isinstance(r, dict))
        timestamp = r.get('timestamp')
        self.assertTrue(utilities.valid_string(timestamp))
        self.assertTrue(start_time <= timestamp <= end_time)

      # The overall timestamp must be in the expected range.
      self.assertTrue(utilities.valid_string(result.get('timestamp')))
      self.assertTrue(start_time <= result['timestamp'] <= end_time)

      # Wait a little to ensure that the current time is greater than
      # end_time
      time.sleep(1)
      self.assertTrue(utilities.now() > end_time)

    # Change the timestamp of the nodes in the cache.
    timestamp_before_update = utilities.now()
    gs = collector.app.context_graph_global_state
    nodes, timestamp_seconds = gs.get_nodes_cache().lookup('')
    self.assertTrue(isinstance(nodes, list))
    self.assertTrue(start_time <=
                    utilities.seconds_to_timestamp(timestamp_seconds) <=
                    end_time)
    # Change the first node to force the timestamp in the cache to change.
    # We have to change both the properties of the first node and its
    # timestamp, so the cache will store the new value (including the new
    # timestamp).
    self.assertTrue(len(nodes) >= 1)
    self.assertTrue(utilities.is_wrapped_object(nodes[0], 'Node'))
    nodes[0]['properties']['newAttribute123'] = 'the quick brown fox jumps over'
    nodes[0]['timestamp'] = utilities.now()
    gs.get_nodes_cache().update('', nodes)
    timestamp_after_update = utilities.now()
    _, timestamp_seconds = gs.get_nodes_cache().lookup('')
    self.assertTrue(timestamp_before_update <=
                    utilities.seconds_to_timestamp(timestamp_seconds) <=
                    timestamp_after_update)

    # Build the context graph again.
    ret_value = self.app.get('/cluster')
    result = json.loads(ret_value.data)
    self.verify_resources(result, start_time, timestamp_after_update)

    # Verify that all relations contain a timestamp in the range
    # [start_time, end_time].
    self.assertTrue(isinstance(result.get('relations'), list))
    for r in result['relations']:
      self.assertTrue(isinstance(r, dict))
      timestamp = r.get('timestamp')
      self.assertTrue(utilities.valid_string(timestamp))
      self.assertTrue(start_time <= timestamp <= end_time)

    # The overall timestamp must be in the expected range.
    self.assertTrue(utilities.valid_string(result.get('timestamp')))
    self.assertTrue(timestamp_before_update <= result['timestamp'] <=
                    timestamp_after_update)
Example 23
def doFolderOrFiles(folderNameOrFileList,
    skipFirst=0,
    forceChi=False,
    psize=100e-6,
    d = 0.1,
    xcen = 1024,
    ycen = 1024,
    lam = 1.,
    qbin = 0.01,
    tx = 0,
    ty = 0,
    x=None,
    y=None,
    folderOut=None,
    mask = None,
    waitForFiles=True,
    imageReader = readMar,
    ccdfilenames="*.mccd"
    ):
  """ Perform azimuthal averaging.
    folderNameOrFileList: can be either a folder (where the *.mccd will be found)
                          or a file list
    skipFirst: skip the first files (but be careful about ordering...)
    forceChi : calculate chi file even if output chi is present
    d = sample detector distance 
    psize = pixel size (in m)
    xcen,ycen = center of the image
    tx,ty = angle of detector normal with respect to incoming beam (in deg)
        zeros are for perpendicular configuration
    qbin = rebinning q (spacing or list)
    Pplane = Polarization (1 = horizontal, 0 = vertical)
    d     = distance of center of detector to sample (in m)
    lam   = wavelength in Ang
    folderOut : if None, same as folderNameOrFileList
    imageReader : function that takes a name and return the intensity matrix
    ccdfilenames : pattern to look for files
  """
  az = None
  t_start = util.now()
  while ((az is None) or (waitForFiles)):
    if (os.path.isdir(folderNameOrFileList)):
      f=os.popen("ls -1 %s/%s" % (folderNameOrFileList,ccdfilenames))
      temp=f.readlines()
      f.close()
      files = []
      for t in temp:
        files.append(t.strip())
      files = files[skipFirst:]
      if folderOut is None: folderOut = folderNameOrFileList
    else:
      if type(folderNameOrFileList) != list:
        folderNameOrFileList = (folderNameOrFileList,); # if we pass single file
      files=folderNameOrFileList[skipFirst:]
      if folderOut is None: folderOut = os.path.dirname(folderNameOrFileList[0])
    if (az is None):
      if not os.path.exists(folderOut): os.makedirs(folderOut)
      t0=time.time()
      f = files[0]
      iData = imageReader(f)
      i0_mask = iData != 0
      fname = folderOut+"/"+"azimuthal_averaging_info.png"
      az = azimuthal_averaging(mask&i0_mask,xcen,ycen,pixelsize=psize,x=x,y=y,d=d,
          lam=lam,qbin=qbin,img=iData,report_file=fname)
      #az.displaypolar(iData)
      print "Time needed for inizialization %.2f"%(time.time()-t0)
      fname = folderOut+"/"+"azimuthal_averaging_info.txt"
      finfo=open(fname,"w")
      finfo.write(az.header)
      finfo.close()
    if (len(files) == 0):
      print "Done %d files I could find, waiting for new files (%s)" % (skipFirst,util.now())
      time.sleep(10)
    t0=time.time()
    t_save=0.
    t_read=0.
    t_az=0.
    skip = 0
    flist = []
    data = []
    err  = []
    for f in files:
      fout = os.path.splitext(os.path.basename(f))[0]
      fout = folderOut + "/" + fout + ".chi"
      if (os.path.exists(fout) and not forceChi):
        skip += 1
        continue
      else:
        t1 = time.time()
        iData = imageReader(f)
        t_read += (time.time()-t1)
        t1 = time.time()
        az.do_azimuthal_averaging(iData)
        t_az += (time.time()-t1)
        t1 = time.time()
        az.saveChiFile(fout)
        t_save += (time.time()-t1)
        flist.append(f)
        data.append(az.I)
        err.append(az.sig)
    if ((len(files)-skip)!=0):
      nfiles = len(files)
      s="Time needed for %d files: %.2f ms/file"%(nfiles,(time.time()-t0)/nfiles*1e3)
      ttot = t_read+t_save+t_az
      s+="\n"+ "Fraction of time to read,calc,save     : %.2f,%.2f,%.2f" % (t_read/ttot,t_az/ttot,t_save/ttot)
      s+="\n"+ "Time per file    to read,calc,save (ms): %.2f,%.2f,%.2f" % \
          (t_read/nfiles*1e3,t_az/nfiles*1e3,t_save/nfiles*1e3)
      print s
      finfo=open(fname,"a")
      finfo.write(s)
      finfo.close()
      hname = folderOut + "/" + folderOut.rstrip("/").split("/")[-1]+".h5"
      t_end = util.now()
      if (not forceChi) and os.path.exists(hname):
        hchi = h5py.File(hname,"r")
        flist = [hchi["flist"].value,flist]
        data = [hchi["data"].value,data]
        err  = [hchi["err"].value,err]
        hchi.close()
      hchi = h5py.File(hname,"w")
      hchi.attrs["time_start"]=t_start
      hchi.attrs["time_end"]=t_end
      hchi.attrs["info"] = az.header
      hchi.attrs["time_bench"] = s
      hchi.create_dataset("flist",data=flist)
      hchi.create_dataset("data",data=data)
      hchi.create_dataset("err",data=err)
      hchi.create_dataset("q",data=az.q)
      hchi.create_dataset("theta",data=az.theta)
      hchi.create_dataset("Npixel",data=az.Npixel)
      hchi.close()
    skipFirst += len(files)-skip
  return az
Example 24
def doImages(listOfImgs,
    psize=100e-6,
    d = 0.1,
    xcen = 1024,
    ycen = 1024,
    lam = 1.,
    qbin = 0.01,
    tx = 0,
    ty = 0,
    x=None,
    y=None,
    folderOut="./",
    mask = None,
    force=False,
    nJobs = 4,
    hdf5out = None
    ):
  """ Perform azimuthal averaging.
    listOfImgs: list of images previously read
    d = sample detector distance 
    psize = pixel size (in m)
    xcen,ycen = center of the image
    tx,ty = angle of detector normal with respect to incoming beam (in deg)
        zeros are for perpendicular configuration
    qbin = rebinning q (spacing or list)
    Pplane = Polarization (1 = horizontal, 0 = vertical)
    d     = distance of center of detector to sample (in m)
    lam   = wavelength in Ang
    force = if True force reinitialization
    hdf5out = if not None, use as outfile name
    folderOut : 
  """
  print "NJOBS",nJobs
  t_start = util.now()
  t0=time.time()
  if (len(listOfImgs) == 0):
    return
  if (g_az is None):
    if (mask is None):
      mask=np.ones_like(listOfImgs[0],dtype=np.bool)
    t0=time.time()
    fname = folderOut+"/"+"azimuthal_averaging_info.png"
    az = azimuthal_averaging(mask,xcen,ycen,pixelsize=psize,x=x,y=y,d=d,
        lam=lam,qbin=qbin,report_file=fname)
    print "Time needed for inizialization %.2f"%(time.time()-t0)
    globals()["g_az"] = az
  else:
    az = g_az
  fname = folderOut+"/"+"azimuthal_averaging_info.txt"
  t0=time.time()
  nq = az.nq
  nImg = len(listOfImgs)
  if (nJobs > 1):
    import jobManager
    ag = jobManager.myAgent(nJobs=nJobs,parallel="thread")
    #ag = jobManager.myAgent(nMax=nJobs,parallel="process")
    n = int(np.ceil(float(nImg)/nJobs))
    for i in range(nJobs):
      m=i*n
      M=(i+1)*n
      M=min(M,nImg)
      ag.addJob( _doImages,(listOfImgs[m:M],az) )
    ag.waitUntilAllDone(update=0.05)
    #time.sleep(10)
    dataI=np.vstack ( [x[0] for x in ag.data] )
    #dataI=np.reshape(dataI, (nImg,az.nq) )
    dataE=np.vstack ( [x[1] for x in ag.data] )
    #dataE=np.reshape(dataE, (nImg,az.nq) )
  else:
    dataI,dataE=_doImages(listOfImgs,az)
  t_end = util.now()
  s="Time needed for %d images: %.2f ms/img"%(nImg,(time.time()-t0)/nImg*1e3)
  print s
  finfo=open(fname,"a")
  finfo.write(s)
  finfo.close()
  if hdf5out is not None:
    hname = folderOut + "/" + hdf5out
    hchi = h5py.File(hname,"w")
    hchi.attrs["time_start"]=t_start
    hchi.attrs["time_end"]=t_end
    hchi.attrs["info"] = az.header
    hchi.attrs["time_bench"] = s
    hchi.create_dataset("data",data=dataI)
    hchi.create_dataset("err",data=dataE)
    hchi.create_dataset("q",data=az.q)
    hchi.create_dataset("theta",data=az.theta)
    hchi.create_dataset("Npixel",data=az.Npixel)
    hchi.close()
  return dataI,dataE,az
Example 25
def _do_compute_graph(gs, input_queue, output_queue, output_format):
  """Returns the context graph in the specified format.

  Args:
    gs: the global state.
    input_queue: the input queue for the worker threads.
    output_queue: output queue containing exceptions data from the worker
        threads.
    output_format: one of 'graph', 'dot', 'context_graph', or 'resources'.

  Returns:
    A successful response in the specified format.

  Raises:
    CollectorError: inconsistent or invalid graph data.
  """
  assert isinstance(gs, global_state.GlobalState)
  assert isinstance(input_queue, Queue.PriorityQueue)
  assert isinstance(output_queue, Queue.Queue)
  assert utilities.valid_string(output_format)

  g = ContextGraph()
  g.set_version(docker.get_version(gs))
  g.set_metadata({'timestamp': utilities.now()})
  g.set_relations_to_timestamps(gs.get_relations_to_timestamps())

  # Nodes
  nodes_list = kubernetes.get_nodes_with_metrics(gs)
  if not nodes_list:
    return g.dump(gs, output_format)

  # Find the timestamp of the oldest node. This will be the timestamp of
  # the cluster.
  oldest_timestamp = utilities.now()
  for node in nodes_list:
    assert utilities.is_wrapped_object(node, 'Node')
    # note: we cannot call min(oldest_timestamp, node['timestamp']) here
    # because min(string) returns the smallest character in the string.
    if node['timestamp'] < oldest_timestamp:
      oldest_timestamp = node['timestamp']

  # Get the cluster name from the first node.
  # The cluster name is an approximation. It is not a big deal if it
  # is incorrect, since the aggregator knows the cluster name.
  cluster_name = utilities.node_id_to_cluster_name(nodes_list[0]['id'])
  cluster_guid = 'Cluster:' + cluster_name
  g.set_title(cluster_name)
  g.add_resource(cluster_guid, {'label': cluster_name}, 'Cluster',
                 oldest_timestamp, {})

  # Nodes
  for node in nodes_list:
    input_queue.put((
        gs.get_random_priority(),
        _do_compute_node,
        {'gs': gs, 'input_queue': input_queue, 'cluster_guid': cluster_guid,
         'node': node, 'g': g}))

  # Services
  for service in kubernetes.get_services(gs):
    input_queue.put((
        gs.get_random_priority(),
        _do_compute_service,
        {'gs': gs, 'cluster_guid': cluster_guid, 'service': service, 'g': g}))

  # ReplicationControllers
  rcontrollers_list = kubernetes.get_rcontrollers(gs)
  for rcontroller in rcontrollers_list:
    input_queue.put((
        gs.get_random_priority(),
        _do_compute_rcontroller,
        {'gs': gs, 'cluster_guid': cluster_guid, 'rcontroller': rcontroller,
         'g': g}))

  # Wait until worker threads finished processing all outstanding requests.
  # Once we return from the join(), all output was generated already.
  input_queue.join()

  # Convert any exception caught by the worker threads to an exception
  # raised by the current thread.
  if not output_queue.empty():
    msg = output_queue.get_nowait()  # should not fail.
    gs.logger_error(msg)
    raise collector_error.CollectorError(msg)

  # Keep the relations_to_timestamps mapping for next call.
  gs.set_relations_to_timestamps(g.get_relations_to_timestamps())

  # Dump the resulting graph
  return g.dump(gs, output_format)
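
The queue-based flow above (enqueue work items with a priority, join the input queue, then surface any worker exception from the output queue) can be reduced to a small standalone sketch; the worker() and run_all() names are illustrative, not the collector's own:

import queue       # named 'Queue' on Python 2
import threading

def worker(input_queue, output_queue):
    while True:
        _priority, _seq, func, kwargs = input_queue.get()
        try:
            func(**kwargs)
        except Exception as e:            # forward failures to the main thread
            output_queue.put('%s: %s' % (func.__name__, e))
        finally:
            input_queue.task_done()

def run_all(work_items, num_workers=4):
    """Run (priority, callable, kwargs) work items on a small thread pool."""
    input_queue, output_queue = queue.PriorityQueue(), queue.Queue()
    for _ in range(num_workers):
        t = threading.Thread(target=worker, args=(input_queue, output_queue))
        t.daemon = True
        t.start()
    for seq, (priority, func, kwargs) in enumerate(work_items):
        # 'seq' breaks priority ties so tuples never compare the callables.
        input_queue.put((priority, seq, func, kwargs))
    input_queue.join()                    # wait until all work is processed
    if not output_queue.empty():          # re-raise the first worker failure
        raise RuntimeError(output_queue.get_nowait())
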
Example 26
  def test_cluster(self):
    """Test the '/cluster' endpoint."""
    start_time = utilities.now()
    end_time = None
    for _ in range(2):
      # Exercise the collector. Read data from golden files and compute
      # a context graph.
      # The second iteration should read from the cache.
      ret_value = self.app.get('/cluster')
      if end_time is None:
        end_time = utilities.now()
      result = json.loads(ret_value.data)
      # The timestamps of the second iteration should be the same as in the
      # first iteration, because the data of the 2nd iteration should be
      # fetched from the cache, and it did not change.
      # Even if fetching the data caused an explicit reading from the files
      # in the second iteration, the data did not change, so it should keep
      # its original timestamp.
      self.verify_resources(result, start_time, end_time)

      self.assertEqual(5, self.count_relations(
          result, 'contains', 'Cluster', 'Node'))
      self.assertEqual(6, self.count_relations(
          result, 'contains', 'Cluster', 'Service'))
      self.assertEqual(3, self.count_relations(
          result, 'contains', 'Cluster', 'ReplicationController'))
      self.assertEqual(1, self.count_relations(
          result, 'contains', 'Node', 'Container'))
      self.assertEqual(4, self.count_relations(
          result, 'contains', 'Pod', 'Container'))
      self.assertEqual(7, self.count_relations(
          result, 'contains', 'Container', 'Process'))

      self.assertEqual(26, self.count_relations(result, 'contains'))
      self.assertEqual(3, self.count_relations(result, 'createdFrom'))
      self.assertEqual(7, self.count_relations(result, 'loadBalances'))
      self.assertEqual(6, self.count_relations(result, 'monitors'))
      self.assertEqual(14, self.count_relations(result, 'runs'))

      # Verify that all relations contain a timestamp in the range
      # [start_time, end_time].
      self.assertTrue(isinstance(result.get('relations'), types.ListType))
      for r in result['relations']:
        self.assertTrue(isinstance(r, types.DictType))
        timestamp = r.get('timestamp')
        self.assertTrue(utilities.valid_string(timestamp))
        self.assertTrue(start_time <= timestamp <= end_time)

      # The overall timestamp must be in the expected range.
      self.assertTrue(utilities.valid_string(result.get('timestamp')))
      self.assertTrue(start_time <= result['timestamp'] <= end_time)

      json_output = json.dumps(result, sort_keys=True)
      self.assertEqual(2, json_output.count('"alternateLabel": '))
      self.assertEqual(99, json_output.count('"createdBy": '))

      # Wait a little to ensure that the current time is greater than
      # end_time
      time.sleep(1)
      self.assertTrue(utilities.now() > end_time)

    # Change the timestamp of the nodes in the cache.
    timestamp_before_update = utilities.now()
    gs = collector.app.context_graph_global_state
    nodes, timestamp_seconds = gs.get_nodes_cache().lookup('')
    self.assertTrue(isinstance(nodes, types.ListType))
    self.assertTrue(start_time <=
                    utilities.seconds_to_timestamp(timestamp_seconds) <=
                    end_time)
    # Change the first node to force the timestamp in the cache to change.
    # We have to change both the properties of the first node and its
    # timestamp, so the cache will store the new value (including the new
    # timestamp).
    self.assertTrue(len(nodes) >= 1)
    self.assertTrue(utilities.is_wrapped_object(nodes[0], 'Node'))
    nodes[0]['properties']['newAttribute123'] = 'the quick brown fox jumps over'
    nodes[0]['timestamp'] = utilities.now()
    gs.get_nodes_cache().update('', nodes)
    timestamp_after_update = utilities.now()
    _, timestamp_seconds = gs.get_nodes_cache().lookup('')
    self.assertTrue(timestamp_before_update <=
                    utilities.seconds_to_timestamp(timestamp_seconds) <=
                    timestamp_after_update)

    # Build the context graph again.
    ret_value = self.app.get('/cluster')
    result = json.loads(ret_value.data)
    self.verify_resources(result, start_time, timestamp_after_update)

    # Verify that all relations contain a timestamp in the range
    # [start_time, end_time].
    self.assertTrue(isinstance(result.get('relations'), types.ListType))
    for r in result['relations']:
      self.assertTrue(isinstance(r, types.DictType))
      timestamp = r.get('timestamp')
      self.assertTrue(utilities.valid_string(timestamp))
      self.assertTrue(start_time <= timestamp <= end_time)

    # The overall timestamp must be in the expected range.
    self.assertTrue(utilities.valid_string(result.get('timestamp')))
    self.assertTrue(timestamp_before_update <= result['timestamp'] <=
                    timestamp_after_update)
Example 27
 def __init__(self, msg, fromfunction=None, showTimestamp=True):
   self.msg = msg
   self.time = utilities.now()
   self.fromfunction = fromfunction
   self.showTimestamp = showTimestamp
Example 28
def _do_compute_graph(gs, input_queue, output_queue, output_format):
    """Returns the context graph in the specified format.

    Args:
      gs: the global state.
      input_queue: the input queue for the worker threads.
      output_queue: output queue containing exceptions data from the worker
          threads.
      output_format: one of 'graph', 'dot', 'context_graph', or 'resources'.

    Returns:
      A successful response in the specified format.

    Raises:
      CollectorError: inconsistent or invalid graph data.
    """
    assert isinstance(gs, global_state.GlobalState)
    assert isinstance(input_queue, Queue.PriorityQueue)
    assert isinstance(output_queue, Queue.Queue)
    assert utilities.valid_string(output_format)

    g = ContextGraph()
    g.set_version(docker.get_version(gs))
    g.set_metadata({'timestamp': utilities.now()})
    g.set_relations_to_timestamps(gs.get_relations_to_timestamps())

    # Nodes
    nodes_list = kubernetes.get_nodes_with_metrics(gs)
    if not nodes_list:
        return g.dump(gs, output_format)

    # Find the timestamp of the oldest node. This will be the timestamp of
    # the cluster.
    oldest_timestamp = utilities.now()
    for node in nodes_list:
        assert utilities.is_wrapped_object(node, 'Node')
        # note: we cannot call min(oldest_timestamp, node['timestamp']) here
        # because min(string) returns the smallest character in the string.
        if node['timestamp'] < oldest_timestamp:
            oldest_timestamp = node['timestamp']

    # Get the cluster name from the first node.
    # The cluster name is an approximation. It is not a big deal if it
    # is incorrect, since the aggregator knows the cluster name.
    cluster_name = utilities.node_id_to_cluster_name(nodes_list[0]['id'])
    cluster_guid = 'Cluster:' + cluster_name
    g.set_title(cluster_name)
    g.add_resource(cluster_guid, {'label': cluster_name}, 'Cluster',
                   oldest_timestamp, {})

    # Nodes
    for node in nodes_list:
        input_queue.put((gs.get_random_priority(), _do_compute_node, {
            'gs': gs,
            'input_queue': input_queue,
            'cluster_guid': cluster_guid,
            'node': node,
            'g': g
        }))

    # Services
    for service in kubernetes.get_services(gs):
        input_queue.put((gs.get_random_priority(), _do_compute_service, {
            'gs': gs,
            'cluster_guid': cluster_guid,
            'service': service,
            'g': g
        }))

    # ReplicationControllers
    rcontrollers_list = kubernetes.get_rcontrollers(gs)
    for rcontroller in rcontrollers_list:
        input_queue.put((gs.get_random_priority(), _do_compute_rcontroller, {
            'gs': gs,
            'cluster_guid': cluster_guid,
            'rcontroller': rcontroller,
            'g': g
        }))

    # Wait until worker threads finished processing all outstanding requests.
    # Once we return from the join(), all output was generated already.
    input_queue.join()

    # Convert any exception caught by the worker threads to an exception
    # raised by the current thread.
    if not output_queue.empty():
        msg = output_queue.get_nowait()  # should not fail.
        gs.logger_error(msg)
        raise collector_error.CollectorError(msg)

    # Keep the relations_to_timestamps mapping for next call.
    gs.set_relations_to_timestamps(g.get_relations_to_timestamps())

    # Dump the resulting graph
    return g.dump(gs, output_format)