Ejemplo n.º 1
0
    def test_metadata_updated(self):
        """Ensures metadata is updated."""
        def stub_json_request(*_args, **_kwargs):
            # Pretend GCE returned current metadata with a fingerprint.
            return {'metadata': {'fingerprint': 'fingerprint', 'items': []}}

        def stub_set_metadata(*_args, **_kwargs):
            # Pretend set_metadata returned an operation carrying a URL.
            return collections.namedtuple('operation', ['url'])(url='url')

        self.mock(metadata.net, 'json_request', stub_json_request)
        self.mock(metadata.gce.Project, 'set_metadata', stub_set_metadata)

        instance_key = instances.get_instance_key(
            'base-name',
            'revision',
            'zone',
            'instance-name',
        )
        key = models.Instance(
            key=instance_key,
            active_metadata_update=models.MetadataUpdate(
                metadata={'key': 'value'},
            ),
        ).put()
        models.InstanceGroupManager(key=key.parent()).put()
        models.InstanceTemplateRevision(
            key=key.parent().parent(),
            project='project',
        ).put()

        metadata.update(key)
        # The operation URL from set_metadata must be recorded on the entity.
        self.assertEqual(key.get().active_metadata_update.url, 'url')
Ejemplo n.º 2
0
    def post(self):
        """Schedules a metadata update for the given Instance.

        Params:
          key: URL-safe key for a models.Instance.
        """
        instance_key = ndb.Key(urlsafe=self.request.get('key'))
        assert instance_key.kind() == 'Instance', instance_key
        metadata.update(instance_key)
Ejemplo n.º 3
0
  def post(self):
    """Schedules a metadata update for the given Instance.

    Params:
      key: URL-safe key for a models.Instance.
    """
    raw_key = self.request.get('key')
    instance_key = ndb.Key(urlsafe=raw_key)
    assert instance_key.kind() == 'Instance', instance_key
    metadata.update(instance_key)
def read_service(srv, path, prev_cluster_metadata):
    """Returns the metric DataFrame for *srv*, columns namespaced by service.

    When no cluster is preferred (preferred == 0) the preprocessed CSV is
    loaded and every column is z-score normalised; otherwise only the best
    representative columns of the preferred cluster are used.  As a side
    effect, the preferred cluster and its representative metrics are
    recorded in the shared metadata file.
    """
    preferred = preferred_cluster(srv["clusters"])
    if preferred == 0:
        csv_path = os.path.join(path, srv["preprocessed_filename"])
        df = pd.read_csv(csv_path, sep="\t", index_col='time',
                         parse_dates=True)
        for column in df.columns:
            df[column] = zscore(df[column])
    else:
        chosen = srv["clusters"][str(preferred)]
        rep_metrics, df = best_column_of_cluster(
            srv["name"], chosen["filenames"], path, prev_cluster_metadata)

    # write additional metadata about components:
    #   - the preferred cluster for the component (is it really necessary?)
    #   - the representative metrics for each cluster
    with metadata.update(path) as data:
        for entry in data["services"]:
            if entry["name"] != srv["name"]:
                continue
            entry.setdefault("pref_cluster", preferred)
            if preferred == 0:
                continue
            cluster_entry = entry["clusters"][str(preferred)]
            if "rep_metrics" not in cluster_entry:
                cluster_entry["rep_metrics"] = rep_metrics

    # Prefix foreign column names with the service name so metrics from
    # different services cannot collide after merging.
    name_prefix = srv["name"] + APP_METRIC_DELIMITER
    df.columns = [
        column if column.startswith(srv["name"]) else name_prefix + column
        for column in df.columns
    ]
    return df
Ejemplo n.º 5
0
    def test_parent_not_found(self):
        """Ensures nothing happens when the parent doesn't exist."""
        instance_key = instances.get_instance_key(
            'base-name',
            'revision',
            'zone',
            'instance-name',
        )
        key = models.Instance(
            key=instance_key,
            active_metadata_update=models.MetadataUpdate(
                metadata={'key': 'value'},
            ),
        ).put()

        metadata.update(key)
        # No parent entities exist, so no URL may have been recorded.
        self.failIf(key.get().active_metadata_update.url)
Ejemplo n.º 6
0
    def test_project_unspecified(self):
        """Ensures nothing happens when project is unspecified."""
        instance_key = instances.get_instance_key(
            'base-name',
            'revision',
            'zone',
            'instance-name',
        )
        key = models.Instance(
            key=instance_key,
            active_metadata_update=models.MetadataUpdate(
                metadata={'key': 'value'},
            ),
        ).put()
        models.InstanceGroupManager(key=key.parent()).put()
        # Template revision deliberately created without a project.
        models.InstanceTemplateRevision(key=key.parent().parent()).put()

        metadata.update(key)
        self.failIf(key.get().active_metadata_update.url)
Ejemplo n.º 7
0
    def create_layer(self, model, l_name, l_enabled, l_metadata=None):
        """Creates and registers a new layer on this map.

        Args:
          model: object whose configure_layer() sets the layer up.
          l_name: unique name for the new layer.
          l_enabled: enabled flag forwarded to model.configure_layer().
          l_metadata: optional dict of extra metadata entries; they
            override the defaults derived from the map.

        Raises:
          KeyExists: if a layer named l_name already exists.
        """
        # Mutable default arguments are shared across calls, so the old
        # `l_metadata={}` default was a latent bug; use None as sentinel.
        if self.has_layer(l_name):
            raise KeyExists(l_name)

        # Create the layer.
        layer = Layer(mapscript.layerObj(self.ms))

        # Configure the layer according to the model.
        model.configure_layer(layer, l_enabled)

        layer.ms.name = l_name
        layer.ms.template = "foo.html"

        # Base metadata inherited from the map, then caller overrides.
        # (Named layer_metadata to avoid shadowing the `metadata` module.)
        layer_metadata = {
            "wms_srs": self.get_metadata("ows_srs"),
            }
        if l_metadata:
            layer_metadata.update(l_metadata)
        layer.update_metadatas(layer_metadata)

        # Set default style.
        layer.set_default_style(self)
Ejemplo n.º 8
0
    def create_layer(self, model, l_name, l_enabled, l_metadata=None):
        """Creates and registers a new layer on this map.

        Args:
          model: object whose configure_layer() sets the layer up.
          l_name: unique name for the new layer.
          l_enabled: enabled flag forwarded to model.configure_layer().
          l_metadata: optional dict of extra metadata entries; they
            override the defaults derived from the map.

        Raises:
          KeyExists: if a layer named l_name already exists.
        """
        # Mutable default arguments are shared across calls, so the old
        # `l_metadata={}` default was a latent bug; use None as sentinel.
        if self.has_layer(l_name):
            raise KeyExists(l_name)

        # Create the layer.
        layer = Layer(mapscript.layerObj(self.ms))

        # Configure the layer according to the model.
        model.configure_layer(layer, l_enabled)

        layer.ms.name = l_name
        layer.ms.template = "foo.html"

        # Base metadata inherited from the map, then caller overrides.
        # (Named layer_metadata to avoid shadowing the `metadata` module.)
        layer_metadata = {
            "wms_srs": self.get_metadata("ows_srs"),
            }
        if l_metadata:
            layer_metadata.update(l_metadata)
        layer.update_metadatas(layer_metadata)

        # Set default style.
        layer.set_default_style(self)
Ejemplo n.º 9
0
def cluster_service(path, service, cluster_size, prev_metadata=None):
    """Clusters the metrics of *service* into *cluster_size* groups (k-Shape).

    Args:
      path: directory containing the preprocessed measurement files.
      service: service description dict with at least "name" and
        "preprocessed_filename" keys.
      cluster_size: requested number of clusters; overridden when
        *prev_metadata* yields an initial cluster assignment.
      prev_metadata: optional metadata of a previous run used to seed the
        clustering.

    Returns:
      (service_name, cluster_size) on success; (None, None) when the result
      already exists on disk or no silhouette score can be computed.
    """
    filename = os.path.join(path, service["preprocessed_filename"])
    df = pd.read_csv(filename, sep="\t", index_col='time', parse_dates=True)

    initial_idx = None
    if prev_metadata:
        initial_idx = get_initial_clustering(service["name"], prev_metadata,
                                             df.columns)
        # adjust cluster_size if an initial assigment has been found
        if initial_idx is not None:
            cluster_size = len(np.unique(initial_idx))

    prefix = "%s/%s-cluster-%d" % (path, service["name"], cluster_size)
    if os.path.exists(prefix + "_1.png"):
        print("skip " + prefix)
        return (None, None)

    cluster_metrics, score, filenames = do_kshape(prefix, df, cluster_size,
                                                  initial_idx)
    if cluster_size < 2:
        # no silhouette_score for cluster size 1
        return (None, None)
    print("silhouette_score: %f" % score)

    # Serialize write access to the metadata file.  Using the lock as a
    # context manager guarantees release even if the update raises — the
    # old acquire()/release() pair would leave the lock held on error.
    with metadata_lock:
        with metadata.update(path) as data:
            for srv in data["services"]:
                if srv["name"] == service["name"]:
                    srv.setdefault("clusters", {})
                    # NOTE(review): read_service looks clusters up with str
                    # keys (str(preferred)); this int key only matches after
                    # a JSON round-trip — confirm, consider str(cluster_size).
                    srv["clusters"][cluster_size] = dict(
                        silhouette_score=score,
                        filenames=filenames,
                        metrics=cluster_metrics)

    return (service["name"], cluster_size)
Ejemplo n.º 10
0
 def test_not_found(self):
     """Ensures nothing happens when the entity doesn't exist."""
     missing_key = ndb.Key(models.Instance, 'fake-key')
     metadata.update(missing_key)
     # The entity must still not exist after the update attempt.
     self.failIf(missing_key.get())
Ejemplo n.º 11
0
def trigger_update():
	"""Runs a metadata update and answers with 204 No Content."""
	metadata.update()
	return flask.Response(status=204)
Ejemplo n.º 12
0
 def get(self):
   """Kicks off a metadata update and acknowledges in plain text."""
   metadata.update()
   self.response.headers['Content-Type'] = 'text/plain'
   self.response.out.write('Metadata will now be updated in the background.')