Example #1
0
    def delete(self, project_id, service_id):
        """Delete (or archive) a service's local configuration.

        Fetches the stored service row; if it exists, either archives it
        (when the driver is configured with ``archive_on_delete``) or
        deletes it outright. In both cases the service's claimed domains
        are passed along so the storage statements can relinquish them.

        :param project_id: owning project identifier
        :param service_id: service identifier (any UUID-compatible value)
        """
        # delete local configuration from storage
        args = {
            'project_id': project_id,
            'service_id': uuid.UUID(str(service_id)),
        }

        # get the existing service
        stmt = query.SimpleStatement(
            CQL_GET_SERVICE, consistency_level=self._driver.consistency_level)
        resultset = self.session.execute(stmt, args)
        complete_result = list(resultset)

        # Guard the indexing: on a missing service the original
        # `complete_result[0]` raised IndexError before the `if result:`
        # check below could ever run.
        result = complete_result[0] if complete_result else None

        if result:
            # Domain names are stored as JSON blobs; extract just the names.
            domains_list = [
                json.loads(d).get('domain')
                for d in result.get('domains', []) or []
            ]
            # NOTE(obulpathi): Convert a OrderedMapSerializedKey to a Dict
            pds = result.get('provider_details', {}) or {}
            pds = dict(pds.items())

            if self._driver.archive_on_delete:
                archive_args = {
                    'project_id': result.get('project_id'),
                    'service_id': result.get('service_id'),
                    'service_name': result.get('service_name'),
                    'flavor_id': result.get('flavor_id'),
                    'domains': result.get('domains', []),
                    'origins': result.get('origins', []),
                    'caching_rules': result.get('caching_rules', []),
                    'restrictions': result.get('restrictions', []),
                    'provider_details': pds,
                    'operator_status': result.get('operator_status',
                                                  'enabled'),
                    'archived_time': datetime.datetime.utcnow(),
                    'domains_list': query.ValueSequence(domains_list)
                }

                # archive and delete the service
                stmt = query.SimpleStatement(
                    CQL_ARCHIVE_SERVICE,
                    consistency_level=self._driver.consistency_level)
                self.session.execute(stmt, archive_args)
            else:
                delete_args = {
                    'project_id': result.get('project_id'),
                    'service_id': result.get('service_id'),
                    'domains_list': query.ValueSequence(domains_list)
                }
                stmt = query.SimpleStatement(
                    CQL_DELETE_SERVICE,
                    consistency_level=self._driver.consistency_level)
                self.session.execute(stmt, delete_args)
Example #2
0
def select(sess, stmt, ids):
    """Execute *stmt* binding *ids* as a single CQL value sequence.

    :param sess: Cassandra session used to run the statement
    :param stmt: statement with one ``IN ?``-style placeholder
    :param ids: iterable of ids; when ``None`` or empty no query is issued
    :return: list of result rows (empty when there was nothing to query)
    """
    # Truthiness covers both None and an empty collection in one check.
    if not ids:
        return []

    rs = sess.execute(stmt, parameters=[query.ValueSequence(ids)])
    # Materialize the result set so callers always receive a plain list.
    return list(rs)
Example #3
0
    def ego_graph(self, snid: int):
        """Build the ego graph (friends-of-friends subgraph) around *snid*.

        Returns ``(nodes, edges)`` where ``nodes`` is a list of dicts
        ``{'snid': ..., 'name': ...}`` and ``edges`` is a set of index
        pairs into that list. Returns ``(None, None)`` when *snid* has no
        friends row.
        """
        # query1: look up the friends list for the requested user.
        row = self.cassandra.session().execute(self.query1, [str(snid)],
                                               timeout=60).one()
        if not row or not row.friends:
            return None, None
        # Friends are stored as a whitespace-separated string of ids.
        friends = row.friends.split()
        nodes = {int(_id) for _id in friends}
        edge_nodes = {}  # only nodes present in edges
        edges = set()

        # query2: fetch the friends lists of all of snid's friends.
        rows = self.cassandra.session().execute(
            self.query2, [cquery.ValueSequence(friends)], timeout=60).all()
        for row in rows:
            if not row.friends:
                continue
            # i: dense index assigned to this row's node, created lazily
            # only if at least one of its friends is inside the ego set.
            i = None
            for _id in map(int, row.friends.split()):
                if _id not in nodes:
                    continue
                if i is None:
                    i = edge_nodes.setdefault(int(row.snid), len(edge_nodes))
                j = edge_nodes.setdefault(_id, len(edge_nodes))
                # Edges are stored as canonically ordered index pairs.
                edges.add(sort_pair(i, j))

        # query3: resolve display names only for nodes that appear in edges.
        new_friends = map(str, edge_nodes)
        rows = self.cassandra.session().execute(
            self.query3, [cquery.ValueSequence(new_friends)],
            timeout=60).all()

        # Pre-fill every slot so nodes missing from query3 still render.
        new_nodes = [None] * len(edge_nodes)
        for cur_snid, i in edge_nodes.items():
            new_nodes[i] = {'snid': cur_snid, 'name': 'not found'}

        for row in rows:
            cur_snid = int(row.snid)
            i = edge_nodes[cur_snid]
            # Prefer the profile text name; fall back to the plain name.
            name = row.profile_text_name if row.profile_text_name else row.name
            new_nodes[i]['name'] = name

        return new_nodes, edges
Example #4
0
def send_partition(entries, table_name, crit_size=500):
    """
    Collects rdd entries and sends as batch of CQL commands.
    Required by "save_to_database" function.

    :param entries: iterable of 4-tuples (group, cycle, id, metric)
    :param table_name: target table name, interpolated into the CQL text
    :param crit_size: number of statements per executed batch
    """

    # Initializes keyspace and CQL batch executor in Cassandra database
    db_session = cassc.Cluster(p["cassandra"]).connect(p["cassandra_key"])

    # NOTE(review): table_name is interpolated into the statement text,
    # not bound as a parameter -- it must come from trusted config only.
    # Prepares CQL statement, with interpolated table name, and placeholders
    cql_command = db_session.prepare("""
                                     UPDATE {} SET
                                     metric =  ? + metric
                                     WHERE group = ?
                                     AND cycle = ?
                                     AND id = ?;
                                     """.format(table_name))

    try:
        cql_batch = cassq.BatchStatement(
            consistency_level=cass.ConsistencyLevel.QUORUM)
        batch_size = 0

        for e in entries:
            # Interpolates prepared CQL statement with values from entry;
            # ValueSequence wraps the metric for the counter-style update.
            cql_batch.add(cql_command,
                          parameters=[cassq.ValueSequence((e[3],)),
                                      e[0],
                                      e[1],
                                      e[2]])
            batch_size += 1
            # Executes collected CQL commands, then re-initializes collection
            if batch_size == crit_size:
                db_session.execute(cql_batch)
                cql_batch = cassq.BatchStatement(
                    consistency_level=cass.ConsistencyLevel.QUORUM)
                batch_size = 0

        # Executes the final partial batch; skip the round trip when empty.
        if batch_size:
            db_session.execute(cql_batch)
    finally:
        # Always release the session, even when a batch execution raises;
        # the original leaked the connection on failure.
        db_session.shutdown()

    return None
Example #5
0
    def update(self, project_id, service_id, service_obj):
        """update.

        Persist *service_obj* over the existing stored service, claim any
        newly added domains, and relinquish domains that were removed.

        :param project_id
        :param service_id
        :param service_obj
        """

        # Serialize each nested object to a JSON blob for storage.
        service_name = service_obj.name
        domains = [json.dumps(d.to_dict()) for d in service_obj.domains]
        origins = [json.dumps(o.to_dict()) for o in service_obj.origins]
        caching_rules = [
            json.dumps(caching_rule.to_dict())
            for caching_rule in service_obj.caching
        ]
        restrictions = [
            json.dumps(r.to_dict()) for r in service_obj.restrictions
        ]

        pds = {
            provider:
            json.dumps(service_obj.provider_details[provider].to_dict())
            for provider in service_obj.provider_details
        }

        log_delivery = json.dumps(service_obj.log_delivery.to_dict())
        # fetch current domains
        args = {
            'project_id': project_id,
            'service_id': uuid.UUID(str(service_id)),
        }
        stmt = query.SimpleStatement(
            CQL_GET_SERVICE, consistency_level=self._driver.consistency_level)

        resultset = self.session.execute(stmt, args)
        complete_results = list(resultset)
        # NOTE(review): raises IndexError if the service does not exist --
        # presumably callers validate existence first; confirm upstream.
        result = complete_results[0]

        # updates an existing service
        args = {
            'project_id': project_id,
            'service_id': uuid.UUID(str(service_id)),
            'service_name': service_name,
            'flavor_id': service_obj.flavor_id,
            'domains': domains,
            'origins': origins,
            'caching_rules': caching_rules,
            'restrictions': restrictions,
            'provider_details': pds,
            'log_delivery': log_delivery,
            'operator_status': service_obj.operator_status
        }

        stmt = query.SimpleStatement(
            CQL_UPDATE_SERVICE,
            consistency_level=self._driver.consistency_level)
        self.session.execute(stmt, args)

        # claim new domains
        batch_claim = query.BatchStatement(
            consistency_level=self._driver.consistency_level)
        for d in service_obj.domains:
            domain_args = {
                'domain_name': d.domain,
                'project_id': project_id,
                'service_id': uuid.UUID(str(service_id))
            }
            batch_claim.add(query.SimpleStatement(CQL_CLAIM_DOMAIN),
                            domain_args)
        self.session.execute(batch_claim)

        # NOTE(TheSriram): We claim (CQL_CLAIM_DOMAIN) all the domains,
        # that got passed in. Now we create a set out of domains_new
        # (current domains present) and domains_old (domains present before
        # we made the current call). The set difference between old and new,
        # are the domains we need to delete (CQL_RELINQUISH_DOMAINS).

        domains_old = set([
            json.loads(d).get('domain')
            for d in result.get('domains', []) or []
        ])
        domains_new = set([json.loads(d).get('domain') for d in domains or []])

        # delete domains that no longer exist
        # relinquish old domains

        domains_delete = domains_old.difference(domains_new)
        if domains_delete:
            args = {'domain_list': query.ValueSequence(domains_delete)}
            stmt = query.SimpleStatement(
                CQL_RELINQUISH_DOMAINS,
                consistency_level=self._driver.consistency_level)
            self.session.execute(stmt, args)
Example #6
0
# Benchmark: fetch the MNIST dataset from Cassandra in shuffled batches.
# NOTE(review): both contact points are the same address -- presumably a
# typo for two distinct hosts; verify against the cluster config.
cluster = Cluster(['10.10.1.1', '10.10.1.1'])
session = cluster.connect('testkeyspace_raw')

start = timer()
data = {}  # maps row id -> [image, label]

# Prepared statement with an IN-clause placeholder for a batch of ids.
pre = session.prepare("SELECT * FROM mnist_raw WHERE id IN ?");

dataset_size = 60000
idx = [i for i in range(0, dataset_size)]
batch_size = 1
batch_num = int(math.ceil(dataset_size / batch_size))

for epoch in range(1, 2):
    # Fixed seed so the shuffle order is reproducible across runs.
    np.random.seed(9)
    np.random.shuffle(idx)
    print(idx[0:5])
    start = time.time()
    for i in range(0, batch_num):
        # min() clamps the final batch to the end of the dataset.
        idx_batch = idx[batch_size * i:min(dataset_size, batch_size * (i + 1))]
        #print(mnist_collection.count_documents({}))
        # down_dict format {id, image, label}
        # NOTE(review): ValueSequence is normally for simple-statement
        # interpolation; binding the list directly to the prepared IN ?
        # placeholder may be the intended usage -- confirm driver version.
        bind = pre.bind(query.ValueSequence([idx_batch]))
        rows = session.execute(bind)
        for row in rows:
            data.update({row.id: [row.image, row.label]}) 
    end = time.time()
    print(f"epoch time: {end - start}s")
 
Example #7
0
    def update(self, project_id, service_id, service_obj):
        """update.

        Persist *service_obj* over the existing stored service, relinquish
        every previously claimed domain, then re-claim the current domains.

        :param project_id
        :param service_id
        :param service_obj
        """

        # Serialize each nested object to a JSON blob for storage.
        service_name = service_obj.name
        domains = [json.dumps(d.to_dict())
                   for d in service_obj.domains]
        origins = [json.dumps(o.to_dict())
                   for o in service_obj.origins]
        caching_rules = [json.dumps(caching_rule.to_dict())
                         for caching_rule in service_obj.caching]
        restrictions = [json.dumps(r.to_dict())
                        for r in service_obj.restrictions]

        pds = {provider:
               json.dumps(service_obj.provider_details[provider].to_dict())
               for provider in service_obj.provider_details}

        log_delivery = json.dumps(service_obj.log_delivery.to_dict())
        # fetch current domains
        args = {
            'project_id': project_id,
            'service_id': uuid.UUID(str(service_id)),
        }
        stmt = query.SimpleStatement(
            CQL_GET_SERVICE,
            consistency_level=self._driver.consistency_level)
        results = self.session.execute(stmt, args)
        # NOTE(review): indexes the ResultSet directly -- newer driver
        # versions require list(results) first; confirm driver version.
        # Also raises if the service does not exist.
        result = results[0]

        # updates an existing service
        args = {
            'project_id': project_id,
            'service_id': uuid.UUID(str(service_id)),
            'service_name': service_name,
            'flavor_id': service_obj.flavor_id,
            'domains': domains,
            'origins': origins,
            'caching_rules': caching_rules,
            'restrictions': restrictions,
            'provider_details': pds,
            'log_delivery': log_delivery,
            'operator_status': service_obj.operator_status
        }

        stmt = query.SimpleStatement(
            CQL_UPDATE_SERVICE,
            consistency_level=self._driver.consistency_level)
        self.session.execute(stmt, args)

        # relinquish old domains
        # NOTE(review): ALL previous domains are relinquished before the
        # re-claim below runs -- retained domains are briefly unclaimed.
        stmt = query.SimpleStatement(
            CQL_RELINQUISH_DOMAINS,
            consistency_level=self._driver.consistency_level)
        domain_list = [json.loads(d).get('domain')
                       for d in result.get('domains', []) or []]
        args = {
            'domain_list': query.ValueSequence(domain_list)
        }
        self.session.execute(stmt, args)

        # claim new domains
        batch_claim = query.BatchStatement(
            consistency_level=self._driver.consistency_level)
        for d in service_obj.domains:
            domain_args = {
                'domain_name': d.domain,
                'project_id': project_id,
                'service_id': uuid.UUID(str(service_id))
            }
            batch_claim.add(query.SimpleStatement(CQL_CLAIM_DOMAIN),
                            domain_args)
        self.session.execute(batch_claim)