Example 1
def getByID(objid):
    """
    We rebuild the object from its id. The id can either be:
    block: UUID (eg. f291f008-a520-11e6-b42e-5b582e04fd70)
    storageobj: UUID_(version) (eg. f291f008-a520-11e6-b42e-5b582e04fd70_1)

    Args:
        objid (str):  object identifier

    Returns:
         (Block| Storageobj)

    """
    """
               TODO
               Args:
                   objid (str):  object identifier
               Returns:
                    (Block| Storageobj)
               """
    import uuid

    from hecuba import log
    from hecuba.IStorage import IStorage

    try:
        from hecuba import config
        query = "SELECT * FROM hecuba.istorage WHERE storage_id = %s"
        results = config.session.execute(query, [uuid.UUID(objid)])[0]
    except Exception as e:
        log.error("Query %s failed", query)
        raise e

    log.debug("IStorage API:getByID(%s) of class %s", objid,
              results.class_name)
    return IStorage.build_remotely(results._asdict())
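A minimal usage sketch (not from the original file), assuming a configured Hecuba session; the keyspace, table and column names are illustrative:

# Hedged sketch: rebuild a persistent object from the UUID stored in hecuba.istorage.
from hecuba import StorageDict

words = StorageDict("my_app.words",
                    primary_keys=[("position", "int")],
                    columns=[("word", "text")])
same_words = getByID(str(words._storage_id))   # returns an equivalent StorageDict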
Example 2
 def delete_persistent(self):
     """
     Method to empty all data assigned to a StorageDict.
     """
     query = "TRUNCATE TABLE %s.%s;" % (self._ksp, self._table)
     log.debug('DELETE PERSISTENT: %s', query)
     config.session.execute(query)
Example 3
 def stop_persistent(self):
     """
         The StorageObj stops being persistent, but keeps the information already stored in Cassandra
     """
     log.debug("STOP PERSISTENT")
     for persistent_dict in self._persistent_dicts:
         persistent_dict.stop_persistent()
Example 4
    def __init__(self,
                 primary_keys,
                 columns,
                 name,
                 qbeast_meta,
                 qbeast_id=None,
                 entry_point=None,
                 storage_id=None,
                 tokens=None):
        """
        Creates a new QbeastIterator.
        Args:
            primary_keys (list(tuple)): a list of (key,type) primary keys (primary + clustering).
            columns (list(tuple)): a list of (key,type) columns
            name (string): the name of the collection/table (keyspace.table)
            qbeast_meta: Qbeast metadata
            qbeast_id (uuid): Qbeast identifier
            entry_point: Qbeast entry point
            storage_id (uuid): the storage identifier
            tokens (list): list of token ranges
        """
        log.debug(
            "CREATED QbeastIterator(%s,%s,%s,%s)",
            storage_id,
            tokens,
        )
        self._selects = map(lambda a: a[0], primary_keys + columns)
        key_namedtuple = namedtuple("key", map(lambda a: a[0], primary_keys))
        value_namedtuple = namedtuple("value", map(lambda a: a[0], columns))
        div = len(primary_keys)
        self._row_builder = lambda a: self._row_namedtuple(
            key_namedtuple(*a[:div]), value_namedtuple(*a[div:]))
        (self._ksp, self._table) = self._extract_ks_tab(name)
        self._qbeast_meta = qbeast_meta
        self._qbeast_id = qbeast_id
        self._entry_point = entry_point
        if tokens is None:
            log.info('using all tokens')
            tokens = map(lambda a: a.value,
                         config.cluster.metadata.token_map.ring)
            self._tokens = IStorage._discrete_token_ranges(tokens)
        else:
            self._tokens = tokens

        class_name = '%s.%s' % (self.__class__.__module__,
                                self.__class__.__name__)

        # primary_keys columns name tokens
        # indexed_args nonindexed_args value_list
        # mem_filter port storage_id class_name
        if storage_id is None:
            self._storage_id = uuid.uuid4()
            save = True
        else:
            self._storage_id = storage_id
            save = False
        self._build_args = self._building_args(primary_keys, columns, name,
                                               qbeast_meta, qbeast_id,
                                               entry_point, self._storage_id,
                                               self._tokens, class_name)
        if save:
            self._store_meta(self._build_args)
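A hedged instantiation sketch for the constructor above; the keyspace, columns and the empty Qbeast metadata are assumptions made only for illustration:

# Hypothetical example: iterate an indexed particle table, letting the
# constructor pick all tokens and generate a fresh storage_id.
it = QbeastIterator(primary_keys=[("partid", "int"), ("time", "float")],
                    columns=[("x", "float"), ("y", "float"), ("z", "float")],
                    name="my_app.particle",
                    qbeast_meta=None)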
Example 5
    def get(self, name, attribute=None):
        """
        Returns a filtered JSON object with a specific cluster and its
        attributes when an HTTP GET request is made.

        Args:
            name (str): The unique name of the cluster
            attribute (Optional|str): The name of a unique cluster attribute
                to show. Defaults to None.
        """
        log.debug("Checking if cluster %s exists." % name)
        rs = models.Cluster.query.get(name)
        if (rs is None):
            log.debug("Cluster %s doesn't exist." % name)
            abort(404, message="Cluster {} doesn't exist.".format(name))
        
        c = rs.serialize

        if attribute is None:
            # grab all nodes from current cluster
            c['nodes'] = []
            log.debug("Getting all nodes for cluster %s." % name)
            nodes = rs.nodes.all()
            for n in nodes:
                c['nodes'].append(n.serialize)

            return c
        else:
            try:
                log.debug("Returning cluster %s attribute %s with value %s." % (name, attribute, c[attribute]))
                return { attribute: c[attribute] }
            except KeyError:
                log.debug("Client requested attribute %s from cluster %s which is invalid." % (attribute, name))
                return { 'message': "Requested attribute is invalid." }, 400
Example 6
    def get(self, name, attribute=None):
        """
        Returns a filtered JSON object with a specific node and its
        attributes when an HTTP GET request is made.

        Args:
            name (str): The unique name of the node
            attribute (Optional|str): The name of a unique node attribute
                to show. Defaults to None.
        """
        log.debug("Checking if node %s exists." % name)
        n = models.Node.query.get(name)
        if (n is None):
            log.debug("Node %s doesn't exist." % name)
            abort(404, message="Node {} doesn't exist.".format(name))

        n = n.serialize

        if attribute is None:
            log.debug("Showing node %s." % name)
            return n
        else:
            try:
                log.debug("Returning node %s attribute %s with value %s." % (name, attribute, n[attribute]))
                return { attribute: n[attribute] }
            except KeyError:
                log.debug("Client requested attribute %s from node %s which is invalid." % (attribute, name))
                return { 'message': "Requested attribute is invalid." }, 400
Example 7
 def stop_persistent(self):
     """
     Method to turn a StorageDict into non-persistent.
     """
     log.debug('STOP PERSISTENCE: %s', self._table)
     self._is_persistent = False
     self._hcache = None
Example 8
    def split(self):
        '''
        config.partition_strategy == "DYNAMIC"
        Data will be partitioned in config.number_of_partitions different chunks
        :return: an iterator over partitions
        '''
        st = time.time()
        tokens = self._father._build_args.tokens

        for token_split in self._tokens_partitions(
                tokens, config.min_number_of_tokens):
            storage_id = uuid.uuid4()
            log.debug('assigning to %s %d  tokens', str(storage_id),
                      len(token_split))
            new_args = self._father._build_args._replace(tokens=token_split,
                                                         storage_id=storage_id)
            partitioned_object = self._father.__class__.build_remotely(
                new_args._asdict())
            config.session.execute(self._prepared_store_id, [
                self._partitioning_uuid, partitioned_object._storage_id,
                config.number_of_partitions
            ])
            self._idle_nodes -= 1
            yield partitioned_object
        log.debug('completed split of %s in %f',
                  self._father.__class__.__name__,
                  time.time() - st)
Example 9
def error_handler():
    """
    Returns HTTP Forbidden when an authentication error occurs.

    Also used as the Flask-HTTPAuth callback when user authentication fails.
    """
    log.debug("Access was denied while requesting a view.")
    abort(403, message="Access denied.")
Example 10
 def __setitem__(self, key, val):
     """
        Method to insert values in the StorageDict
        Args:
            key: the position of the value that we want to save
            val: the value that we want to save in that position
     """
     if isinstance(val, np.ndarray):
         val = StorageNumpy(val)
     log.debug('SET ITEM %s->%s', key, val)
     if not config.hecuba_type_checking:
         if not self._is_persistent:
             dict.__setitem__(self, key, val)
         else:
             if isinstance(val, IStorage) and not val._is_persistent:
                 attribute = val.__class__.__name__.lower()
                 count = self._count_name_collision(attribute)
                 # new name as ksp+table+obj_class_name
                 val.make_persistent(self._ksp + '.' + self._table + "_" +
                                     attribute + "_" + str(count))
             self._hcache.put_row(self._make_key(key),
                                  self._make_value(val))
     else:
         if isinstance(val, Iterable) and not isinstance(val, str):
             col_types = map(
                 lambda x: IStorage._conversions[x.__class__.__name__], val)
             spec_col_types = map(lambda x: x[1], self._columns)
             for idx, value in enumerate(spec_col_types):
                 if value == 'double':
                     spec_col_types[idx] = 'float'
         else:
             col_types = IStorage._conversions[val.__class__.__name__]
             spec_col_types = map(lambda x: x[1], self._columns)[0]
             if spec_col_types == 'double':
                 spec_col_types = 'float'
         if isinstance(key, Iterable) and not isinstance(key, str):
             key_types = map(
                 lambda x: IStorage._conversions[x.__class__.__name__], key)
             spec_key_types = map(lambda x: x[1], self._primary_keys)
             for idx, value in enumerate(spec_key_types):
                 if value == 'double':
                     spec_key_types[idx] = 'float'
         else:
             key_types = IStorage._conversions[key.__class__.__name__]
             spec_key_types = map(lambda x: x[1], self._primary_keys)[0]
             if spec_key_types == 'double':
                 spec_key_types = 'float'
         if (col_types == spec_col_types):
             if (key_types == spec_key_types):
                 if not self._is_persistent:
                     dict.__setitem__(self, key, val)
                 else:
                     self._hcache.put_row(self._make_key(key),
                                          self._make_value(val))
             else:
                 raise KeyError
         else:
             raise ValueError
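A short hedged sketch of the behaviour above; the StorageDict declaration is illustrative and assumes config.hecuba_type_checking is disabled:

import numpy as np

# numpy values are wrapped into a StorageNumpy before being stored; while the
# dict is not persistent the entry is simply kept in memory.
frames = StorageDict(None,
                     primary_keys=[("pid", "int")],
                     columns=[("frame", "numpy.ndarray")])
frames[0] = np.zeros((4, 4))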
Example 11
    def build_remotely(result):
        """
        Launches the StorageDict.__init__ from the api.getByID
        Args:
            result: a namedtuple with all the information needed to recreate the StorageDict
        """
        log.debug("Building Storage dict with %s", result)

        return StorageDict(result.name, result.primary_keys, result.columns,
                           result.tokens, result.storage_id, result.indexed_on)
Example 12
 def build_remotely(new_args):
     """
         Launches the StorageNumpy.__init__ from the uuid api.getByID
         Args:
             new_args: a list with all the information needed to recreate the StorageNumpy
         Returns:
             so: the created StorageNumpy
     """
     log.debug("Building StorageNumpy object with %s", new_args)
     return StorageNumpy(new_args.storage_id)
Example 13
    def delete_persistent(self):
        """
            Deletes the Cassandra table where the persistent StorageObj stores data
        """
        self._is_persistent = False

        query = "DELETE FROM %s.%s WHERE storage_id = %s;" % (
            self._ksp, self._table + '_numpies', self._storage_id)
        log.debug("DELETE PERSISTENT: %s", query)
        config.session.execute(query)
Example 14
    def get(self):
        """
        Returns a filtered JSON object with all the nodes and their
        attributes when an HTTP GET request is made.
        """
        log.debug("Getting nodes collection information.")
        nodes = []
        for n in models.Node.query.all():
            nodes.append(n.serialize)

        return { 'nodes': marshal(nodes, models.node_fields) }
Example 15
    def get(self):
        """
        Returns a filtered JSON object with all the clusters and their
        attributes when an HTTP GET request is made.
        """
        log.debug("Getting clusters collection information.")
        clusters = []
        for c in models.Cluster.query.all():
            clusters.append(c.serialize)

        return { 'clusters': marshal(clusters, models.cluster_fields) }
Example 16
    def build_remotely(result):
        """
        Launches the QbeastIterator.__init__ from api.getByID
        Args:
            result: a namedtuple with all the information needed to recreate the iterator
        """
        log.debug("Building QbeastIterator with %s", result)

        return QbeastIterator(result.primary_keys, result.columns, result.name,
                              result.qbeast_meta, result.qbeast_id,
                              result.entry_point, result.storage_id,
                              result.tokens)
Example 17
def generate_password(password):
    """
    Generates a bcrypt hash using "BCRYPT_ROUNDS" rounds from configuration

    Args:
        password (str): The password that will be hashed

    Returns:
        str: The hashed password
    """
    log.debug("Generating password with %s bcrypt rounds" % hecuba.config['BCRYPT_ROUNDS'])
    return bcrypt.hashpw(password, bcrypt.gensalt(hecuba.config['BCRYPT_ROUNDS']))
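A hedged usage sketch; the round count and plaintext are illustrative, and the last line is just the standard bcrypt verification pattern used elsewhere in this module:

hecuba.config['BCRYPT_ROUNDS'] = 12          # illustrative configuration value
hashed = generate_password("s3cret")
assert bcrypt.hashpw("s3cret", hashed) == hashed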
Example 18
    def __setattr__(self, attribute, value):
        """
            Given a key and its value, this function saves it (depending on if it's persistent or not):
                a) In memory
                b) In the DB
            Args:
                attribute: name of the value that we want to set
                value: value that we want to save
        """
        if attribute[0] == '_':
            object.__setattr__(self, attribute, value)
            return

        if attribute in self._persistent_attrs:
            if config.hecuba_type_checking and value is not None and not isinstance(value, dict) and \
                            IStorage._conversions[value.__class__.__name__] != self._persistent_props[attribute][
                        'type']:
                raise TypeError

            if not isinstance(value, IStorage):
                if isinstance(value, np.ndarray):
                    value = StorageNumpy(value)
                elif isinstance(value, dict):
                    per_dict = self._persistent_props[attribute]
                    indexed_args = per_dict.get('indexed_values', None)
                    new_value = StorageDict(None,
                                            per_dict['primary_keys'],
                                            per_dict['columns'],
                                            tokens=self._tokens,
                                            indexed_args=indexed_args)
                    new_value.update(value)
                    value = new_value

            if self._is_persistent:
                if issubclass(value.__class__, IStorage):
                    if not value._is_persistent:
                        count = self._count_name_collision(attribute)
                        value.make_persistent(self._ksp + '.' + self._table +
                                              '_' + attribute + '_' +
                                              str(count))
                    values = [self._storage_id, value._storage_id]
                else:
                    values = [self._storage_id, value]

                query = "INSERT INTO %s.%s (storage_id,%s)" % (
                    self._ksp, self._table, attribute)
                query += " VALUES (%s,%s)"

                log.debug("SETATTR: ", query)
                config.session.execute(query, values)

        object.__setattr__(self, attribute, value)
Example 19
    def _store_meta(storage_args):
        log.debug("QbeastIterator: storing metas %s", '')

        try:
            config.session.execute(QbeastIterator._prepared_store_meta, [
                storage_args.primary_keys, storage_args.columns,
                storage_args.name, storage_args.qbeast_meta,
                storage_args.qbeast_id, storage_args.entry_point,
                storage_args.storage_id, storage_args.tokens,
                storage_args.class_name
            ])
        except Exception as ex:
            log.error("Error creating the StorageDictIx metadata: %s %s",
                      storage_args, ex)
            raise ex
Example 20
 def _store_meta(storage_args):
     """
         Saves the information of the object in the istorage table.
          Args:
             storage_args (object): contains all data needed to restore the object from the workers
     """
     log.debug("StorageObj: storing media %s", storage_args)
     try:
         config.session.execute(StorageNumpy._prepared_store_meta, [
             storage_args.storage_id, storage_args.class_name,
             storage_args.name
         ])
     except Exception as ex:
         log.warn("Error creating the StorageNumpy metadata with args: %s" %
                  str(storage_args))
         raise ex
Example 21
    def delete_persistent(self):
        """
            Deletes the Cassandra table where the persistent StorageObj stores data
        """
        self._is_persistent = False

        if hasattr(self, '_persistent_dicts'):
            for pers_dict in self._persistent_dicts:
                pers_dict.delete_persistent()

        if hasattr(self, '_storage_objs'):
            for so in self._storage_objs:
                so.delete_persistent()

        query = "TRUNCATE TABLE %s.%s;" % (self._ksp, self._table)
        log.debug("DELETE PERSISTENT: %s", query)
        config.session.execute(query)
Example 22
    def _store_meta(storage_args):
        """
        Method to update the info about the StorageDict in the DB metadata table
        Args:
            storage_args: structure with all data needed to update the metadata
        """
        log.debug("StorageDict: storing metas %s", storage_args)

        try:
            config.session.execute(StorageDict._prepared_store_meta, [
                storage_args.storage_id, storage_args.class_name,
                storage_args.name, storage_args.tokens,
                storage_args.primary_keys, storage_args.columns,
                storage_args.indexed_on
            ])
        except Exception as ex:
            log.error("Error creating the StorageDict metadata: %s %s",
                      storage_args, ex)
            raise ex
Example 23
    def split(self):
        '''
        config.partition_strategy == "SIMPLE"
        Data will be partitioned in config.number_of_partitions different chunks
        :return: an iterator over partitions
        '''
        st = time.time()
        tokens = self._father._build_args.tokens

        for token_split in self._tokens_partitions(
                tokens, config.min_number_of_tokens,
                config.number_of_partitions):
            storage_id = uuid.uuid4()
            log.debug('assigning to %s %d  tokens', str(storage_id),
                      len(token_split))
            new_args = self._father._build_args._replace(tokens=token_split,
                                                         storage_id=storage_id)
            yield self._father.__class__.build_remotely(new_args._asdict())
        log.debug('completed split of %s in %f',
                  self._father.__class__.__name__,
                  time.time() - st)
Example 24
    def delete(self, name):
        """
        Deletes an existing cluster

        Args:
            name (str): The name of the cluster that will be deleted
        """
        # check if http auth user has permission to modify
        auth.check_permission(auth.http_auth.username(), "hecuba_admin")

        log.debug("Checking if cluster %s exists." % name)
        c = models.Cluster.query.get(name)
        if (c is None):
            log.debug("Cluster %s doesn't exist." % name)
            abort(404, message="Cluster {} doesn't exist.".format(name))

        # grab all nodes from current cluster to delete
        log.info("Deleting all nodes from cluster %s." % name)
        nodes = c.nodes.all()
        for n in nodes:
            db.session.delete(n)
            try:
                db.session.commit()
                log.info("Node %s deleted." % n.name)
            except Exception as ex:
                db.session.rollback()
                log.debug(ex)
                return { 'message': "Error removing nodes from cluster." }, 500
 
        log.debug("Deleting cluster %s." % name)
        db.session.delete(c)
        try:
            db.session.commit()
            log.info("Cluster %s was deleted." % name)
        except Exception as ex:
            db.session.rollback()
            log.debug(ex)
            return { 'message': "Error removing cluster." }, 500

        return { 'message': "Cluster removed." }
Example 25
    def post(self):
        """
        Creates a new cluster based on data provided into a POST request.
        
        The data can be provided in the body as JSON or HTTP form-data.

        Note:
            The "hecuba_admin" cluster name is reserved and cannot be used.

        HTTP Data:
            name (str): The name of the cluster that will be created
            secret (Optional|str): Password for managing/updating the cluster
        """
        args = self.reqparse.parse_args()

        # check if http auth user has permission to modify
        auth.check_permission(auth.http_auth.username(), "hecuba_admin")

        # hash secret if needed
        if (args['secret']):
            hashed = auth.generate_password(args['secret'])
        else:
            hashed = None

        # hecuba_admin cluster name is reserved for global authentication
        if (args['name'] == "hecuba_admin"):
            return { 'message': "Cannot create cluster: name \"hecuba_admin\" reserved." }, 403

        # added if doesn't exist
        c = models.Cluster(name=args['name'], secret=hashed)
        db.session.add(c)
        try:
            db.session.commit()
            log.info("Created cluster named %s." % args['name'])
        except IntegrityError:
            db.session.rollback()
            log.debug("Tried to create a cluster named %s, but it already exists." % args['name'])
            return { 'message': "Cluster already exists." }, 409
        else:
            return { 'clusters': marshal(c, models.cluster_fields) }, 201, { 'Location': "/cluster/%s" % args['name'] }
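A hedged sketch of the corresponding client call; the base URL, the /clusters route and the credentials are assumptions, since the routing is defined elsewhere in the project:

import requests

resp = requests.post("http://localhost:5000/clusters",
                     data={"name": "cluster1", "secret": "s3cret"},
                     auth=("hecuba_admin", "admin-password"))
print(resp.status_code)   # expected: 201 on success, 409 if the name already exists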
Example 26
    def delete(self, name):
        """
        Deletes an existing node

        Args:
            name (str): The name of the node that will be deleted
        """
        log.debug("Checking if node %s exists." % name)
        n = models.Node.query.get(name)
        if (n is None):
            log.debug("Node %s doesn't exist." % name)
            abort(404, message="Node {} doesn't exist.".format(name))

        # check if http auth user has permission to modify
        auth.check_permission(auth.http_auth.username(), n.cluster_name)

        db.session.delete(n)

        try:
            db.session.commit()
            log.info("Node %s was deleted." % name)
        except Exception as ex:
            log.debug(ex)
            return { 'message': "Error removing node." }, 500

        return { 'message': "Node removed." }
Example 27
File: api.py Project: him-28/hecuba
def getByID(objid):
    """
    We rebuild the object from its id. The id can either be:
    block: UUID (eg. f291f008-a520-11e6-b42e-5b582e04fd70)
    storageobj: UUID_(version) (eg. f291f008-a520-11e6-b42e-5b582e04fd70_1)

    Args:
        objid (str):  object identifier

    Returns:
         (Block| Storageobj)

    """
    """
               TODO
               Args:
                   objid (str):  object identifier
               Returns:
                    (Block| Storageobj)
               """
    import uuid

    from hecuba import log
    try:
        from hecuba import config
        query = "SELECT * FROM hecuba.istorage WHERE storage_id = %s"
        results = config.session.execute(query, [uuid.UUID(objid)])[0]
    except Exception as e:
        log.error("Query %s failed", query)
        raise e
    class_name = results.class_name

    log.debug("IStorage API:getByID(%s) of class %s", objid, class_name)
    last = 0
    for key, i in enumerate(class_name):
        if i == '.' and key > last:
            last = key
    module = class_name[:last]
    cname = class_name[last + 1:]
    mod = __import__(module, globals(), locals(), [cname], 0)
    b = getattr(mod, cname).build_remotely(results)
    return b
Example 28
    def post(self):
        """
        Creates a new node based on data provided into a POST request.
        
        The data can be provided in the body as JSON or HTTP form-data.

        HTTP Data:
            name (str): The name of the node that will be created
            cluster_name (str): The cluster that the node will belong to
        """
        args = self.reqparse.parse_args()

        # check if cluster_name actually exists
        log.debug("Checking if cluster %s exists." % args['cluster_name'])
        c = models.Cluster.query.get(args['cluster_name'])
        if c is None:
            log.debug("Cluster %s doesn't exist." % args['cluster_name'])
            abort(404, message="Cluster {} doesn't exist.".format(args['cluster_name']))

        # check if http auth user has permission to modify
        auth.check_permission(auth.http_auth.username(), args['cluster_name'])

        # added if doesn't exist
        n = models.Node(name=args['name'], cluster_name=args['cluster_name'])
        db.session.add(n)
        try:
            db.session.commit()
            log.info("Node %s added to cluster %s." % (args['name'], args['cluster_name']))
        except IntegrityError:
            log.debug("Tried to add node %s but it already exists." % args['name'])
            return { 'message': "Node already exists." }, 409

        return { 'nodes': marshal(n, models.node_fields) }, 201, { 'Location': "/node/%s" % args['name'] }
Example 29
    def split(self):
        """
        Method used to divide an object into sub-objects.
        Returns:
            a sub-object each time it is called
        """
        st = time()
        tokens = self._build_args.tokens

        for token_split in IStorage._tokens_partitions(
                tokens, config.min_number_of_tokens,
                config.number_of_partitions):
            storage_id = uuid.uuid4()
            log.debug('assigning to %s %d  tokens', str(storage_id),
                      len(token_split))
            new_args = self._build_args._replace(tokens=token_split,
                                                 storage_id=storage_id)
            self.__class__._store_meta(new_args)

            yield self.__class__.build_remotely(new_args)
        log.debug('completed split of %s in %f', self.__class__.__name__,
                  time() - st)
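A hedged sketch of how split() is typically consumed; my_dict and process are illustrative names:

# Each yielded partition is an independent persistent object restricted to its
# token range, suitable for handing to a worker task.
for partition in my_dict.split():
    process(partition)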
Example 30
def getByID(objid):
    """
    We rebuild the object from its id.

    Args:
        objid (str):  object identifier

    Returns:
         Hecuba Object

    """
    """
               TODO
               Args:
                   objid (str):  object identifier
               Returns:
                    (Block| Storageobj)
               """
    from hecuba import log
    from hecuba.IStorage import build_remotely
    from hecuba import config
    from hecuba import StorageNumpy, StorageDict
    from hecuba import StorageObj as StorageObject
    import uuid

    query = "SELECT * FROM hecuba.istorage WHERE storage_id = %s"

    if isinstance(objid, str):
        objid = uuid.UUID(objid)

    results = config.session.execute(query, [objid])
    if not results:
        raise RuntimeError("Object {} not found on hecuba.istorage".format(objid))

    results = results[0]

    log.debug("IStorage API:getByID(%s) of class %s", objid, results.class_name)
    return build_remotely(results._asdict())
Example 31
    def build_remotely(new_args):
        """
            Launches the StorageObj.__init__ from the uuid api.getByID
            Args:
                new_args: a list with all the information needed to recreate the storageobj
            Returns:
                so: the created storageobj
        """
        log.debug("Building Storage object with %s", new_args)
        class_name = new_args.class_name
        if class_name == 'StorageObj':
            so = StorageObj(new_args.name.encode('utf8'), new_args.tokens,
                            new_args.storage_id, new_args.istorage_props)

        else:
            class_name, mod_name = IStorage.process_path(class_name)
            mod = __import__(mod_name, globals(), locals(), [class_name], 0)

            so = getattr(mod, class_name)(new_args.name.encode('utf8'),
                                          new_args.tokens, new_args.storage_id,
                                          new_args.istorage_props)

        return so
Example 32
 def __getattr__(self, attribute):
     """
         Given an attribute, this function returns the value, obtaining it from either:
         a) memory
         b) the Database
         Args:
             attribute: name of the value that we want to obtain
         Returns:
             value: obtained value
     """
     if attribute[0] != '_' and self._is_persistent \
             and attribute in self._persistent_attrs:
         try:
             query = "SELECT %s FROM %s.%s WHERE storage_id = %s;" \
                     % (attribute, self._ksp,
                        self._table,
                        self._storage_id)
             log.debug("GETATTR: %s", query)
             result = config.session.execute(query)
             for row in result:
                 for row_key, row_var in vars(row).iteritems():
                     if row_var is not None:
                         if isinstance(row_var, list) and isinstance(
                                 row_var[0], unicode):
                             new_toreturn = []
                             for entry in row_var:
                                 new_toreturn.append(str(entry))
                             return new_toreturn
                         else:
                             return row_var
                     else:
                         raise AttributeError
         except Exception as ex:
             log.warn("GETATTR ex %s", ex)
             raise AttributeError('value not found')
     else:
         return object.__getattribute__(self, attribute)
Example 33
    def __getitem__(self, key):
        """
        If the object is persistent, each request goes to the hfetch.
        Args:
             key: the dictionary key
        Returns
             item: value found in position key
        """
        log.debug('GET ITEM %s', key)

        if not self._is_persistent:
            to_return = dict.__getitem__(self, key)
            return to_return
        else:
            cres = self._hcache.get_row(self._make_key(key))
            log.debug("GET ITEM %s[%s]", cres, cres.__class__)

            final_results = []
            for index, (name, col_type) in enumerate(self._columns):
                if col_type not in IStorage._basic_types:
                    table_name = self._ksp + '.' + self._table
                    element = (self._build_istorage_obj(
                        col_type, table_name, uuid.UUID(cres[index])))
                else:
                    element = cres[index]
                final_results.append(element)

            cres = final_results
            if issubclass(cres.__class__, NoneType):
                return None
            elif self._column_builder is not None:
                if len(cres) > 0 and isinstance(cres[0], list):
                    return [self._column_builder(*row) for row in cres]
                else:
                    return self._column_builder(*cres)
            else:
                return cres[0]
Example 34
def check_permission(auth_user, cluster):
    """
    Checks if an authenticated user has access to a cluster

    Args:
        auth_user (str): The HTTP-Auth username provided
        cluster (str): The name of the cluster

    Returns:
        mixed: True if the user has permission; otherwise error_handler() is
            called.
    """

    # operations that don't need cluster secret
    if (cluster == "hecuba_admin"):
        if (hecuba.config['ADMIN_SECRET'] and auth_user == "hecuba_admin"):
            log.debug("Action access granted using hecuba_admin")
            return True
        elif (not hecuba.config['ADMIN_SECRET']):
            return True
        else:
            log.debug("Action access denied using hecuba_admin")
            error_handler()

    # operations that uses cluster secret
    c = models.Cluster.query.get(cluster)
    if (c is not None):
        if (c.secret is not None):
            if (hecuba.config['ADMIN_SECRET'] and auth_user == "hecuba_admin"):
                log.debug("Access on cluster %s granted using hecuba_admin" % cluster)
                return True
            elif (auth_user != cluster):
                log.debug("Access denied to username \"%s\" on cluster %s" % (auth_user, cluster))
                error_handler()
        return True
    else:
        error_handler()
Example 35
def verify_password(cluster, password, ws=True):
    """
    Checks if password is valid for a cluster or whole program.

    Also used as a callback to Flask-HTTPAuth verify the password.

    Args:
        cluster (str): The cluster name for checking its password. If "hecuba_admin" 
            is provided, assume the password is for the global scope (whole
            program, configured with the ADMIN_SECRET variable on the 
            configuration file).
        password (str): The password to be checked. It will be hashed and
            compared to the stored one.
        ws (bool): If true, the method is being called from the web server and
            it will call error_handler() when the cluster does not exist. If
            false, it will simply return False instead (for use from the
            manager CLI). Defaults to true.
    """
    # specific cluster or free authentication
    if (cluster != "hecuba_admin" and cluster != ""):
        log.debug("Verifying credentials for cluster %s" % cluster)
        c = models.Cluster.query.get(cluster)
        if (c is None):
            # cluster doesn't exist
            if (ws == True):
                error_handler()
            else:
                return False
        
        if (c.secret):
            return bcrypt.hashpw(password, c.secret) == c.secret
        else:
            return password == "None"
    else:
        if hecuba.config['ADMIN_SECRET'] and cluster == "hecuba_admin":
            log.debug("Verifying credentials for hecuba_admin")
            hashed = hecuba.config['ADMIN_SECRET']
            return bcrypt.hashpw(password, hashed) == hashed
        else:
            log.debug("Admin secret disabled. Request can be served.")
            return True
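A hedged sketch of the three authentication paths handled above; cluster and credential values are illustrative:

# Cluster-level check: compared against the bcrypt hash stored for "cluster1".
verify_password("cluster1", "cluster-password")
# Global check: compared against the ADMIN_SECRET hash from the configuration.
verify_password("hecuba_admin", "admin-password")
# CLI-style check: ws=False makes a missing cluster return False instead of
# aborting the request through error_handler().
verify_password("missing-cluster", "anything", ws=False)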
Example 36
    def __init__(self,
                 name=None,
                 tokens=None,
                 storage_id=None,
                 istorage_props=None,
                 **kwargs):
        """
            Creates a new storageobj.
            Args:
                name (string): the name of the Cassandra Keyspace + table where information can be found
                tokens (list of tuples): token ranges assigned to the new StorageObj
                storage_id (string): a unique storageobj identifier
                istorage_props dict(string,string): a map with the storage id of each contained istorage object.
                kwargs: more optional parameters
        """
        log.debug("CREATED StorageObj(%s)", name)
        self._is_persistent = False
        self._persistent_dicts = []
        self._storage_objs = []

        if name is None:
            self._ksp = config.execution_name
        else:
            (self._ksp, self._table) = self._extract_ks_tab(name)

        self._persistent_props = StorageObj._parse_comments(self.__doc__)
        self._persistent_attrs = self._persistent_props.keys()

        if tokens is None:
            # log.info('using all tokens')
            tokens = map(lambda a: a.value,
                         config.cluster.metadata.token_map.ring)
            self._tokens = IStorage._discrete_token_ranges(tokens)
        else:
            self._tokens = tokens

        self._storage_id = storage_id
        self._istorage_props = istorage_props

        self._class_name = '%s.%s' % (self.__class__.__module__,
                                      self.__class__.__name__)
        if name is not None:
            self._build_args = self.args(self._ksp + '.' + self._table,
                                         self._tokens, self._storage_id,
                                         self._istorage_props,
                                         self._class_name)

        dictionaries = filter(lambda (k, t): t['type'] == 'StorageDict',
                              self._persistent_props.iteritems())
        for table_name, per_dict in dictionaries:
            dict_name = "%s.%s" % (self._ksp, table_name)

            if istorage_props is not None and dict_name in istorage_props:
                args = config.session.execute(IStorage._select_istorage_meta,
                                              (istorage_props[dict_name], ))[0]
                # The internal objects must have the same father's tokens
                args = args._replace(tokens=self._tokens)
                log.debug("CREATING INTERNAL StorageDict with %s", args)
                pd = StorageDict.build_remotely(args)
            else:
                if 'indexed_values' in per_dict:
                    indexed_args = per_dict['indexed_values']
                else:
                    indexed_args = None
                pd = StorageDict(None,
                                 per_dict['primary_keys'],
                                 per_dict['columns'],
                                 tokens=self._tokens,
                                 indexed_args=indexed_args)
            setattr(self, table_name, pd)
            self._persistent_dicts.append(pd)

        storageobjs = filter(
            lambda (k, t): t['type'] not in IStorage._basic_types and t['type']
            != 'StorageDict', self._persistent_props.iteritems())
        for table_name, per_dict in storageobjs:
            so_name = "%s.%s" % (self._ksp, table_name)
            cname, module = IStorage.process_path(per_dict['type'])
            mod = __import__(module, globals(), locals(), [cname], 0)
            so = getattr(mod, cname)()
            setattr(self, table_name, so)

        if name is not None:
            self.make_persistent(name)
Example 37
    def __init__(self,
                 name=None,
                 primary_keys=None,
                 columns=None,
                 tokens=None,
                 storage_id=None,
                 indexed_args=None,
                 **kwargs):
        """
        Creates a new StorageDict.

        Args:
            name (string): the name of the collection/table (keyspace is optional)
            primary_keys (list(tuple)): a list of (key,type) primary keys (primary + clustering).
            columns (list(tuple)): a list of (key,type) columns
            tokens (list): list of tokens
            storage_id (string): the storage identifier
            indexed_args (list): values that will be used as index
            kwargs: other parameters
        """

        super(StorageDict, self).__init__(**kwargs)
        self._is_persistent = False
        log.debug("CREATED StorageDict(%s,%s,%s,%s,%s,%s)", primary_keys,
                  columns, name, tokens, storage_id, kwargs)

        if tokens is None:
            log.info('using all tokens')
            tokens = map(lambda a: a.value,
                         config.cluster.metadata.token_map.ring)
            self._tokens = IStorage._discrete_token_ranges(tokens)
        else:
            self._tokens = tokens

        self._storage_id = storage_id

        if self.__doc__ is not None:
            self._persistent_props = self._parse_comments(self.__doc__)
            self._primary_keys = self._persistent_props[
                self.__class__.__name__]['primary_keys']
            self._columns = self._persistent_props[
                self.__class__.__name__]['columns']
            try:
                self._indexed_args = self._persistent_props[
                    self.__class__.__name__]['indexed_values']
            except KeyError:
                self._indexed_args = indexed_args
        else:
            self._primary_keys = primary_keys
            self._columns = columns
            self._indexed_args = indexed_args

        key_names = [pkname for (pkname, dt) in self._primary_keys]
        column_names = [colname for (colname, dt) in self._columns]
        self._item_builder = namedtuple('row', key_names + column_names)

        if len(key_names) > 1:
            self._key_builder = namedtuple('row', key_names)
        else:
            self._key_builder = None
        if len(column_names) > 1:
            self._column_builder = namedtuple('row', column_names)
        else:
            self._column_builder = None

        self._k_size = len(key_names)

        class_name = '%s.%s' % (self.__class__.__module__,
                                self.__class__.__name__)
        self._build_args = self.args(name, self._primary_keys, self._columns,
                                     self._tokens, self._storage_id,
                                     self._indexed_args, class_name)

        if name is not None:
            self.make_persistent(name)
        else:
            self._is_persistent = False
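A hedged end-to-end sketch combining this constructor with make_persistent and __setitem__ from the other examples; the keyspace and column names are illustrative:

positions = StorageDict(None,
                        primary_keys=[("pid", "int")],
                        columns=[("x", "double"), ("y", "double")])
positions[0] = (0.0, 1.0)                        # kept in memory while volatile
positions.make_persistent("my_app.positions")    # creates the table and flushes the entry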
Example 38
    def make_persistent(self, name):
        """
        Method to transform a StorageDict into a persistent object.
        This will make it use a persistent DB as the main location
        of its data.
        Args:
            name (string): name of the keyspace and table (keyspace.table) used to store the data
        """
        if self._is_persistent:
            raise AlreadyPersistentError(
                "This StorageDict is already persistent [Before:{}.{}][After:{}]",
                self._ksp, self._table, name)
        self._is_persistent = True
        (self._ksp, self._table) = self._extract_ks_tab(name)

        if self._storage_id is None:
            self._storage_id = uuid.uuid3(uuid.NAMESPACE_DNS,
                                          self._ksp + '.' + self._table)
        self._build_args = self._build_args._replace(
            storage_id=self._storage_id, name=self._ksp + "." + self._table)
        self._store_meta(self._build_args)
        if config.id_create_schema == -1:
            query_keyspace = "CREATE KEYSPACE IF NOT EXISTS %s WITH replication = %s" % (
                self._ksp, config.replication)
            try:
                log.debug('MAKE PERSISTENCE: %s', query_keyspace)
                config.session.execute(query_keyspace)
            except Exception as ex:
                log.warn("Error creating the StorageDict keyspace %s, %s",
                         (query_keyspace), ex)
                raise ex

        for key, value in dict.iteritems(self):
            if issubclass(value.__class__, IStorage):
                # new name as ksp+table+obj_class_name
                val_name = self._ksp + '.' + self._table + type(
                    value).__name__.lower()
                value.make_persistent(val_name)

        columns = self._primary_keys + self._columns
        for ind, entry in enumerate(columns):
            n = StorageDict._other_case.match(entry[1])
            if n is not None:
                iter_type, intra_type = n.groups()
            else:
                iter_type = entry[1]
            if iter_type not in IStorage._basic_types:
                columns[ind] = entry[0], 'uuid'

        pks = map(lambda a: a[0], self._primary_keys)
        query_table = "CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));" \
                      % (self._ksp,
                         self._table,
                         ",".join("%s %s" % tup for tup in columns),
                         str.join(',', pks))
        try:
            log.debug('MAKE PERSISTENCE: %s', query_table)
            config.session.execute(query_table)
        except Exception as ex:
            log.warn("Error creating the StorageDict table: %s %s",
                     query_table, ex)
            raise ex
        key_names = map(lambda a: a[0].encode('UTF8'), self._primary_keys)
        column_names = self._columns

        self._hcache_params = (self._ksp, self._table, self._storage_id,
                               self._tokens, key_names,
                               map(lambda x: {
                                   "name": x[0],
                                   "type": x[1]
                               }, column_names), {
                                   'cache_size': config.max_cache_size,
                                   'writer_par': config.write_callbacks_number,
                                   'write_buffer': config.write_buffer_size
                               })
        log.debug("HCACHE params %s", self._hcache_params)
        self._hcache = Hcache(*self._hcache_params)
        # Storing all in-memory values to cassandra
        for key, value in dict.iteritems(self):
            self._hcache.put_row(self._make_key(key), self._make_value(value))
        if hasattr(self, '_indexed_args') and self._indexed_args is not None:
            index_query = 'CREATE CUSTOM INDEX IF NOT EXISTS ' + self._table + '_idx ON '
            index_query += self._ksp + '.' + self._table + ' (' + str.join(
                ',', self._indexed_args) + ') '
            index_query += "using 'es.bsc.qbeast.index.QbeastIndex';"
            try:
                config.session.execute(index_query)
            except Exception as ex:
                log.error("Error creating the Qbeast custom index: %s %s",
                          index_query, ex)
                raise ex
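For the illustrative declaration used in the previous sketch (an int key pid and double columns x, y persisted as my_app.positions), the statements built by this method would look roughly as follows; the replication map comes from config.replication:

# Approximate CQL issued by make_persistent (illustration only):
#   CREATE KEYSPACE IF NOT EXISTS my_app WITH replication = <config.replication>
#   CREATE TABLE IF NOT EXISTS my_app.positions (pid int, x double, y double, PRIMARY KEY (pid));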
Example 39
    def put(self, name):
        """
        Updates an existing node based on data provided into a PUT request.

        Args:
            name (str): The name of the node that will be updated
        """
        # get node and corresponding cluster
        log.debug("Checking if node %s exists." % name)
        n = models.Node.query.get(name)
        if (n is None):
            log.debug("Node %s doesn't exist." % name)
            abort(404, message="Node {} doesn't exist.".format(name))

        # check if http auth user has permission to modify
        auth.check_permission(auth.http_auth.username(), n.cluster_name)

        c = models.Cluster.query.get(n.cluster_name)

        # only update node if cluster task is not running 
        if (n.running == False and c.running == True):
            log.debug("Cluster %s has a task running, cannot update node %s." % (c.name, name))
            return { 'message': "Cluster task already running. Cannot update." }, 403

        # parse arguments from request
        args = self.reqparse.parse_args()
        for key, value in args.iteritems():
            if (value != None and value != getattr(n, key)):
                log.debug("Setting node '%s' attribute '%s' to: %s." % (name, key, value))
                setattr(n, key, value)
        n.last_updated = datetime.datetime.utcnow()

        # lock the cluster if needed
        if (n.running == True and c.running == False):
            log.debug("Trying to lock cluster %s." % c.name)
            rs = db.session.query(models.Cluster).filter_by(
                name=n.cluster_name,
                running=0
            ).update({
                "running": True,
                "last_updated": datetime.datetime.utcnow()
            })

            if (rs):
                log.info("Locked cluster %s because node '%s' will run the task." % (c.name, name))
            else:
                log.debug("Cluster %s has a task running, cannot update node %s." % (c.name, name))
                return { 'message': "Cluster task already running. Cannot update." }, 403

        # or free the cluster and update node
        elif (n.running == False and c.running == True):
            rs = db.session.query(models.Cluster).filter_by(
                name=n.cluster_name,
                running=1
            ).update({
                "running": False,
                "last_updated": datetime.datetime.utcnow()
            })

            if (rs):
                log.info("Unlocked cluster %s because node %s completed the task." % (c.name, name))
            else:
                log.debug("Cluster %s hasn't a task running, cannot update node %s." % (c.name, name))
                return { 'message': "Cluster task not running. Cannot update." }, 403

        try:
            db.session.commit()
            log.info("Node %s was updated." % (name))
        except IntegrityError:
            db.session.rollback()
        except Exception as ex:
            log.debug(ex)
            return { 'message': "Error updating node." }, 500

        return { 'message': "Node updated." }, 204
Example 40
    def get(self):
        """
        Checks internal commands and database and report if OK or not when
        a HTTP GET request is made.
        """
        log.debug("Performing a health-check.")
        log.debug("Health-check: Testing version view")
        try:
            v = Version_Handler()
            v = v.get()
        except:
            log.debug("Health-check: Error testing version view")
            return {
                'message': 'Error testing version view.'
            }, 500

        log.debug("Health-check: Testing models and database")
        try:
            c = models.Cluster.query.first()
            n = models.Node.query.first()
        except:
            log.debug("Health-check: Error testing models and database")
            return {
                'message': 'Error testing models and database'
            }, 500

        log.debug("Health-check passed.")
        return {
            'message': 'All checks passed.'
        }
Example 41
"""
    hecuba.auth
    ~~~~~~~~~~~~

    Authentication classes, functions and decorators

    :copyright: (c) 2016 by Hugo Cisneiros.
    :license: GPLv2, see LICENSE for more details.
"""
from hecuba import hecuba, db, models, log
from flask.ext.httpauth import HTTPBasicAuth
from flask_restful import abort
import bcrypt

# setup http basic auth
log.debug("Setting up HTTP Basic Auth capability")
http_auth = HTTPBasicAuth()

@http_auth.error_handler
def error_handler():
    """
    Returns HTTP Forbidden when an authentication error occurs.

    Also used as the Flask-HTTPAuth callback when user authentication fails.
    """
    log.debug("Access was denied while requesting a view.")
    abort(403, message="Access denied.")

@http_auth.verify_password
def verify_password(cluster, password, ws=True):
    """