Example no. 1
    def __init__(
        self, record=None, user=None, group=None, lastname=None,
        firstname=None, mail=None, groups=None, authkey=None, *args, **kargs
    ):
        super(Account, self).__init__()

        self.user = user or "anonymous"
        self.groups = groups if groups is not None else []
        self.group = group or "group.anonymous"
        self.shadowpasswd = None

        self.authkey = authkey or self.generate_new_authkey()

        self.lastname = lastname
        self.firstname = firstname
        self.mail = mail

        self.type = "account"

        self._id = self.type + "." + self.user

        self.access_owner = ['r', 'w']
        self.access_group = []
        self.access_other = []
        self.access_unauth = []

        self.external = False

        if isinstance(record, Record):
            Record.__init__(self, _id=self._id, record=record, *args, **kargs)
        else:
            Record.__init__(self, _id=self._id, *args, **kargs)
Example no. 2
    def test_03_InitFromRecord(self):
        record = Record(self.data)

        record2 = Record(record=record)

        if record2.data != self.data:
            raise Exception('Data corruption ...')
Example no. 3
    def test_17_CheckWriteRights(self):
        # Insert with user account
        record = Record({'check': 'test7'})
        STORAGE.put(record, account=self.user_account)

        ## try to remove with anonymous account
        self.assertRaises(
            ValueError, STORAGE.remove, record, self.anonymous_account)

        ## Change rights
        record.chgrp('group.anonymous')
        record.chmod('g+w')
        STORAGE.put(record)

        ## remove with anonymous account, which now succeeds
        STORAGE.remove(record, account=self.anonymous_account)
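
The rights flow exercised by this test (change the record's group, grant group write access, persist) can be wrapped in a small helper. This is a minimal sketch grounded only in the calls shown above; the `storage` parameter is an assumed stand-in for STORAGE and is not part of the original test.

def share_with_group(storage, record, group_id):
    # Hedged helper mirroring the flow of the test above; `storage` is
    # assumed to expose put() the way STORAGE does.
    record.chgrp(group_id)   # e.g. 'group.anonymous'
    record.chmod('g+w')      # grant write access to that group
    storage.put(record)      # persist the updated rights
    return record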
Example no. 4
 def load(self, dump):
     Record.load(self, dump)
     self.user = self.data['user']
     self.lastname = self.data['lastname']
     self.firstname = self.data['firstname']
     self.mail = self.data['mail']
     self.groups = self.data['groups']
     self.external = self.data.get('external', self.external)
     '''
     if len(self.groups) > 0:
         if self.groups[0] == self.group:
             self.groups.pop(0)
     '''
     self.shadowpasswd = self.data['shadowpasswd']
     if 'authkey' in self.data:
         self.authkey = self.data['authkey']
Example no. 5
    def test_07_enable(self):
        record = Record(self.data)

        record.set_enable()
        if not record.is_enable():
            raise Exception('Impossible to enable ...')

        record.set_disable()
        if record.is_enable():
            raise Exception('Impossible to disable ...')
Example no. 6
def go(account, nb):
    storage.account = account
    ## Insert nb records
    insert_nb = nb
    timer.start()
    for i in range(0, insert_nb):
        record = Record({'number': i})
        storage.put(record)
    timer.stop()
    insert_speed = int(insert_nb / timer.elapsed)

    ## Read all records
    timer.start()
    records = storage.find()
    timer.stop()
    read_nb = len(records)
    read_speed = int(read_nb / timer.elapsed)

    ## Update records
    new_records = []
    for record in records:
        record.data = {'check': 'update'}
        new_records.append(record)

    update_nb = len(new_records)
    timer.start()
    records = storage.put(new_records)
    timer.stop()
    update_speed = int(update_nb / timer.elapsed)

    ## Remove all records
    timer.start()
    storage.remove(records)
    timer.stop()
    remove_nb = len(records)
    remove_speed = int(remove_nb / timer.elapsed)

    print(
        " + Insert Speed:", insert_speed, "records/s (%s records)" % insert_nb)
    print(
        " + Read Speed:", read_speed, "records/s (%s records)" % read_nb)
    print(
        " + Update Speed:", update_speed, "records/s (%s records)" % update_nb)
    print(
        " + Remove Speed:", remove_speed, "records/s (%s records)" % remove_nb)
Example no. 7
    def process_insert_operations_collection(self, operations, collection):

        self.stats['insert ' + collection] += len(operations)

        if operations:
            # is there any event to process ?
            backend = self.storage.get_backend(collection)
            bulk = backend.initialize_unordered_bulk_op()
            for operation in operations:
                record = Record(operation['event'])
                record.type = "event"
                event = record.dump()
                bulk.insert(event)
            try:
                bulk.execute({'w': 0})
            except BulkWriteError as bwe:
                self.logger.warning(pp.pformat(bwe.details))
            self.logger.info(u'inserted log events {}'.format(len(operations)))
Example no. 8
    def process_insert_operations_collection(self, operations, collection):

        self.stats['insert ' + collection] += len(operations)

        if operations:
            # is there any event to process ?
            backend = self.storage.get_backend(collection)
            bulk = backend.initialize_unordered_bulk_op()
            for operation in operations:
                record = Record(operation['event'])
                record.type = "event"
                event = record.dump()
                bulk.insert(event)
            try:
                bulk.execute({'w': 0})
            except BulkWriteError as bwe:
                self.logger.warning(bwe.details)
            self.logger.info('inserted log events {}'.format(len(operations)))
Example no. 9
def save_records(ws, namespace, ctype, _id, items):

    records = []

    for data in items:
        m_id = data.pop('_id', None)
        mid = data.pop('id', None)
        _id = m_id or mid or _id

        record = None

        # Try to fetch existing record for update
        if _id:
            try:
                record = ws.db.get(_id, namespace=namespace)

            except KeyError:
                pass  # record is None here

        if record:
            for key in data.keys():
                record.data[key] = data[key]

            record.name = data.get('crecord_name', record.name)

        else:
            cname = data.pop('crecord_name', 'noname')
            record = Record(_id=_id, data=data, name=cname, _type=ctype)

        try:
            _id = ws.db.put(record, namespace=namespace)

            drecord = record.dump()
            drecord['_id'] = str(_id)
            drecord['id'] = drecord['_id']
            records.append(drecord)

        except Exception as err:
            ws.logger.error(u'Impossible to save record: {0}'.format(
                err
            ))

    return records
Example no. 10
def save_records(ws, namespace, ctype, _id, items):

    records = []

    for data in items:
        m_id = data.pop('_id', None)
        mid = data.pop('id', None)
        _id = m_id or mid or _id

        record = None

        # Try to fetch existing record for update
        if _id:
            try:
                record = ws.db.get(_id, namespace=namespace)

            except KeyError:
                pass  # record is None here

        if record:
            for key in data.keys():
                record.data[key] = data[key]

            record.name = data.get('crecord_name', record.name)

        else:
            cname = data.pop('crecord_name', 'noname')
            record = Record(_id=_id, data=data, name=cname, _type=ctype)

        try:
            _id = ws.db.put(record, namespace=namespace)

            drecord = record.dump()
            drecord['_id'] = str(_id)
            drecord['id'] = drecord['_id']
            records.append(drecord)

        except Exception as err:
            ws.logger.error(u'Impossible to save record: {0}'.format(err))

    return records
Example no. 11
 def make_record(self, _id):
     record = Record()
     record.type = "cache"
     #record._id = 'cache.'+_id
     record._id = _id
     record.access_owner = ['r', 'w']
     record.access_group = []
     record.access_other = []
     record.access_unauth = []
     return record
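
A short usage sketch for make_record(); `cache` stands for an instance of the class defining the method and `storage` for an object exposing put(), both of which are assumptions for illustration.

# Hypothetical usage of make_record(); `cache` and `storage` are assumed
# to exist in the caller's context.
record = cache.make_record('cache.session.42')
record.data['payload'] = {'hits': 1}
storage.put(record)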
Example no. 12
    def test_02_InitFromRaw(self):
        raw = {
            '_id': None,
            'crecord_name': 'titi',
            'crecord_type': 'raw',
            'crecord_write_time': None,
            'enable': True,
            'mydata1': 'data1',
            'mydata3': 'data3',
            'mydata2': 'data2'
        }

        record = Record(raw_record=raw)

        dump = record.dump()

        if dump['_id'] is not None:
            raise Exception('Invalid _id type')

        if record.data != self.data:
            raise Exception('Data corruption ...')
Example no. 13
    def __init__(self,
                 record=None,
                 user=None,
                 group=None,
                 lastname=None,
                 firstname=None,
                 mail=None,
                 groups=None,
                 authkey=None,
                 *args,
                 **kargs):
        super(Account, self).__init__()

        self.user = user or "anonymous"
        self.groups = groups if groups is not None else []
        self.group = group or "group.anonymous"
        self.shadowpasswd = None

        self.authkey = authkey or self.generate_new_authkey()

        self.lastname = lastname
        self.firstname = firstname
        self.mail = mail

        self.type = "account"

        self._id = self.type + "." + self.user

        self.access_owner = ['r', 'w']
        self.access_group = []
        self.access_other = []
        self.access_unauth = []

        self.external = False

        if isinstance(record, Record):
            Record.__init__(self, _id=self._id, record=record, *args, **kargs)
        else:
            Record.__init__(self, _id=self._id, *args, **kargs)
Example no. 14
def do_update(json_data, collection):

    record = Record({}).dump()

    for key in json_data:
        record[key] = json_data[key]

    compare_record = record.copy()

    hooks(record)

    if DEBUG and record != compare_record:
        print('Differences found\n # before \n{}\n\n # after\n {}'.format(
            pp.pformat(compare_record),
            pp.pformat(record)
        ))

    storage.get_backend(collection).update(
        {'loader_id': json_data['loader_id']},
        record,
        upsert=True
    )
Example no. 15
 def dump(self):
     self.name = self.user
     self.data['user'] = self.user
     self.data['lastname'] = self.lastname
     self.data['firstname'] = self.firstname
     self.data['mail'] = self.mail
     self.data['groups'] = list(self.groups)
     self.data['external'] = self.external
     '''
     if self.group:
         self.data['groups'].insert(0, self.group)
     '''
     self.data['shadowpasswd'] = self.shadowpasswd
     self.data['authkey'] = self.authkey
     return Record.dump(self)
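
This dump() mirrors the load() of Example no. 4; a round trip between the two can be sketched as follows, assuming the Account class from the earlier examples is available in the current scope.

# Hedged round-trip sketch based only on the dump()/load() code shown in
# the examples: serialize an Account and restore it into a fresh instance.
original = Account(user='jdoe', mail='jdoe@example.com')
payload = original.dump()    # plain dict built on top of Record.dump()

restored = Account()
restored.load(payload)       # repopulates user, mail, groups, ...
assert restored.user == 'jdoe'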
Example no. 16
    def test_19_tree(self):
        record1 = Record({'data': 1}, name="record1")
        record2 = Record({'data': 2}, name="record2")
        record3 = Record({'data': 3}, name="record3")
        record4 = Record({'data': 4}, name="record4")

        STORAGE.put([record1, record2, record3, record4])

        record2.add_children(record4)

        record1.add_children(record2)
        record1.add_children(record3)

        STORAGE.put([record1, record2])
        STORAGE.get_record_childs(record1)
        STORAGE.recursive_get(record1)

        STORAGE.print_record_tree(record1)

        dumps(record1.dump(json=True))
Example no. 17
    def test_11_ManyInsert(self):
        record1 = Record({'check': 'test1', 'state': 1})
        record2 = Record({'check': 'test2', 'state': 0})
        record3 = Record({'check': 'test3', 'state': 0})

        STORAGE.put([record1, record2, record3])
Example no. 18
 def test_02_CreateRecord(self):
     global MYRECORD
     MYRECORD = Record(self.data, storage=STORAGE)
Example no. 19
    def work(self, event, *args, **kwargs):

        # If the event is a downtime event,
        # add entry to the downtime collection
        if event['event_type'] == 'downtime':
            self.logger.debug(
                'Event downtime received: {0}'.format(event['rk']))

            # Build entry, so we know there is a downtime on the component
            record = Record({
                '_expire': event['start'] + event['duration'],

                'connector': event['connector'],
                'source': event['connector_name'],
                'component': event['component'],
                'resource': event.get('resource', None),

                'start': event['start'],
                'end': event['end'],
                'fixed': event['fixed'],
                'timestamp': event['entry'],

                'author': event['author'],
                'comment': event['output']
            })

            # Save record, and log the action
            record.save(self.storage)

            logevent = forger(
                connector="Engine",
                connector_name=self.etype,
                event_type="log",
                source_type=event['source_type'],
                component=event['component'],
                resource=event.get('resource', None),

                state=0,
                state_type=1,

                output=u'Downtime scheduled by {0} from {1} to {2}'.format(
                    event['author'],
                    event['start'],
                    event['end']
                ),

                long_output=event['output']
            )

            logevent['downtime_connector'] = event['connector']
            logevent['downtime_source'] = event['connector_name']

            publish(publisher=self.amqp, event=logevent)

            # Set downtime for events already in database
            self.evt_backend.update(
                {
                    'connector': event['connector'],
                    'connector_name': event['connector_name'],
                    'component': event['component'],
                    'resource': event.get('resource', None)
                },
                {
                    '$set': {
                        'downtime': True
                    }
                },
                multi=True
            )
            # Takes care of the new downtime
            self.cdowntime.reload(delta_beat=self.beat_interval)

        # For every other case, check if the event is in downtime
        else:

            event['downtime'] = False
            if self.cdowntime.is_downtime(
                    event.get('component', ''),
                    event.get('resource', '')):
                event['downtime'] = True
                self.logger.debug(
                    'Received event: {0}, and set downtime to {1}'.format(
                        event['rk'],
                        event['downtime']))
        return event
Example no. 20
    def load(self, dump):
        Record.load(self, dump)

        self.statemap = self.data['statemap']
Example no. 21
    def dump(self):
        self.data['statemap'] = self.statemap

        return Record.dump(self)
Example no. 22
 def test_01_Init(self):
     record = Record(self.data)
     if record.data != self.data:
         raise Exception('Data corruption ...')
Example no. 23
    def get(
        self, _id_or_ids, account=None, namespace=None, mfields=None,
        ignore_bin=True
    ):
        self.check_connected()

        if not account:
            account = self.account

        dolist = False
        if isinstance(_id_or_ids, list):
            _ids = _id_or_ids
            dolist = True
        else:
            _ids = [_id_or_ids]

        backend = self.get_backend(namespace)

        self.logger.debug(" + Get record(s) '%s'" % _ids)
        if not len(_ids):
            self.logger.debug("   + No ids")
            return []

        self.logger.debug("   + fields : %s" % mfields)

        self.logger.debug("   + Clean ids")
        _ids = [self.clean_id(_id) for _id in _ids]

        # Build basic filter
        (Read_mfilter, Write_mfilter) = self.make_mongofilter(account)

        if len(_ids) == 1:
            mfilter = {'_id': _ids[0]}
        else:
            mfilter = {'_id': {'$in': _ids}}

        mfilter = {'$and': [mfilter, Read_mfilter]}

        #self.logger.debug("   + mfilter: %s" % mfilter)
        records = []
        try:
            if len(_ids) == 1:
                raw_record = backend.find_one(mfilter, projection=mfields)

                # Remove binary (base64)
                if ignore_bin and raw_record and raw_record.get('media_bin', None):
                    del raw_record['media_bin']

                if raw_record and mfields:
                    records.append(raw_record)
                elif raw_record:
                    records.append(Record(raw_record=raw_record))
            else:
                raw_records = backend.find_many(mfilter, projection=mfields)

                if mfields:
                    records = [raw_record for raw_record in raw_records]
                else:
                    for raw_record in raw_records:
                        # Remove binary (base64)
                        if ignore_bin and raw_record.get('media_bin', None):
                            del raw_record['media_bin']

                        records.append(Record(raw_record=raw_record))

        except Exception as err:
            self.logger.error("Impossible get record '%s' !\nReason: %s" % (_ids, err))

        self.logger.debug(" + Found %s records" % len(records))
        if not len(records):
            raise KeyError("'%s' not found ..." % _ids)

        if len(_ids) == 1 and not dolist:
            return records[0]
        else:
            return records
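
A usage sketch for get(), based only on the behaviour visible above (a single id returns one Record, a list of ids returns a list, and a KeyError is raised when nothing matches); `storage` is an assumed instance of the class defining the method.

# Hypothetical calls against an instance of the class defining get().
try:
    account_record = storage.get('account.anonymous')   # single Record
except KeyError:
    account_record = None

# A list of ids returns a list of Record objects instead.
records = storage.get(['account.root', 'account.anonymous'])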
Example no. 24
    def find(
        self, mfilter=None, mfields=None, account=None, namespace=None,
        one=False, count=False, sort=None, limit=0, offset=0,
        for_write=False, ignore_bin=True, raw=False, with_total=False
    ):
        self.check_connected()

        # Avoid sharing a mutable default dict between calls
        if mfilter is None:
            mfilter = {}

        if not account:
            account = self.account

        if isinstance(sort, basestring):
            sort = [(sort, 1)]

        # Clean Id
        if mfilter.get('_id', None):
            mfilter['_id'] = self.clean_id(mfilter['_id'])

        if one:
            sort = [('timestamp', -1)]

        self.logger.debug("Find records from mfilter" )

        (Read_mfilter, Write_mfilter) = self.make_mongofilter(account)

        if for_write:
            if Write_mfilter:
                mfilter = { '$and': [ mfilter, Write_mfilter ] }
        else:
            if Read_mfilter:
                mfilter = { '$and': [ mfilter, Read_mfilter ] }

        self.logger.debug(" + fields : %s" % mfields)
        self.logger.debug(" + mfilter: %s" % mfilter)

        backend = self.get_backend(namespace)

        if one:
            raw_records = backend.find_one(mfilter, projection=mfields)
            if raw_records:
                raw_records = [raw_records]
            else:
                raw_records = []
        else:

            count_limit_reached = backend.count() > self.no_count_limit

            if count_limit_reached:

                if limit == 0:
                    limit = self.fetch_limit

                if limit > 1:
                    # change limit artificially to fetch one more result if possible
                    limit += 1

            if sort is None:
                raw_records = backend.find(
                    mfilter, projection=mfields, skip=offset, limit=limit)
            else:
                raw_records = backend.find(
                    mfilter, projection=mfields, skip=offset, limit=limit,
                    sort=sort)

            """
                Because mongo counts computation time is not acceptable, total is equal
                to the element fetched count (can be limit or less before it is artificially changed) OR
                total is offset + limit events and possibly + 1 if limit is reached
                (when +1 , this means some other records are availables)
            """

            if count_limit_reached:
                # When the count limit is reached, count as described above
                raw_records = list(raw_records)

                total = len(raw_records) + offset

                if limit > 1:
                    raw_records = raw_records[:limit - 1]

            else:
                # Otherwise, count is done on the collection with the given filter
                total = raw_records.count()
                raw_records = list(raw_records)

            # Process limit, offset and sort independently of pymongo,
            # because sort does not use an index
            if count:
                return total

        records = []

        if not mfields:
            for raw_record in raw_records:
                try:
                    # Remove binary (base64)
                    if ignore_bin and raw_record.get('media_bin', None):
                        del raw_record['media_bin']

                    if not raw:
                        records.append(Record(raw_record=raw_record))
                    else:
                        records.append(raw_record)

                except Exception as err:
                    ## Not in record format
                    self.logger.error("Impossible to parse record ('%s')!" % err)
        else:
            records = raw_records

        self.logger.debug("Found %s record(s)" % len(records))

        if one:
            if len(records) > 0:
                return records[0]
            else:
                return None
        else:
            if with_total:  # returns the pair (records, total)
                return records, total

            return records
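
A usage sketch for find(), using only parameters present in the signature above; `storage` is again an assumed instance of the defining class and the filter values are illustrative.

# Hypothetical query: fetch up to 50 'raw' records sorted by timestamp,
# together with the computed total (with_total=True returns a pair).
records, total = storage.find(
    mfilter={'crecord_type': 'raw'},
    sort='timestamp',    # a plain string is expanded to [(field, 1)]
    limit=50,
    offset=0,
    with_total=True
)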