Example #1
0
    def test_datamodel_typefns(self):
        '''
        Smoke test the module level type helper functions in s_datamodel.
        '''
        # repr renders a system-mode value for display
        self.eq(s_datamodel.getTypeRepr('str', 'haha'), 'haha')
        self.eq(s_datamodel.getTypeRepr('inet:ipv4', 0x01020304), '1.2.3.4')

        # norm accepts both system and display forms
        cases = (
            ('str', 'haha', ('haha', {})),
            ('inet:ipv4', 0x01020304, (16909060, {})),
            ('inet:ipv4', '1.2.3.4', (16909060, {})),
        )
        for tname, tval, expect in cases:
            self.eq(s_datamodel.getTypeNorm(tname, tval), expect)

        # an unparseable value raises
        self.raises(BadTypeValu, s_datamodel.getTypeNorm, 'inet:ipv4', 'hahaha')

        # parse handles display-mode input
        self.eq(s_datamodel.getTypeParse('str', 'haha'), ('haha', {}))
        self.eq(s_datamodel.getTypeParse('inet:ipv4', '1.2.3.4'), (16909060, {}))
Example #2
0
    def setConfOpt(self, name, valu):
        '''
        Set a single config option for the object.
        '''
        prev = self._conf_opts.get(name)
        cdef = self.getConfDef(name)

        # normalize the incoming value when the config def declares a type
        ctyp = cdef[1].get('type')
        if ctyp is not None:
            valu, _ = s_datamodel.getTypeNorm(ctyp, valu)

        # short circuit when the value is unchanged
        if valu == prev:
            return False

        # optionally mirror the value onto an instance attribute
        attr = cdef[1].get('asloc')
        if attr is not None:
            setattr(self, attr, valu)

        self._conf_opts[name] = valu
        self._conf_defs[name][1]['valu'] = valu

        # notify listeners of the config change
        self.fire('syn:conf:set', name=name, valu=valu, oldval=prev)
        self.fire('syn:conf:set:%s' % name, name=name, valu=valu, oldval=prev)
Example #3
0
    def _initDbConn(self):
        '''
        Create and configure the psycopg2 database connection for this link.

        Retries the initial connect up to the link's 'retry' count, sleeping
        one second between attempts before re-raising the last error.

        Returns:
            The configured psycopg2 connection object.
        '''
        import psycopg2
        self._psycopg2 = psycopg2

        retry = self._link[1].get('retry', 0)

        dbinfo = self._initDbInfo()

        db = None
        tries = 0
        while db is None:
            try:
                db = psycopg2.connect(**dbinfo)
            except Exception:  # pragma: no cover
                tries += 1
                if tries > retry:
                    raise

                time.sleep(1)

        # normalize the link option to a bool before applying it to the session
        seqscan = self._link[1].get('pg:seqscan', 0)
        seqscan, _ = s_datamodel.getTypeNorm('bool', seqscan)

        # close the cursor even if SET fails, so the connection is not leaked
        # with an open cursor attached
        c = db.cursor()
        try:
            c.execute('SET enable_seqscan=%s', (seqscan, ))
        finally:
            c.close()

        return db
Example #4
0
    def test_datamodel_typefns(self):
        '''
        Smoke test the module level type helper functions in s_datamodel.
        '''
        # repr renders a system-mode value for display
        self.eq(s_datamodel.getTypeRepr('str', 'haha'), 'haha')
        self.eq(s_datamodel.getTypeRepr('inet:ipv4', 0x01020304), '1.2.3.4')

        # norm accepts system-mode values
        self.eq(s_datamodel.getTypeNorm('str', 'haha'), ('haha', {}))
        self.eq(s_datamodel.getTypeNorm('inet:ipv4', 0x01020304),
                (16909060, {}))

        # frob accepts either form and yields (None, {}) on garbage
        frobs = (
            ('str', 'haha', ('haha', {})),
            ('inet:ipv4', '1.2.3.4', (16909060, {})),
            ('inet:ipv4', 0x01020304, (16909060, {})),
            ('inet:ipv4', 'haha', (None, {})),
        )
        for tname, tval, expect in frobs:
            self.eq(s_datamodel.getTypeFrob(tname, tval), expect)

        # parse handles display-mode input
        self.eq(s_datamodel.getTypeParse('str', 'haha'), ('haha', {}))
        self.eq(s_datamodel.getTypeParse('inet:ipv4', '1.2.3.4'),
                (16909060, {}))
Example #5
0
    def getConfNorm(self, name, valu):
        '''
        Return a normalized version of valu based on type knowledge for name.

        Args:
            name (str): The name of the config option
            valu (obj): The valu of the config option

        Returns:
            (obj):  The normalized form for valu
        '''
        ctyp = self.getConfDef(name)[1].get('type')
        if ctyp is not None:
            return s_datamodel.getTypeNorm(ctyp, valu)
        # untyped options pass through unchanged with empty subs
        return valu, {}
Example #6
0
    def setConfOpt(self, name, valu):
        '''
        Set a single config option for the object.

        This will perform type normalization if the configuration option has a 'type' value set.

        Args:
            name (str): Configuration name
            valu: Value to set to the configuration option.

        Notes:
            This fires the following events, so that the EventBus can react to configuration changes. Each event
            includes the name, new valu and oldval.

            - ``syn:conf:set``
            - ``syn:conf:set:<name>``

        Returns:
            False if the normalized value equals the current value (nothing
            is stored and no events fire); None otherwise.
        '''
        oldval = self._conf_opts.get(name)

        cdef = self.getConfDef(name)

        # normalize the new value when the config def declares a type
        ctype = cdef[1].get('type')
        if ctype is not None:
            valu, _ = s_datamodel.getTypeNorm(ctype, valu)

        # short circuit when the value is unchanged
        if valu == oldval:
            return False

        # mirror the value onto an instance attribute when 'asloc' is set
        asloc = cdef[1].get('asloc')
        if asloc is not None:
            setattr(self, asloc, valu)

        self._conf_opts[name] = valu
        self._conf_defs[name][1]['valu'] = valu

        self.fire('syn:conf:set', name=name, valu=valu, oldval=oldval)
        self.fire('syn:conf:set:%s' % name,
                  name=name,
                  valu=valu,
                  oldval=oldval)
Example #7
0
    def _initDbConn(self):
        '''
        Open and configure the LMDB environment backing this store.

        Tuning options are read from the link tufo: lmdb:mapsize,
        lmdb:mapslack, lmdb:metasync, lmdb:sync, lmdb:lock and
        lmdb:maxreaders.  Sanity checks the LMDB page size, pre-grows the
        map via _ensure_map_slack(), and registers an onfini handler that
        closes the environment.
        '''
        dbinfo = self._initDbInfo()
        dbname = dbinfo.get('name')

        # Initial DB Size.  Must be < 2 GiB for 32-bit.  Can be big for 64-bit systems.  Will create
        # a file of that size.  On Windows, will actually immediately take up that much
        # disk space.
        DEFAULT_MAP_SIZE = 512 * 1024 * 1024

        # _write_lock exists solely to hold off other threads' write transactions long enough to
        # potentially increase the map size.
        self._write_lock = Lock()

        map_size = self._link[1].get('lmdb:mapsize', DEFAULT_MAP_SIZE)
        self._map_size, _ = s_datamodel.getTypeNorm('int', map_size)
        # Cap the map at 2**46 (64 TiB) on 64-bit platforms, 2**30 (1 GiB) on 32-bit.
        self._max_map_size = 2**46 if sys.maxsize > 2**32 else 2**30

        # NOTE(review): slack appears to be extra headroom kept in the map
        # beyond current usage (see _ensure_map_slack) -- confirm.
        map_slack = self._link[1].get('lmdb:mapslack', 2 ** 30)
        self._map_slack, _ = s_datamodel.getTypeNorm('int', map_slack)

        # Maximum number of 'databases', really tables.  We use 5 different tables (1 main plus
        # 3 indices and a blob store), + 10 tables for possible migration use cases.
        MAX_DBS = 5 + 10

        # flush system buffers to disk only once per transaction.  Set to False can lead to last
        # transaction loss, but not corruption

        metasync_val = self._link[1].get('lmdb:metasync', False)
        metasync, _ = s_datamodel.getTypeNorm('bool', metasync_val)
        # getTypeNorm('bool') is compared against 1 here, so it presumably
        # yields 0/1; coerce to a real bool for the lmdb keyword argument.
        metasync = (metasync == 1)

        # If sync is False, could lead to database corruption on power loss
        sync_val = self._link[1].get('lmdb:sync', True)
        sync, _ = s_datamodel.getTypeNorm('bool', sync_val)
        sync = (sync == 1)

        # Write data directly to mapped memory
        WRITEMAP = True

        # Doesn't create a subdirectory for storage files
        SUBDIR = False

        # We can disable locking, but bad things might happen if we have multiple threads
        DEFAULT_LOCK = True
        lock_val = self._link[1].get('lmdb:lock', DEFAULT_LOCK)
        lock, _ = s_datamodel.getTypeNorm('bool', lock_val)
        lock = (lock == 1)

        # Maximum simultaneous readers.
        MAX_READERS = 4
        max_readers = self._link[1].get('lmdb:maxreaders', MAX_READERS)
        max_readers, _ = s_datamodel.getTypeNorm('int', max_readers)
        if max_readers == 1:
            # NOTE(review): locking is disabled whenever only a single reader
            # is allowed -- presumably safe with one reader; verify.
            lock = False

        self.dbenv = lmdb.Environment(dbname,
                                      map_size=self._map_size,
                                      subdir=SUBDIR,
                                      metasync=metasync,
                                      writemap=WRITEMAP,
                                      max_readers=max_readers,
                                      max_dbs=MAX_DBS,
                                      sync=sync,
                                      lock=lock)

        # Check we're not running a weird version of LMDB
        if self.dbenv.stat()['psize'] != 4096:
            raise s_common.BadCoreStore(store='lmdb', mesg='Unknown version of lmdb configured')

        # Ensure we have enough room in the map for expansion
        self._ensure_map_slack()

        # Close the environment when this object is fini'd.
        def onfini():
            self.dbenv.close()
        self.onfini(onfini)