Example #1
 def test_refresh_l_size(self):
     pgb_stream = StringIO()
     expected_stream = StringIO()
     pgb = ProgressBar(20, 3, stream=pgb_stream)
     pgb.refresh()
     expected_stream.write("\r["+' '*3+"]")
     self.assertEqual(pgb_stream.getvalue(), expected_stream.getvalue())
Example #2
 def test_refresh(self):
     pgb_stream = StringIO()
     expected_stream = StringIO()
     pgb = ProgressBar(20, stream=pgb_stream)
     self.assertEqual(pgb_stream.getvalue(), expected_stream.getvalue())  # nothing printed before refresh yet
     pgb.refresh()
     expected_stream.write("\r["+' '*20+"]")
     self.assertEqual(pgb_stream.getvalue(), expected_stream.getvalue())
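
A minimal usage sketch inferred from the two refresh tests above; the import path for ProgressBar is an assumption, not stated in these examples.

from io import StringIO
from logilab.common.shellutils import ProgressBar  # assumed import path

stream = StringIO()
pgb = ProgressBar(20, 3, stream=stream)  # 20 operations, 3-character bar
pgb.refresh()                            # draw the (still empty) bar
print(repr(stream.getvalue()))           # '\r[   ]', as expected above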
Example #3
def sqlexec(sqlstmts,
            cursor_or_execute,
            withpb=True,
            pbtitle='',
            delimiter=';',
            cnx=None):
    """execute sql statements ignoring DROP/ CREATE GROUP or USER statements
    error.

    :sqlstmts_as_string: a string or a list of sql statements.
    :cursor_or_execute: sql cursor or a callback used to execute statements
    :cnx: if given, commit/rollback at each statement.

    :withpb: if True, display a progresse bar
    :pbtitle: a string displayed as the progress bar title (if `withpb=True`)

    :delimiter: a string used to split sqlstmts (if it is a string)

    Return the failed statements (same type as sqlstmts)
    """
    if hasattr(cursor_or_execute, 'execute'):
        execute = cursor_or_execute.execute
    else:
        execute = cursor_or_execute
    sqlstmts_as_string = False
    if isinstance(sqlstmts, str):
        sqlstmts_as_string = True
        sqlstmts = sqlstmts.split(delimiter)
    if withpb:
        if sys.stdout.isatty():
            pb = ProgressBar(len(sqlstmts), title=pbtitle)
        else:
            pb = DummyProgressBar()
    failed = []
    for sql in sqlstmts:
        sql = sql.strip()
        if withpb:
            pb.update()
        if not sql:
            continue
        try:
            # some DB-API modules don't accept unicode for the SQL string
            execute(str(sql))
        except Exception as ex:
            print(ex, file=sys.stderr)
            if cnx:
                cnx.rollback()
            failed.append(sql)
        else:
            if cnx:
                cnx.commit()
    if withpb:
        print()
    if sqlstmts_as_string:
        failed = delimiter.join(failed)
    return failed
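
A hypothetical call of the sqlexec function above, using the standard sqlite3 module as the DB-API backend; the statements and table name are placeholders.

import sqlite3

cnx = sqlite3.connect(':memory:')
stmts = "CREATE TABLE demo (id INTEGER); INSERT INTO demo VALUES (1)"
failed = sqlexec(stmts, cnx.cursor(), withpb=False, cnx=cnx)
assert failed == ''  # an empty string means every statement succeeded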
Example #4
 def test_update_relative(self):
     pgb_stream = StringIO()
     expected_stream = StringIO()
     size=20
     pgb = ProgressBar(100, size, stream=pgb_stream)
     last = 0
     for dots in range(5, 105, 5):
         pgb.update(5, exact=False)
         dots //= 5
         expected_stream.write("\r["+('.'*dots)+(' '*(size-dots))+"]")
         self.assertEqual(pgb_stream.getvalue(), expected_stream.getvalue())
 def test_update_exact(self):
     pgb_stream = StringIO()
     expected_stream = StringIO()
     size=20
     pgb = ProgressBar(100, size, stream=pgb_stream)
     last = 0
     for dots in range(10, 105, 15):
         pgb.update(dots, exact=True)
         dots //= 5
         expected_stream.write("\r["+('='*dots)+(' '*(size-dots))+"]")
         self.assertEqual(pgb_stream.getvalue(), expected_stream.getvalue())
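
A short sketch of the two update modes these tests exercise: with exact=False the argument is added to the current progress, with exact=True it replaces it. The import path is an assumption.

from io import StringIO
from logilab.common.shellutils import ProgressBar  # assumed import path

stream = StringIO()
pgb = ProgressBar(100, 20, stream=stream)  # 100 operations, 20-character bar
pgb.update(5, exact=False)   # relative: advance progress by 5 operations
pgb.update(50, exact=True)   # exact: set progress to 50 completed operations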
Example #7
def reindex_entities(schema, cnx, withpb=True, etypes=None):
    """reindex all entities in the repository"""
    # deactivate the modification_date hook since we don't want modification
    # dates to be updated by the reindexing
    repo = cnx.repo
    dbhelper = repo.system_source.dbhelper
    cursor = cnx.cnxset.cu
    if not dbhelper.has_fti_table(cursor):
        print('no text index table')
        dbhelper.init_fti(cursor)
    repo.system_source.do_fti = True  # ensure full-text indexation is activated
    if etypes is None:
        print('Reindexing entities')
        etypes = set()
        for eschema in schema.entities():
            if eschema.final:
                continue
            indexable_attrs = tuple(eschema.indexable_attributes()) # generator
            if not indexable_attrs:
                continue
            for container in etype_fti_containers(eschema):
                etypes.add(container)
        # clear fti table first
        cnx.system_sql('DELETE FROM %s' % dbhelper.fti_table)
    else:
        print('Reindexing entities of type %s' % \
              ', '.join(sorted(str(e) for e in etypes)))
        # clear fti table first. Use subquery for sql compatibility
        cnx.system_sql("DELETE FROM %s WHERE EXISTS(SELECT 1 FROM ENTITIES "
                       "WHERE eid=%s AND type IN (%s))" % (
                           dbhelper.fti_table, dbhelper.fti_uid_attr,
                           ','.join("'%s'" % etype for etype in etypes)))
    if withpb:
        pb = ProgressBar(len(etypes) + 1)
        pb.update()
    # reindex entities by generating rql queries which set all indexable
    # attributes to their current value
    source = repo.system_source
    for eschema in etypes:
        etype_class = cnx.vreg['etypes'].etype_class(str(eschema))
        for rset in etype_class.cw_fti_index_rql_limit(cnx):
            source.fti_index_entities(cnx, rset.entities())
            # clear entity cache to avoid high memory consumption on big tables
            cnx.drop_entity_cache()
        if withpb:
            pb.update()
    if withpb:
        pb.finish()
Example #8
 def _update_test(self, nbops, expected, size=None):
     pgb_stream = StringIO()
     expected_stream = StringIO()
     if size is None:
         pgb = ProgressBar(nbops, stream=pgb_stream)
         size=20
     else:
         pgb = ProgressBar(nbops, size, stream=pgb_stream)
     last = 0
     for round in expected:
         if not hasattr(round, '__int__'):
             dots, update = round
         else:
             dots, update = round, None
         pgb.update()
         if update or (update is None and dots != last):
             last = dots
             expected_stream.write("\r["+('.'*dots)+(' '*(size-dots))+"]")
         self.assertEqual(pgb_stream.getvalue(), expected_stream.getvalue())
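
A hypothetical invocation of the helper above, assuming each update() advances the bar by size/nbops cells; the expected values are illustrative, not taken from the original test suite.

 def test_update(self):
     # with nbops=5 and the default 20-character bar, each update()
     # is assumed to add 4 dots
     self._update_test(5, [4, 8, 12, 16, 20])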
Example #9
def serialize_schema(cnx, schema):
    """synchronize schema and permissions in the database according to
    current schema
    """
    _title = '-> storing the schema in the database '
    print(_title, end=' ')
    execute = cnx.execute
    eschemas = schema.entities()
    pb_size = (len(eschemas + schema.relations()) + len(CONSTRAINTS) +
               len([x for x in eschemas if x.specializes()]))
    if sys.stdout.isatty():
        pb = ProgressBar(pb_size, title=_title)
    else:
        pb = DummyProgressBar()
    groupmap = group_mapping(cnx, interactive=False)
    # serialize all entity types, ensuring CWEType is serialized first for
    # proper is / is_instance_of insertion
    eschemas.remove(schema.eschema('CWEType'))
    eschemas.insert(0, schema.eschema('CWEType'))
    for eschema in eschemas:
        execschemarql(execute, eschema, eschema2rql(eschema, groupmap))
        pb.update()
    # serialize constraint types
    cstrtypemap = {}
    rql = 'INSERT CWConstraintType X: X name %(ct)s'
    for cstrtype in CONSTRAINTS:
        cstrtypemap[cstrtype] = execute(rql, {'ct': cstrtype},
                                        build_descr=False)[0][0]
        pb.update()
    # serialize relations
    for rschema in schema.relations():
        # skip virtual relations such as eid, has_text and identity
        if rschema in VIRTUAL_RTYPES:
            pb.update()
            continue
        if rschema.rule:
            execschemarql(execute, rschema, crschema2rql(rschema, groupmap))
            pb.update()
            continue
        execschemarql(execute, rschema, rschema2rql(rschema, addrdef=False))
        if rschema.symmetric:
            rdefs = [
                rdef for k, rdef in rschema.rdefs.items()
                if (rdef.subject, rdef.object) == k
            ]
        else:
            rdefs = rschema.rdefs.values()
        for rdef in rdefs:
            execschemarql(execute, rdef, rdef2rql(rdef, cstrtypemap, groupmap))
        pb.update()
    # serialize unique_together constraints
    for eschema in eschemas:
        if eschema._unique_together:
            execschemarql(execute, eschema, uniquetogether2rqls(eschema))
    # serialize yams inheritance relationships
    for rql, kwargs in specialize2rql(schema):
        execute(rql, kwargs, build_descr=False)
        pb.update()
    print()
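
A sketch of the tty-detection pattern used by sqlexec and serialize_schema above: draw a real bar only when stdout is a terminal, otherwise fall back to the no-op DummyProgressBar (import path assumed).

import sys
from logilab.common.shellutils import ProgressBar, DummyProgressBar  # assumed import path

if sys.stdout.isatty():
    pb = ProgressBar(10, title='-> demo ')  # interactive: draw a real bar
else:
    pb = DummyProgressBar()                 # redirected output: no-op bar
for _ in range(10):
    pb.update()
print()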