Example #1
    def process_pokemons(self, config, pokemons):
        if not pokemons:
            raise Exception("No pokemons to process")

        csv_rows = []

        for pokemon in pokemons:
            pokemon_info = pokemon_services.get_pokemon(pokemon["name"])

            values = []

            for field_path in config["csv"]["fields"]:
                try:
                    values.append(self.filter_fields(field_path, pokemon_info))
                except Exception as e:
                    logger.error(str(e))
                    logger.error(
                        f"field_path = {field_path}, pokemon_info = {pokemon_info}"
                    )
                    raise Exception("Error in filter fields") from e

            csv_rows.append(values)

            # measure_memory.measure()

            garbage_collect()

            sleep_execution(INTERVAL_REQUISITIONS)

        self.write_rows(csv_rows)
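
The snippet above relies on two helpers defined elsewhere in the project. A minimal sketch of what they presumably look like, assuming garbage_collect() is a thin wrapper around gc.collect() and sleep_execution() around time.sleep(); the INTERVAL_REQUISITIONS value below is an assumption, not the project's actual setting:

import gc
import time

INTERVAL_REQUISITIONS = 1.0  # assumed delay between API requests, in seconds

def garbage_collect():
    # Force a full collection so each fetched Pokemon payload is freed promptly.
    gc.collect()

def sleep_execution(seconds):
    # Throttle outbound requests to the upstream API.
    time.sleep(seconds)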
Example #2
def upgrade():

    from inbox.server.config import load_config
    load_config()
    from inbox.sqlalchemy.util import generate_public_id
    from inbox.server.models import session_scope

    # These all inherit HasPublicID
    from inbox.server.models.tables.base import (
        Account, Block, Contact, Message, Namespace,
        SharedFolder, Thread, User, UserSession, HasPublicID)

    classes = [
        Account, Block, Contact, Message, Namespace,
        SharedFolder, Thread, User, UserSession]

    for c in classes:
        assert issubclass(c, HasPublicID)
        print '[{0}] adding public_id column... '.format(c.__tablename__),
        sys.stdout.flush()
        op.add_column(c.__tablename__, sa.Column(
            'public_id', mysql.BINARY(16), nullable=False))

        print 'adding index... ',
        op.create_index(
            'ix_{0}_public_id'.format(c.__tablename__),
            c.__tablename__,
            ['public_id'],
            unique=False)

        print 'Done!'
        sys.stdout.flush()

    print 'Finished adding columns. \nNow generating public_ids'

    with session_scope() as db_session:
        count = 0
        for c in classes:
            garbage_collect()
            print '[{0}] Loading rows. '.format(c.__name__),
            sys.stdout.flush()
            print 'Generating public_ids',
            sys.stdout.flush()
            for r in db_session.query(c).yield_per(chunk_size):
                count += 1
                r.public_id = generate_public_id()
                if not count % chunk_size:
                    sys.stdout.write('.')
                    sys.stdout.flush()
                    db_session.commit()
                    garbage_collect()
            sys.stdout.write(' Saving. ')
            sys.stdout.flush()
            db_session.commit()
            sys.stdout.write('Done!\n')
            sys.stdout.flush()
        print '\nUpgraded OK!\n'
Example #3
    def write_rows(self, rows):
        if not rows:
            raise Exception('No rows to write')

        with open(self.local_path, 'a', newline=NEWLINE) as csvFile:
            csv_writer = csv.writer(csvFile, **CSV_WRITER)
            csv_writer.writerows(rows)

        garbage_collect()
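
write_rows depends on two module-level constants that are not shown. A plausible sketch, assuming they follow the csv module's conventions; the concrete values here are hypothetical:

import csv

# The csv docs recommend opening files with newline='' when using csv.writer.
NEWLINE = ''

# Keyword arguments unpacked into csv.writer; these defaults are assumptions.
CSV_WRITER = {'delimiter': ',', 'quoting': csv.QUOTE_MINIMAL}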
Example #4
def upgrade():
    op.add_column("transaction",
                  sa.Column("public_id", mysql.BINARY(16), nullable=True))
    op.add_column(
        "transaction",
        sa.Column("object_public_id", sa.String(length=191), nullable=True),
    )
    op.create_index("ix_transaction_public_id",
                    "transaction", ["public_id"],
                    unique=False)

    # TODO(emfree) reflect
    from inbox.ignition import main_engine
    from inbox.models.session import session_scope
    from inbox.sqlalchemy_ext.util import b36_to_bin, generate_public_id

    engine = main_engine(pool_size=1, max_overflow=0)
    Base = declarative_base()
    Base.metadata.reflect(engine)

    class Transaction(Base):
        __table__ = Base.metadata.tables["transaction"]

    with session_scope(versioned=False) as db_session:
        count = 0
        (num_transactions, ) = db_session.query(sa.func.max(
            Transaction.id)).one()
        print("Adding public ids to {} transactions".format(num_transactions))
        for pointer in range(0, num_transactions + 1, 500):
            for entry in db_session.query(Transaction).filter(
                    Transaction.id >= pointer, Transaction.id < pointer + 500):
                entry.public_id = b36_to_bin(generate_public_id())
                count += 1
                if not count % 500:
                    sys.stdout.write(".")
                    sys.stdout.flush()
                    db_session.commit()
                    garbage_collect()

    op.alter_column("transaction",
                    "public_id",
                    existing_type=mysql.BINARY(16),
                    nullable=False)

    op.add_column(
        "transaction",
        sa.Column("public_snapshot", sa.Text(length=4194304), nullable=True),
    )
    op.add_column(
        "transaction",
        sa.Column("private_snapshot", sa.Text(length=4194304), nullable=True),
    )
    op.drop_column("transaction", u"additional_data")
Example #5
def upgrade():
    op.add_column('transaction',
                  sa.Column('public_id', mysql.BINARY(16), nullable=True))
    op.add_column(
        'transaction',
        sa.Column('object_public_id', sa.String(length=191), nullable=True))
    op.create_index('ix_transaction_public_id',
                    'transaction', ['public_id'],
                    unique=False)

    from inbox.sqlalchemy_ext.util import generate_public_id, b36_to_bin
    # TODO(emfree) reflect
    from inbox.models.session import session_scope
    from inbox.ignition import main_engine
    engine = main_engine(pool_size=1, max_overflow=0)
    Base = declarative_base()
    Base.metadata.reflect(engine)

    class Transaction(Base):
        __table__ = Base.metadata.tables['transaction']

    with session_scope(versioned=False,
                       ignore_soft_deletes=False) as db_session:
        count = 0
        num_transactions, = db_session.query(sa.func.max(Transaction.id)).one()
        print 'Adding public ids to {} transactions'.format(num_transactions)
        for pointer in range(0, num_transactions + 1, 500):
            for entry in db_session.query(Transaction).filter(
                    Transaction.id >= pointer, Transaction.id < pointer + 500):
                entry.public_id = b36_to_bin(generate_public_id())
                count += 1
                if not count % 500:
                    sys.stdout.write('.')
                    sys.stdout.flush()
                    db_session.commit()
                    garbage_collect()

    op.alter_column('transaction',
                    'public_id',
                    existing_type=mysql.BINARY(16),
                    nullable=False)

    op.add_column(
        'transaction',
        sa.Column('public_snapshot', sa.Text(length=4194304), nullable=True))
    op.add_column(
        'transaction',
        sa.Column('private_snapshot', sa.Text(length=4194304), nullable=True))
    op.drop_column('transaction', u'additional_data')
Example #6
File: base.py Project: caitp/inbox
def commit_uids(db_session, log, new_uids):
    new_messages = [item.message for item in new_uids]

    # Save message part blobs before committing changes to db.
    for msg in new_messages:
        threads = [
            Greenlet.spawn(log_uncaught_errors(part.save, log), part._data)
            for part in msg.parts if hasattr(part, '_data')
        ]
        # Fatally abort if part saves error out. Messages in this
        # chunk will be retried when the sync is restarted.
        gevent_check_join(log, threads,
                          "Could not save message parts to blob store!")
        # clear data to save memory
        for part in msg.parts:
            part._data = None

    garbage_collect()

    try:
        log.info("Committing {0} UIDs".format(len(new_uids)))
        db_session.add_all(new_uids)
        db_session.commit()
    except DataError as e:
        db_session.rollback()
        log.error("Issue inserting new UIDs into database. "
                  "This probably means that an object's property is "
                  "malformed or way too long, etc.")

        for uid in new_uids:
            log.error(uid)
            import inspect
            from pprint import pformat
            log.error(inspect.getmembers(uid))
            try:
                log.error(pformat(uid.__dict__, indent=2))
            except AttributeError:
                pass

            for part in uid.message.parts:
                log.error(inspect.getmembers(part))
                try:
                    log.error(pformat(part.__dict__, indent=2))
                except AttributeError:
                    pass

        raise e
Example #7
File: base.py Project: aceofspades/inbox
def commit_uids(db_session, log, new_uids):
    new_messages = [item.message for item in new_uids]

    # Save message part blobs before committing changes to db.
    for msg in new_messages:
        threads = [Greenlet.spawn(retry_with_logging, lambda:
                                  part.save(part._data), log)
                   for part in msg.parts if hasattr(part, '_data')]
        # Fatally abort if part saves error out. Messages in this
        # chunk will be retried when the sync is restarted.
        gevent_check_join(log, threads,
                          "Could not save message parts to blob store!")
        # clear data to save memory
        for part in msg.parts:
            part._data = None

    garbage_collect()

    try:
        log.info("Committing {0} UIDs".format(len(new_uids)))
        db_session.add_all(new_uids)
        db_session.commit()
    except DataError as e:
        db_session.rollback()
        log.error("Issue inserting new UIDs into database. "
                  "This probably means that an object's property is "
                  "malformed or way too long, etc.")

        for uid in new_uids:
            log.error(uid)
            import inspect
            from pprint import pformat
            log.error(inspect.getmembers(uid))
            try:
                log.error(pformat(uid.__dict__, indent=2))
            except AttributeError:
                pass

            for part in uid.message.parts:
                log.error(inspect.getmembers(part))
                try:
                    log.error(pformat(part.__dict__, indent=2))
                except AttributeError:
                    pass

        raise e
Example #8
def upgrade():
    op.add_column('transaction',
                  sa.Column('public_id', mysql.BINARY(16), nullable=True))
    op.add_column('transaction',
                  sa.Column('object_public_id', sa.String(length=191),
                            nullable=True))
    op.create_index('ix_transaction_public_id', 'transaction', ['public_id'],
                    unique=False)

    from inbox.sqlalchemy_ext.util import generate_public_id, b36_to_bin
    # TODO(emfree) reflect
    from inbox.models.session import session_scope
    from inbox.ignition import main_engine
    engine = main_engine(pool_size=1, max_overflow=0)
    Base = declarative_base()
    Base.metadata.reflect(engine)

    class Transaction(Base):
        __table__ = Base.metadata.tables['transaction']

    with session_scope(versioned=False) as db_session:
        count = 0
        num_transactions, = db_session.query(sa.func.max(Transaction.id)).one()
        print 'Adding public ids to {} transactions'.format(num_transactions)
        for pointer in range(0, num_transactions + 1, 500):
            for entry in db_session.query(Transaction).filter(
                    Transaction.id >= pointer,
                    Transaction.id < pointer + 500):
                entry.public_id = b36_to_bin(generate_public_id())
                count += 1
                if not count % 500:
                    sys.stdout.write('.')
                    sys.stdout.flush()
                    db_session.commit()
                    garbage_collect()

    op.alter_column('transaction', 'public_id',
                    existing_type=mysql.BINARY(16), nullable=False)

    op.add_column('transaction', sa.Column('public_snapshot',
                                           sa.Text(length=4194304),
                                           nullable=True))
    op.add_column('transaction', sa.Column('private_snapshot',
                                           sa.Text(length=4194304),
                                           nullable=True))
    op.drop_column('transaction', u'additional_data')
Example #9
    def get_run_metrics(
        self, test_result: TestResult, run_metrics: RunMetrics
    ) -> MaybeMetricValuesForSingleIteration:
        """Discards the trace when done to avoid using too much memory."""
        already_had = test_result in self.path_to_file
        # TODO: may need join info
        res = map_ok(
            self.get(test_result, need_mechanisms_and_reasons=False, need_join_info=False),
            lambda trace: map_to_mapping(run_metrics, trace.metric),
        )

        if not already_had:
            del self.path_to_file[test_result]
            # Ensure the trace is freed.
            # Without this, Python won't do a collection, and C# will run out of memory.
            # C# garbage collection isn't enough since Python needs to do GC to release the trace.
            garbage_collect()
        return res
Example #10
File: imap.py Project: cenk/inbox
def commit_uids(db_session, log, new_imapuids):
    new_messages = [item.message for item in new_imapuids]

    # Save message part blobs before committing changes to db.
    for msg in new_messages:
        threads = [Greenlet.spawn(part.save, part._data)
                   for part in msg.parts if hasattr(part, '_data')]
        # Fatally abort if part saves error out. Messages in this
        # chunk will be retried when the sync is restarted.
        gevent_check_join(log, threads,
                          "Could not save message parts to blob store!")
        # clear data to save memory
        for part in msg.parts:
            part._data = None

    garbage_collect()

    db_session.add_all(new_imapuids)
    db_session.commit()
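
gevent_check_join is used by every commit_uids variant above but defined elsewhere in the inbox codebase. A minimal sketch of what it presumably does, assuming it blocks until all greenlets finish and fails loudly if any save raised; this implementation is hypothetical:

import gevent

def gevent_check_join(log, greenlets, error_message):
    # Wait for every save greenlet to complete.
    gevent.joinall(greenlets)
    # Greenlet.successful() is False when the greenlet raised an exception.
    failed = [g for g in greenlets if not g.successful()]
    if failed:
        log.error(error_message)
        raise failed[0].exception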
Example #11
def upgrade():
    # These all inherit HasPublicID
    from inbox.models import (
        Account,
        Block,
        Contact,
        HasPublicID,
        Message,
        Namespace,
        SharedFolder,
        Thread,
        User,
        UserSession,
    )
    from inbox.models.session import session_scope
    from inbox.sqlalchemy_ext.util import generate_public_id

    classes = [
        Account,
        Block,
        Contact,
        Message,
        Namespace,
        SharedFolder,
        Thread,
        User,
        UserSession,
    ]

    for c in classes:
        assert issubclass(c, HasPublicID)
        print "[{0}] adding public_id column... ".format(c.__tablename__),
        sys.stdout.flush()
        op.add_column(c.__tablename__,
                      sa.Column("public_id", mysql.BINARY(16), nullable=False))

        print "adding index... ",
        op.create_index(
            "ix_{0}_public_id".format(c.__tablename__),
            c.__tablename__,
            ["public_id"],
            unique=False,
        )

        print "Done!"
        sys.stdout.flush()

    print "Finished adding columns. \nNow generating public_ids"

    with session_scope() as db_session:
        count = 0
        for c in classes:
            garbage_collect()
            print "[{0}] Loading rows. ".format(c.__name__),
            sys.stdout.flush()
            print "Generating public_ids",
            sys.stdout.flush()
            for r in db_session.query(c).yield_per(chunk_size):
                count += 1
                r.public_id = generate_public_id()
                if not count % chunk_size:
                    sys.stdout.write(".")
                    sys.stdout.flush()
                    db_session.commit()
                    garbage_collect()
            sys.stdout.write(" Saving. ".format(c.__name__)),
            # sys.stdout.flush()
            sys.stdout.flush()
            db_session.commit()
            sys.stdout.write("Done!\n")
            sys.stdout.flush()
        print "\nUpdgraded OK!\n"