Exemple #1
0
 def __init__(self, settings):
     """Build the static UI, the drawing surface, and reset the game state."""
     super().__init__(settings)
     # Static UI: board background plus the next/hold preview frames.
     self.img_field = Image({'position': (0, 0), 'path': 'static/field'})
     self.img_next = Image({'position': (300, 77), 'path': 'static/next'})
     self.img_hold = Image({'position': (450, 77), 'path': 'static/hold'})
     self.txt_score = Text({'position': (300, 300), 'text': 'Score: 0'})
     # Off-screen surface the board is rendered onto each frame.
     self.current = pygame.Surface(
         (BOARD_SIZE[0] * TILE, BOARD_SIZE[1] * TILE))
     self.base = Base()
     self.figure = Figure()
     self.next = Figure()
     self.hold = None
     self.hold_first = True
     self.score = 0
     self.state = '__WAIT__'
     # Board occupancy grid indexed as field[row][col]; 0 means empty.
     self.field = [[0] * BOARD_SIZE[0] for _ in range(BOARD_SIZE[1])]
Exemple #2
0
def run_on_days(
    app: Base,
    callback: Callable[..., None],
    day_list: list,
    start: datetime.time,
    **kwargs
) -> Callable:
    """Run a callback on certain days (at the specified time).

    Schedules one weekly repeating timer per matching day and returns a
    zero-argument function that cancels every timer created here.
    """
    today = app.date()
    upcoming_days = []

    # Today only qualifies if its start time has not already passed.
    if datetime.datetime.combine(today, start) > app.datetime():
        if today.strftime("%A") in day_list:
            upcoming_days.append(today)

    # Scan the next seven days, stopping once every requested day is covered.
    for offset in range(1, 8):
        candidate = today + datetime.timedelta(days=offset)
        if candidate.strftime("%A") in day_list:
            if len(upcoming_days) < len(day_list):
                upcoming_days.append(candidate)

    # One repeating timer per matched day; 604800 seconds = one week.
    handles = [
        app.run_every(
            callback, datetime.datetime.combine(day, start), 604800, **kwargs)
        for day in upcoming_days
    ]

    def cancel():
        """Cancel all of the timer handles created above."""
        for handle in handles:
            app.cancel_timer(handle)

    return cancel
Exemple #3
0
def main(config):
    """Train or test a re-id model depending on ``config.mode``.

    'train': optionally resume (explicit epoch or newest checkpoint), then
    run meta-learning epochs followed by graph-based epochs, evaluating
    periodically. 'test': resume a specific model and evaluate once.
    """
    loader = Loader(config)
    base = Base(config, loader)
    make_dirs(base.output_path)
    make_dirs(base.save_logs_path)
    make_dirs(base.save_model_path)
    logger = Logger(os.path.join(base.save_logs_path, 'log.txt'))
    logger(config)

    if config.mode == 'train':
        # Explicit resume takes precedence when a non-negative epoch is given.
        if config.resume_train_epoch >= 0:
            base.resume_model(config.resume_train_epoch)
            start_train_epoch = config.resume_train_epoch
        else:

            start_train_epoch = 0

        # Otherwise pick up the newest checkpoint found on disk.
        if config.auto_resume_training_from_lastest_step:
            root, _, files = os_walk(base.save_model_path)
            if len(files) > 0:
                # Checkpoint files end in '_<epoch>.pkl'.
                indexes = []
                for file in files:
                    indexes.append(int(
                        file.replace('.pkl', '').split('_')[-1]))
                indexes = sorted(list(set(indexes)), reverse=False)
                base.resume_model(indexes[-1])
                start_train_epoch = indexes[-1]
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), indexes[-1]))

        for current_epoch in range(start_train_epoch,
                                   config.total_train_epoch):
            base.save_model(current_epoch)

            # Early epochs use meta-learning; later ones train with the graph.
            if current_epoch < config.use_graph:
                _, result = train_meta_learning(base, loader)
                logger('Time: {}; Epoch: {}; {}'.format(
                    time_now(), current_epoch, result))
                # Evaluate every 40 epochs in the meta-learning phase.
                if current_epoch + 1 >= 1 and (current_epoch + 1) % 40 == 0:
                    mAP, CMC = test(config, base, loader)
                    logger(
                        'Time: {}; Test on Target Dataset: {}, \nmAP: {} \n Rank: {}'
                        .format(time_now(), config.target_dataset, mAP, CMC))
            else:
                _, result = train_with_graph(config, base, loader)
                logger('Time: {}; Epoch: {}; {}'.format(
                    time_now(), current_epoch, result))
                # Evaluate every 5 epochs in the graph-training phase.
                if current_epoch + 1 >= 1 and (current_epoch + 1) % 5 == 0:
                    mAP, CMC = test_with_graph(config, base, loader)
                    logger(
                        'Time: {}; Test on Target Dataset: {}, \nmAP: {} \n Rank: {}'
                        .format(time_now(), config.target_dataset, mAP, CMC))

    elif config.mode == 'test':
        base.resume_model(config.resume_test_model)
        mAP, CMC = test_with_graph(config, base, loader)
        logger('Time: {}; Test on Target Dataset: {}, \nmAP: {} \n Rank: {}'.
               format(time_now(), config.target_dataset, mAP, CMC))
Exemple #4
0
 def start(self):
     """Reset all per-game state and switch into the running state."""
     self.base = Base()
     self.figure = Figure()
     self.next = Figure()
     self.hold = None
     self.score = 0
     self.txt_score.set_text('Score: 0')
     self.state = '__GAME__'
     # Fresh empty board: rows of zeros, one list per row.
     self.field = [[0] * BOARD_SIZE[0] for _ in range(BOARD_SIZE[1])]
 def test_save(self):
     """Saving a TestType persists exactly one document with its fields."""
     from core.TestType import TestType
     from core.Base import Base
     type_name = str(uuid.uuid4())
     doc_fields = [str(uuid.uuid4()) for _ in range(3)]
     TestType(type_name, doc_fields).save()
     stored = Base().get_all(TestType.collection, {})
     assert stored.count() == 1
     assert stored[0]['type'] == type_name
     assert stored[0]['doc_fields'] == doc_fields
Exemple #6
0
 def get_manga_chapt(self, mhash, chash):
     """Fetch the single chapter row matching the manga and chapter hashes."""
     params = {"mhash": mhash, "chash": chash}
     with self.db.connect() as connection:
         cursor = connection.cursor()
         return cursor.execute(
             Base.source("sql/get_chapter.sql"), params).fetchone()
Exemple #7
0
 def manga(self) -> dict:
     """Return the manga row for ``self.hash``, caching it on first lookup.

     Fix: the fetched row was previously returned without being stored in
     ``self._MANGA``, so every call re-queried the database. Caching here
     mirrors the lazy-load pattern of the sibling ``authors``/``genres``
     accessors.
     """
     if not self._MANGA:
         with self.connection as c:
             self._MANGA = c.execute(
                 Base.source("sql/get_manga_by_hash.sql"), {
                     "hash": self.hash
                 }).fetchone()
     return self._MANGA
Exemple #8
0
    def get_queued_by_hash(self, uhash):
        """Return the queued-URL row whose hash equals *uhash*, or None."""
        query = Base.source("sql/url_exists.sql")
        with self.db.connect() as connection:
            cursor = connection.cursor()
            return cursor.execute(query, {"hash": uhash}).fetchone()
Exemple #9
0
    def get_by_cookie(self, cookie):
        """Return the user row associated with *cookie*, or None."""
        query = Base.source("sql/user_by_cookie.sql")
        with self.db.connect() as connection:
            cursor = connection.cursor()
            return cursor.execute(query, {'cookie': cookie}).fetchone()
Exemple #10
0
 def test_save(self):
     """Saving a Test persists one document with id, owner, type and timestamp."""
     from core.Test import Test
     from core.Base import Base
     test_id = str(uuid.uuid4())
     owner = str(uuid.uuid4())
     test_type = str(uuid.uuid4())
     test = Test(test_id, owner, test_type)
     now = datetime.datetime.now()
     assert test.save()
     stored = Base().get_all(Test.collection, {})
     assert stored.count() == 1
     record = stored[0]
     assert record['_id'] == test_id
     assert record['owner'] == owner
     assert record['test_id'] == test_id
     assert record['type'] == test_type
     # The saved timestamp must be no later than "now" (1 s tolerance).
     assert record['last_seen'] < now + datetime.timedelta(seconds=1)
 def test_remove(self):
     """Removing one of two saved statuses leaves only the other behind."""
     from core.Status import Status
     from core.Base import Base
     kept_id = str(uuid.uuid4())
     kept_status = random.choice(['SUCCESS', 'FAILURE'])
     test_type = str(uuid.uuid4())
     details = {'browser': random.choice(['Firefox', 'Chrome'])}
     Status(kept_id, test_type, kept_status, details=details, last=True).save()
     removed_id = str(uuid.uuid4())
     removed_status = random.choice(['SUCCESS', 'FAILURE'])
     doomed = Status(removed_id, test_type, removed_status, details=details, last=True)
     doomed.save()
     doomed.remove()
     remaining = Base().get_all(Status.collection, {})
     assert remaining.count() == 1
     assert remaining[0]['test_id'] == kept_id
Exemple #12
0
 def test_save(self):
     """An Index persists type/field/values and can be re-fetched via get()."""
     from core.Index import Index
     from core.Base import Base
     my_type = str(uuid.uuid4())
     field = str(uuid.uuid4())
     values = [str(uuid.uuid4()) for _ in range(3)]
     Index(my_type, field, values).save()
     stored = Base().get_all(Index.collection, {})
     assert stored.count() == 1
     assert stored[0]['type'] == my_type
     assert stored[0]['field'] == field
     assert stored[0]['values'] == values
     # A fresh Index without values starts empty and is populated by get().
     refetched = Index(my_type, field)
     assert refetched._values is None
     refetched = refetched.get()
     assert refetched._values == values
Exemple #13
0
    def create_tables(self):
        """Create the database schema by executing the bundled SQL script.

        Fix: the connection is now closed in a ``finally`` block, so a
        failing ``executescript`` no longer leaks an open handle.
        """
        connection = self.db.connect()
        try:
            connection.cursor().executescript(
                Base.source("sql/create_tables.sql"))
            connection.commit()
        finally:
            connection.close()
Exemple #14
0
 def set_readed(self, uiid, ciid, piid):
     """Mark page *piid* of chapter *ciid* as read for user *uiid*."""
     params = {
         "miid": self.miid,
         "pgid": piid,
         "uiid": uiid,
         "ciid": ciid,
     }
     with self.connection as c:
         c.execute(Base.source("sql/set_page_readed.sql"), params)
Exemple #15
0
    def authors(self) -> list:
        """Return the author names for this manga, loading them lazily once."""
        if not self._AUTHORS:
            with self.connection as c:
                rows = c.execute(
                    Base.source("sql/select_authors_by_miid.sql"),
                    {"miid": self.miid}).fetchall()
                self._AUTHORS = [row["value"] for row in rows]

        return self._AUTHORS
Exemple #16
0
    def genres(self) -> list:
        """Return the genre names for this manga, loading them lazily once."""
        if not self._GENRES:
            with self.connection as c:
                rows = c.execute(
                    Base.source("sql/select_genres_by_miid.sql"),
                    {"miid": self.miid}).fetchall()
                self._GENRES = [row["value"] for row in rows]

        return self._GENRES
Exemple #17
0
def run_on_weekdays(
    app: Base, callback: Callable[..., None], start: datetime.time, **kwargs
) -> list:
    """Run a callback on weekdays (at the specified time).

    NOTE(review): the working week here is Sunday-Thursday, which matches
    the Friday/Saturday list in ``run_on_weekend_days`` -- confirm this
    locale convention is intended.
    """
    weekdays = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday"]
    return app.run_on_days(callback, weekdays, start, **kwargs)
Exemple #18
0
    def get_readed_count(self, ciid, uiid):
        """Return how many pages of chapter *ciid* user *uiid* has read.

        Returns 0 immediately when no user id is supplied.
        """
        if not uiid:
            return 0

        with self.connection as c:
            # NOTE(review): assumes the count query always yields a row and
            # that fetchone() returns a mapping supporting .get() -- confirm
            # the row factory; a plain sqlite3.Row has no .get() method.
            return c.execute(Base.source("sql/select_readed_count.sql"), {
                "miid": self.miid,
                "ciid": ciid,
                "uiid": uiid
            }).fetchone().get("readed", 0)
 def test_save(self):
     """Saving a Status cascades: it creates Status, Test, Index and TestType
     documents, and unknown status values are stored as 'CUSTOM' with the
     original value preserved in the details."""
     from core.Status import Status
     from core.Test import Test
     from core.Index import Index
     from core.TestType import TestType
     from core.Base import Base
     test_id = str(uuid.uuid4())
     test_status = random.choice(['SUCCESS', 'FAILURE'])
     test_type = str(uuid.uuid4())
     details = {'browser': random.choice(['Firefox', 'Chrome'])}
     status = Status(test_id, test_type, test_status, details=details)
     # Declare 'browser' as an indexed detail field for this type.
     TestType(test_type, doc_fields_to_index=['browser']).save()
     now = datetime.datetime.now()
     status.save()
     # The status document itself.
     ast = Base().get_all(Status.collection, {})
     assert ast.count() == 1
     assert ast[0]['test_id'] == test_id
     assert ast[0]['status'] == test_status
     assert ast[0]['details'] == details
     assert ast[0]['type'] == test_type
     assert ast[0]['on'] < now + datetime.timedelta(seconds=1)
     # A Test document is created alongside the status.
     at = Base().get_all(Test.collection, {})
     assert at.count() == 1
     assert at[0]['test_id'] == test_id
     assert at[0]['type'] == test_type
     assert at[0]['last_seen'] < ast[0]['on'] + datetime.timedelta(seconds=1)
     # The indexed detail field produced an Index document.
     st = Base().get_one(Index.collection, {})
     assert st['type'] == test_type
     assert st['field'] == 'browser'
     assert st['values'] == [details['browser']]
     st = Base().get_one(TestType.collection, {})
     assert st['type'] == test_type
     assert st['doc_fields'] == ['browser']
     # A non-standard status value is normalized to 'CUSTOM' on save,
     # keeping the original value in details['original_status'].
     test_id = str(uuid.uuid4())
     test_status = 'TO_RERUN'
     test_type = str(uuid.uuid4())
     status = Status(test_id, test_type, test_status)
     now = datetime.datetime.now()
     status.save()
     st = Base().get_one(Status.collection, {'test_id': test_id})
     assert st['status'] == 'CUSTOM'
     assert st['details']['original_status'] == test_status
 def test_update_last(self):
     """After update_last(), exactly one status carries the last=True flag."""
     from core.Status import Status
     from core.Base import Base
     test_id1 = str(uuid.uuid4())
     test_status1 = random.choice(['SUCCESS', 'FAILURE'])
     test_type = str(uuid.uuid4())
     details = {'browser': random.choice(['Firefox', 'Chrome'])}
     first = Status(test_id1, test_type, test_status1, details=details, last=True)
     first.save()
     test_id2 = str(uuid.uuid4())
     test_status2 = random.choice(['SUCCESS', 'FAILURE'])
     second = Status(test_id2, test_type, test_status2, details=details, last=True)
     second.save()
     assert Base().get_one(Status.collection, {})['last'] == True
     second.update_last()
     flagged = Base().get_all(Status.collection, {'last': True})
     assert flagged.count() == 1
     assert Base().get_one(Status.collection, {'test_id': test_id2})['last'] == False
Exemple #21
0
    def get_readed(self, ciid, uiid):
        """Return the ids of pages user *uiid* has read in chapter *ciid*.

        Returns an empty list when no user id is supplied.
        """
        if not uiid:
            return []

        params = {"miid": self.miid, "ciid": ciid, "uiid": uiid}
        with self.connection as c:
            rows = c.execute(
                Base.source("sql/select_readed.sql"), params).fetchall()

            return [row["pageid"] for row in rows]
 def test_get_last(self):
     """get_last() returns the most recent status for a test id, and the
     Test document's last_seen tracks the newest save."""
     from core.Status import Status
     from core.Test import Test
     from core.Base import Base
     test_id = str(uuid.uuid4())
     test_status1 = 'FAILURE'
     details = {'browser': random.choice(['Firefox', 'Chrome'])}
     test_type = str(uuid.uuid4())
     status1 = Status(test_id, test_type, test_status1, details=details)
     status1.save_and_update()
     at = Base().get_all(Test.collection, {})
     assert at.count() == 1
     assert at[0]['test_id'] == test_id
     # A second save for the same test id must not create a second Test doc.
     test_status2 = 'SUCCESS'
     status2 = Status(test_id, test_type, test_status2, details=details)
     status2.save_and_update()
     at = Base().get_all(Test.collection, {})
     assert at.count() == 1
     # Backdate both statuses by 3 s so get_last() ordering is unambiguous.
     Base().upsert_by_id(Status.collection, bson.ObjectId(status1._id), {Status._on: datetime.datetime.now() - datetime.timedelta(seconds=3)})
     Base().upsert_by_id(Status.collection, bson.ObjectId(status2._id), {Status._on: datetime.datetime.now() - datetime.timedelta(seconds=3)})
     sl = Status(test_id).get_last()
     assert sl._status == 'SUCCESS'
     assert sl._test_id == test_id
     at = Base().get_all(Test.collection, {})
     assert at.count() == 1
     # last_seen was written at save time, i.e. after the backdated 'on'.
     assert at[0]['last_seen'] > Status(base_id=status2._id).get()._on + datetime.timedelta(seconds=1)
Exemple #23
0
 def test_index(self):
     """Saving statuses maintains one Index document per (type, field),
     accumulating the distinct detail values seen for each field."""
     from core.Index import Index
     from core.Status import Status
     from core.TestType import TestType
     from core.Base import Base
     test_id = str(uuid.uuid4())
     test_status = random.choice(['SUCCESS', 'FAILURE'])
     test_type = str(uuid.uuid4())
     field1 = 'browser'
     field2 = 'environment'
     details1 = {field1: 'Firefox'}
     # Register both detail fields as indexable for this test type.
     TestType(test_type, doc_fields_to_index=[field1, field2]).save()
     status1 = Status(test_id, test_type, test_status, details=details1)
     details2 = {field1: 'Chrome'}
     status2 = Status(test_id, test_type, test_status, details=details2)
     details3 = {field2: 'master'}
     status3 = Status(test_id, test_type, test_status, details=details3)
     # First save creates the 'browser' index with a single value.
     status1.save()
     ast = Base().get_all(Index.collection, {})
     assert ast.count() == 1
     assert ast[0]['type'] == test_type
     assert ast[0]['field'] == field1
     assert ast[0]['values'] == ['Firefox']
     # Second save extends the existing 'browser' index, no new document.
     status2.save()
     ast = Base().get_all(Index.collection, {})
     assert ast.count() == 1
     assert sorted(ast[0]['values']) == sorted(['Chrome', 'Firefox'])
     # A different field creates a second index document.
     status3.save()
     ast = Base().get_all(Index.collection, {})
     assert ast.count() == 2
     ast = Base().get_all(Index.collection, {'field': 'browser'})
     assert ast.count() == 1
     assert sorted(ast[0]['values']) == sorted(['Chrome', 'Firefox'])
     ast = Base().get_all(Index.collection, {'field': 'environment'})
     assert ast.count() == 1
     assert ast[0]['values'] == ['master']
 def test_purge(self):
     """purge() removes only stale statuses (here: one backdated by 8 days),
     keeping the recent ones."""
     from core.Status import Status
     from core.Base import Base
     test_id = str(uuid.uuid4())
     test_status1 = 'FAILURE'
     details = {'browser': random.choice(['Firefox', 'Chrome'])}
     test_type = str(uuid.uuid4())
     status1 = Status(test_id, test_type, test_status1, details=details)
     status1.save_and_update()
     ast = Base().get_all(Status.collection, {})
     # Backdate status1 by 8 days so it falls outside the retention window.
     Base().upsert_by_id(Status.collection, bson.ObjectId(status1._id), {Status._on: datetime.datetime.now() - datetime.timedelta(days=8)})
     ast = Base().get_all(Status.collection, {})
     test_id2 = str(uuid.uuid4())
     test_status2 = 'SUCCESS'
     status2 = Status(test_id2, test_type, test_status2, details=details)
     status2.save_and_update()
     test_status3 = 'SUCCESS'
     status3 = Status(test_id, test_type, test_status3, details=details)
     status3.save_and_update()
     # Only the backdated status1 should be purged.
     res = status3.purge()
     assert res['nb_removed'] == 1
     ast = Base().get_all(Status.collection, {})
     assert ast.count() == 2
     assert sorted([str(st['_id']) for st in ast]) == sorted([status2._id, status3._id])
 def test_save_and_update(self):
     """save_and_update() keeps all statuses but flags only the newest as last."""
     from core.Status import Status
     from core.Base import Base
     test_id = str(uuid.uuid4())
     test_type = str(uuid.uuid4())
     details = {'browser': random.choice(['Firefox', 'Chrome'])}
     Status(test_id, test_type, 'FAILURE', details=details).save_and_update()
     assert Base().get_one(Status.collection, {})['last'] == True
     Status(test_id, test_type, 'SUCCESS', details=details).save_and_update()
     everything = Base().get_all(Status.collection, {})
     assert everything.count() == 2
     superseded = Base().get_all(Status.collection, {'last': False})
     assert superseded.count() == 1
     assert superseded[0]['status'] == 'FAILURE'
     current = Base().get_all(Status.collection, {'last': True})
     assert current.count() == 1
     assert current[0]['status'] == 'SUCCESS'
Exemple #26
0
def main(config):
    """Train, test, or visualize depending on ``config.mode``.

    'train': auto-resume from the newest checkpoint, run the epoch loop,
    then evaluate on Duke. 'test': resume a specific checkpoint and run
    the three ablations (base / base+gcn / base+gcn+gm). 'visualize':
    resume a checkpoint and dump ranked retrieval images.

    Fix: control-flow ``assert 0`` statements are replaced with explicit
    ``raise AssertionError(...)`` -- bare asserts are stripped under
    ``python -O``, which would let execution continue on bad config.
    """

    # init loaders and base
    loaders = Loaders(config)
    base = Base(config, loaders)

    # make directions
    make_dirs(base.output_path)
    make_dirs(base.save_model_path)
    make_dirs(base.save_logs_path)
    make_dirs(base.save_visualize_market_path)
    make_dirs(base.save_visualize_duke_path)

    # init logger
    logger = Logger(
        os.path.join(os.path.join(config.output_path, 'logs/'), 'log.txt'))
    logger('\n' * 3)
    logger(config)

    if config.mode == 'train':  # train mode

        # resume model from the resume_train_epoch
        start_train_epoch = 0

        # automatically resume model from the latest one
        if config.auto_resume_training_from_lastest_steps:
            root, _, files = os_walk(base.save_model_path)
            if len(files) > 0:
                # get indexes of saved models; files are named '..._<epoch>.pkl'
                indexes = []
                for file in files:
                    indexes.append(int(
                        file.replace('.pkl', '').split('_')[-1]))
                indexes = sorted(list(set(indexes)), reverse=False)
                # resume model from the latest model
                base.resume_model(indexes[-1])
                start_train_epoch = indexes[-1]
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), indexes[-1]))

        # main loop
        for current_epoch in range(start_train_epoch,
                                   config.total_train_epochs):
            # save model
            base.save_model(current_epoch)
            # train
            base.lr_scheduler.step(current_epoch)
            _, results = train_an_epoch(config, base, loaders, current_epoch)
            logger('Time: {};  Epoch: {};  {}'.format(time_now(),
                                                      current_epoch, results))
        # test
        testwithVer2(config,
                     logger,
                     base,
                     loaders,
                     'duke',
                     use_gcn=True,
                     use_gm=True)

    elif config.mode == 'test':  # test mode
        # resume from the resume_test_epoch
        if config.resume_test_path != '' and config.resume_test_epoch != 0:
            base.resume_model_from_path(config.resume_test_path,
                                        config.resume_test_epoch)
        else:
            raise AssertionError(
                'please set resume_test_path and resume_test_epoch ')
        # test the three ablations: base, base+gcn, base+gcn+gm
        duke_map, duke_rank = testwithVer2(config,
                                           logger,
                                           base,
                                           loaders,
                                           'duke',
                                           use_gcn=False,
                                           use_gm=False)
        logger('Time: {},  base, Dataset: Duke  \nmAP: {} \nRank: {}'.format(
            time_now(), duke_map, duke_rank))
        duke_map, duke_rank = testwithVer2(config,
                                           logger,
                                           base,
                                           loaders,
                                           'duke',
                                           use_gcn=True,
                                           use_gm=False)
        logger(
            'Time: {},  base+gcn, Dataset: Duke  \nmAP: {} \nRank: {}'.format(
                time_now(), duke_map, duke_rank))
        duke_map, duke_rank = testwithVer2(config,
                                           logger,
                                           base,
                                           loaders,
                                           'duke',
                                           use_gcn=True,
                                           use_gm=True)
        logger('Time: {},  base+gcn+gm, Dataset: Duke  \nmAP: {} \nRank: {}'.
               format(time_now(), duke_map, duke_rank))
        logger('')

    elif config.mode == 'visualize':  # visualization mode
        # resume from the resume_visualize_epoch
        if config.resume_visualize_path != '' and config.resume_visualize_epoch != 0:
            base.resume_model_from_path(config.resume_visualize_path,
                                        config.resume_visualize_epoch)
            print('Time: {}, resume model from {} {}'.format(
                time_now(), config.resume_visualize_path,
                config.resume_visualize_epoch))
        # visualization
        if 'market' in config.train_dataset:
            visualize_ranked_images(config, base, loaders, 'market')
        elif 'duke' in config.train_dataset:
            visualize_ranked_images(config, base, loaders, 'duke')
        else:
            raise AssertionError(
                "train_dataset must contain 'market' or 'duke'")
Exemple #27
0
class Field(Element):
    """Tetris-style playing field: board grid, falling figure, previews, score.

    ``state`` is one of '__WAIT__' (before a game starts), '__GAME__'
    (playing) or '__LOSE__' (a block reached the top row).
    """

    def __init__(self, settings):
        """Create static UI elements, the drawing surface and empty game state."""
        super().__init__(settings)
        self.img_field = Image({
            'position': (0, 0),
            'path': 'static/field',
        })
        self.img_next = Image({
            'position': (300, 77),
            'path': 'static/next',
        })
        self.img_hold = Image({
            'position': (450, 77),
            'path': 'static/hold',
        })
        self.txt_score = Text({
            'position': (300, 300),
            'text': 'Score: 0',
        })
        # Off-screen surface the board is rendered onto each frame.
        self.current = pygame.Surface(
            (BOARD_SIZE[0] * TILE, BOARD_SIZE[1] * TILE))
        self.base = Base()
        self.figure = Figure()
        self.next = Figure()
        self.hold = None
        # True while the player may still stash the current figure into hold.
        self.hold_first = True
        self.score = 0
        self.state = '__WAIT__'
        # Board occupancy grid indexed as field[row][col]; 0 empty, 1 filled.
        self.field = [[0 for _ in range(BOARD_SIZE[0])]
                      for _ in range(BOARD_SIZE[1])]

    def get(self, position, shift=(0, 0)):
        """Return the cell at position+shift, or 1 (solid) when out of bounds."""
        if all([
                0 <= position[1] + shift[1] < BOARD_SIZE[1],
                0 <= position[0] + shift[0] < BOARD_SIZE[0],
        ]):
            return self.field[position[1] + shift[1]][position[0] + shift[0]]
        # Out-of-board cells count as occupied so figures cannot leave the grid.
        return 1

    def set(self, position, value):
        """Write *value* into the board cell at (x, y) *position*."""
        self.field[position[1]][position[0]] = value

    def start(self):
        """Reset all per-game state and enter the '__GAME__' state."""
        self.base = Base()
        self.figure = Figure()
        self.next = Figure()
        self.hold = None
        self.score = 0
        self.txt_score.set_text('Score: 0')
        self.state = '__GAME__'
        self.field = [[0 for _ in range(BOARD_SIZE[0])]
                      for _ in range(BOARD_SIZE[1])]

    def key_down(self, key):
        """Handle a key press: A/D move, Q/E rotate, W hold-swap, N new game."""
        if key == pygame.K_a:
            if self.figure.can_move(self, (-1, 0)):
                self.figure.move((-1, 0))
        elif key == pygame.K_d:
            if self.figure.can_move(self, (1, 0)):
                self.figure.move((1, 0))
        elif key == pygame.K_q:
            if self.figure.can_rotate(self, 3):
                self.figure.rotate(3)
        elif key == pygame.K_e:
            if self.figure.can_rotate(self, 1):
                self.figure.rotate(1)
        elif key == pygame.K_w:
            # Swap with the hold slot; only one stash per drop is allowed.
            if not self.hold:
                if self.hold_first:
                    self.hold = self.figure.copy()
                    self.figure = Figure()
                    self.hold_first = False
            else:
                self.figure = self.hold.copy()
                self.hold = None
        elif key == pygame.K_n:
            self.start()

    def show(self, surf):
        """Advance one frame of game logic and draw the field onto *surf*."""
        if self.state == '__GAME__':
            if self.figure.state == '__STOP__':
                if self.figure.can_move(self, (0, 1)):
                    self.figure.state = '__FALLING__'
                else:
                    # Figure has landed: lock its blocks into the grid.
                    self.hold_first = True
                    for block in self.figure.blocks:
                        self.set(block.position, 1)
                    deleted = []
                    self.base.add_blocks(self.figure)
                    # Collapse every fully-filled row and record its index.
                    for i in range(len(self.field)):
                        if all([
                                self.get((k, i))
                                for k in range(len(self.field[i]))
                        ]):
                            for row in range(i, 0, -1):
                                self.field[row] = self.field[row - 1]
                            deleted.append(i)
                            self.field[0] = [0 for _ in range(BOARD_SIZE[0])]
                    self.base.set_deleted(deleted)
                    # Any block left in the top row means the game is over.
                    if any([i == 1 for i in self.field[0]]):
                        self.state = '__LOSE__'
                        config.UPDATER.send_result(self.score)
                        if config.NICKNAME:
                            rating = config.WINDOW.get('txtRating')
                            rating.set_text(int(rating.text) + self.score)
                            best = config.WINDOW.get('txtBest')
                            best.set_text(max(int(best.text), self.score))
                    # Scoring: 2**n points for clearing n rows at once.
                    self.score += 2**len(deleted) if deleted else 0
                    self.txt_score.set_text('Score: ' + str(self.score))
                    self.figure = self.next
                    self.next = Figure()
        self.current.fill(BACKGROUND)
        self.img_field.show(self.current)
        if self.state == '__GAME__':
            self.figure.show(self.current)
        self.base.show(self.current)
        self.img_next.show(surf)
        self.img_hold.show(surf)
        if self.hold and self.state == '__GAME__':
            self.hold.static_show(surf, self.img_hold)
        if self.state == '__GAME__':
            self.next.static_show(surf, self.img_next)
        self.txt_score.show(surf)
        surf.blit(self.current, self.position)
Exemple #28
0
def run_on_weekend_days(
    app: Base, callback: Callable[..., None], start: datetime.time, **kwargs
) -> list:
    """Run a callback on weekend days (at the specified time)."""
    weekend = ["Friday", "Saturday"]
    return app.run_on_days(callback, weekend, start, **kwargs)
Exemple #29
0
 def get_chapter(self, chash):
     """Fetch the chapter row for *chash* within this manga."""
     params = {"mhash": self.mhash, "chash": chash}
     with self.connection as c:
         return c.execute(
             Base.source("sql/get_chapter.sql"), params).fetchone()
Exemple #30
0
        p.add_argument('--bind', '-b', nargs='?', default='0.0.0.0')
        p.add_argument('--manga', '-m', nargs='?', default='$HOME/.manga/')
        p.add_argument('--install',
                       action='store_true',
                       help="Creating database with initial groups and users.")
        return p.parse_args()


# Application bootstrap: parse CLI args, open the database, wire services.
app_argv = ZeroMangaApplication.args()

BIND_ADDRES = app_argv.bind
# NOTE(review): the visible argument parser defines no '--listen' option;
# confirm it is added elsewhere, otherwise this raises AttributeError.
PORT_NUMBER = app_argv.listen

# Shared service registry passed to every component below.
data = {"download-threads": 8, "storage": os.path.expandvars(app_argv.manga)}

# SQLite database lives inside the manga storage directory.
db = Base(os.path.join(os.path.expandvars(app_argv.manga), 'zeromanga.sqlite'))
data["db"] = db

users = Users(data)
data["users"] = users

# --install: create the schema with initial groups/users, then exit.
if app_argv.install:
    ins = Install(data)
    ins.run_install()
    sys.exit()

data["manga"] = Manga(data)
data["taskman"] = TaskManager(data)

zmApp = ZeroMangaApplication()
Exemple #31
0
def main(config):
    """Train or test the GAN + re-id model depending on ``config.mode``.

    'train': auto-resume from the newest complete checkpoint set, then run
    warmup-reid, warmup-gan and joint phases with periodic tests and image
    visualization. 'test': evaluate a pre-trained model.

    Fix: the "no available models" log line previously printed the literal
    '{}' because ``.format(time_now())`` was missing.
    """

    # loaders and base
    loaders = Loaders(config)
    base = Base(config, loaders)

    # make dirs
    make_dirs(config.save_images_path)
    make_dirs(config.save_models_path)
    make_dirs(config.save_features_path)

    # logger
    logger = Logger(os.path.join(config.output_path, 'log.txt'))
    logger(config)

    if config.mode == 'train':

        # automatically resume model from the latest one
        start_train_epoch = 0
        root, _, files = os_walk(config.save_models_path)
        if len(files) > 0:
            # get indexes of saved models; files are named '..._<epoch>.pkl'
            indexes = []
            for file in files:
                indexes.append(int(file.replace('.pkl', '').split('_')[-1]))

            # an epoch is usable only if every sub-model saved a file for it
            model_num = len(base.model_list)
            available_indexes = copy.deepcopy(indexes)
            for element in indexes:
                if indexes.count(element) < model_num:
                    available_indexes.remove(element)

            available_indexes = sorted(list(set(available_indexes)),
                                       reverse=True)

            if len(available_indexes
                   ) > 0:  # resume model from the latest model
                base.resume_model(available_indexes[0])
                start_train_epoch = available_indexes[0] + 1
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), available_indexes[0]))
            else:
                # bug fix: placeholder was never filled (missing .format)
                logger('Time: {}, there are no available models'.format(
                    time_now()))

        # main loop
        for current_epoch in range(
                start_train_epoch, config.warmup_reid_epoches +
                config.warmup_gan_epoches + config.train_epoches):

            # test (only once joint training has started)
            if current_epoch % 10 == 0 and current_epoch > config.warmup_reid_epoches + config.warmup_gan_epoches:
                results = test(config, base, loaders, brief=True)
                for key in results.keys():
                    logger('Time: {}\n Setting: {}\n {}'.format(
                        time_now(), key, results[key]))

            # visualize generated images
            if current_epoch % 10 == 0 or current_epoch <= 10:
                visualize(config, loaders, base, current_epoch)

            # train
            if current_epoch < config.warmup_reid_epoches:  # warmup reid model
                results = train_an_epoch(config,
                                         loaders,
                                         base,
                                         current_epoch,
                                         train_gan=True,
                                         train_reid=True,
                                         train_pixel=False,
                                         optimize_sl_enc=True)
            elif current_epoch < config.warmup_reid_epoches + config.warmup_gan_epoches:  # warmup GAN model
                results = train_an_epoch(config,
                                         loaders,
                                         base,
                                         current_epoch,
                                         train_gan=True,
                                         train_reid=False,
                                         train_pixel=False,
                                         optimize_sl_enc=False)
            else:  # joint train
                results = train_an_epoch(config,
                                         loaders,
                                         base,
                                         current_epoch,
                                         train_gan=True,
                                         train_reid=True,
                                         train_pixel=True,
                                         optimize_sl_enc=True)
            logger('Time: {};  Epoch: {};  {}'.format(time_now(),
                                                      current_epoch, results))

            # save model
            base.save_model(current_epoch)

        # final full test
        results = test(config, base, loaders, brief=False)
        for key in results.keys():
            logger('Time: {}\n Setting: {}\n {}'.format(
                time_now(), key, results[key]))

    elif config.mode == 'test':
        # resume from pre-trained model and test
        base.resume_model_from_path(config.pretrained_model_path,
                                    config.pretrained_model_epoch)
        results = test(config, base, loaders, brief=False)
        for key in results.keys():
            logger('Time: {}\n Setting: {}\n {}'.format(
                time_now(), key, results[key]))
def main(config):
    """Entry point for adaptation ReID training / testing.

    In 'train' mode: optionally resume from saved checkpoints, run the
    warmup schedule (reid -> GAN -> adaptation), then run self-training
    rounds on a pseudo-labeled target dataset.  In 'test' mode: load a
    pre-trained model and evaluate it.
    """

    # loaders and base
    loaders = Loaders(config)
    base = Base(config, loaders)

    # make dirs
    make_dirs(config.save_images_path)
    make_dirs(config.save_wp_models_path)
    make_dirs(config.save_st_models_path)
    make_dirs(config.save_features_path)

    logger = setup_logger('adaptation_reid', config.output_path, if_train=True)

    if config.mode == 'train':

        if config.resume:
            # automatically resume model from the latest one
            if config.resume_epoch_num == 0:
                start_train_epoch = 0
                root, _, files = os_walk(config.save_models_path)
                if len(files) > 0:
                    # get indexes of saved models
                    indexes = []
                    for file in files:
                        indexes.append(
                            int(file.replace('.pkl', '').split('_')[-1]))

                    # an epoch index is usable only when every sub-model
                    # saved a checkpoint for it; drop incomplete epochs
                    model_num = len(base.model_list)
                    available_indexes = copy.deepcopy(indexes)
                    for element in indexes:
                        if indexes.count(element) < model_num:
                            available_indexes.remove(element)

                    available_indexes = sorted(list(set(available_indexes)),
                                               reverse=True)
                    unavailable_indexes = list(
                        set(indexes).difference(set(available_indexes)))

                    if len(available_indexes
                           ) > 0:  # resume model from the latest model
                        base.resume_model(available_indexes[0])
                        start_train_epoch = available_indexes[0] + 1
                        logger.info(
                            'Time: {}, automatically resume training from the latest step (model {})'
                            .format(time_now(), available_indexes[0]))
                    else:
                        # BUG FIX: the '{}' placeholder was never filled
                        logger.info(
                            'Time: {}, there are no available models'.format(
                                time_now()))
            else:
                start_train_epoch = config.resume_epoch_num
        else:
            start_train_epoch = 0

        # main warmup loop: reid -> GAN -> adaptation
        for current_epoch in range(
                start_train_epoch, config.warmup_reid_epoches +
                config.warmup_gan_epoches + config.warmup_adaptation_epoches):

            # train
            if current_epoch < config.warmup_reid_epoches:  # warmup reid model
                results = train_an_epoch(config,
                                         0,
                                         loaders,
                                         base,
                                         current_epoch,
                                         train_gan=True,
                                         train_reid=True,
                                         self_training=False,
                                         optimize_sl_enc=True,
                                         train_adaptation=False)
            elif current_epoch < config.warmup_reid_epoches + config.warmup_gan_epoches:  # warmup GAN model
                results = train_an_epoch(config,
                                         0,
                                         loaders,
                                         base,
                                         current_epoch,
                                         train_gan=True,
                                         train_reid=False,
                                         self_training=False,
                                         optimize_sl_enc=False,
                                         train_adaptation=False)
            else:  # warmup adaptation
                # BUG FIX: was a redundant `elif` with no `else`, which could
                # leave `results` unbound; the loop range guarantees this
                # branch covers the remaining epochs.
                results = train_an_epoch(config,
                                         0,
                                         loaders,
                                         base,
                                         current_epoch,
                                         train_gan=True,
                                         train_reid=False,
                                         self_training=False,
                                         optimize_sl_enc=False,
                                         train_adaptation=True)

            print("another epoch")
            logger.info('Time: {};  Epoch: {};  {}'.format(
                time_now(), current_epoch, results))
            # save model
            if current_epoch % config.save_model_interval == 0:
                base.save_model(current_epoch, True)

            if current_epoch % config.test_model_interval == 0:
                visualize(config, loaders, base, current_epoch)
                test(config, base, loaders, epoch=0, brief=False)

        total_wp_epoches = config.warmup_reid_epoches + config.warmup_gan_epoches

        # self-training rounds on pseudo-labeled target data
        for iter_n in range(config.iteration_number):
            src_dataset, src_dataloader, trg_dataset, trg_dataloader = loaders.get_self_train_loaders(
            )

            trg_labeled_dataloader = generate_labeled_dataset(
                base, iter_n, src_dataset, src_dataloader, trg_dataset,
                trg_dataloader)
            for epoch in range(total_wp_epoches + 1, config.self_train_epoch):
                results = train_an_epoch(
                    config,
                    iter_n,
                    loaders,
                    base,
                    epoch,
                    train_gan=True,
                    train_reid=False,
                    self_training=True,
                    optimize_sl_enc=True,
                    trg_labeled_loader=trg_labeled_dataloader)
                # BUG FIX: log the inner-loop `epoch`, not the stale
                # `current_epoch` left over from the warmup loop above.
                logger.info('Time: {};  Epoch: {};  {}'.format(
                    time_now(), epoch, results))

                if epoch % config.save_model_interval == 0:
                    base.save_model(iter_n * config.self_train_epoch + epoch,
                                    False)

    elif config.mode == 'test':
        # resume from pre-trained model and test
        base.resume_model_from_path(config.pretrained_model_path,
                                    config.pretrained_model_epoch)
        # renamed `map` -> `mAP` to avoid shadowing the builtin
        cmc, mAP = test(config, base, loaders, epoch=100, brief=False)
Example #33
0
    def get_queued(self):
        """Return every queued-upload row, or an empty list when none exist."""
        with self.db.connect() as connection:
            cursor = connection.cursor()
            rows = cursor.execute(Base.source("sql/qupload.sql")).fetchall()
            return rows or []
Example #34
0
 def get_manga_by_hash(self, mhash):
     """Fetch the single manga row whose hash is *mhash* (None if absent)."""
     params = {"hash": mhash}
     with self.db.connect() as connection:
         cursor = connection.cursor()
         query = Base.source("sql/get_manga_by_hash.sql")
         return cursor.execute(query, params).fetchone()
Example #35
0
    def add_url(self, uiid, url):
        """Insert *url* for item *uiid*, keyed by the URL's SHA-1 digest."""
        with self.db.connect() as connection:
            c = connection.cursor()
            c.execute(Base.source("sql/add_url.sql"),
                      (uiid, url, aux.sha1(url)))
            # BUG FIX / consistency: sibling write method `authorize` commits
            # explicitly, so the connection wrapper may not auto-commit;
            # without this the insert can be silently discarded.
            connection.commit()
Example #36
0
 def authorize(self, uiid, cookie, ip):
     """Bind *cookie* and *ip* to id *uiid* and persist the change."""
     with self.db.connect() as connection:
         cursor = connection.cursor()
         statement = Base.source("sql/authorize.sql")
         cursor.execute(statement, (uiid, ip, cookie))
         connection.commit()
def main(config):
    """Train, test, or visualize a ReID model according to ``config.mode``."""

    # init loaders and base
    loaders = ReIDLoaders(config)
    base = Base(config)

    # make directions
    make_dirs(base.output_path)

    # init logger
    logger = Logger(os.path.join(config.output_path, 'log.txt'))
    logger(config)

    assert config.mode in ['train', 'test', 'visualize']
    if config.mode == 'train':  # train mode

        # BUG FIX: default to epoch 0 so start_train_epoch is always bound;
        # previously a NameError occurred when auto-resume was disabled.
        start_train_epoch = 0

        # automatically resume model from the latest one
        if config.auto_resume_training_from_lastest_steps:
            start_train_epoch = base.resume_last_model()

        # main loop
        for current_epoch in range(start_train_epoch,
                                   config.total_train_epochs):
            # save model
            base.save_model(current_epoch)
            # train
            base.lr_scheduler.step(current_epoch)
            _, results = train_an_epoch(config, base, loaders)
            logger('Time: {};  Epoch: {};  {}'.format(time_now(),
                                                      current_epoch, results))

        # test
        base.save_model(config.total_train_epochs)
        mAP, CMC = test(config, base, loaders)
        logger('Time: {}; Test Dataset: {}, \nmAP: {} \nRank: {}'.format(
            time_now(), config.test_dataset, mAP, CMC))

    elif config.mode == 'test':  # test mode
        base.resume_from_model(config.resume_test_model)
        mAP, CMC = test(config, base, loaders)
        logger('Time: {}; Test Dataset: {}, \nmAP: {} \nRank: {}'.format(
            time_now(), config.test_dataset, mAP, CMC))

    elif config.mode == 'visualize':  # visualization mode
        base.resume_from_model(config.resume_visualize_model)
        visualize(config, base, loaders)
Example #38
0
def main(config):
    """Drive train / test / visualize for a cross-dataset (Market & Duke) ReID model."""

    # init loaders and base
    loaders = Loaders(config)
    base = Base(config, loaders)

    # make directions
    make_dirs(base.output_path)
    make_dirs(base.save_model_path)
    make_dirs(base.save_logs_path)
    make_dirs(base.save_visualize_market_path)
    make_dirs(base.save_visualize_duke_path)

    # init logger
    logger = Logger(
        os.path.join(os.path.join(config.output_path, 'logs/'), 'log.txt'))
    logger('\n' * 3)
    logger(config)

    if config.mode == 'train':  # train mode

        # resume model from the resume_train_epoch
        if config.resume_train_epoch >= 0:
            base.resume_model(config.resume_train_epoch)
            start_train_epoch = config.resume_train_epoch
        else:
            start_train_epoch = 0

        # automatically resume model from the latest one
        # (overrides resume_train_epoch whenever saved checkpoints exist)
        if config.auto_resume_training_from_lastest_steps:
            root, _, files = os_walk(base.save_model_path)
            if len(files) > 0:
                # get indexes of saved models (filenames end in "_<epoch>.pkl")
                indexes = []
                for file in files:
                    indexes.append(int(
                        file.replace('.pkl', '').split('_')[-1]))
                indexes = sorted(list(set(indexes)), reverse=False)
                # resume model from the latest model
                base.resume_model(indexes[-1])
                #
                start_train_epoch = indexes[-1]
                logger(
                    'Time: {}, automatically resume training from the latest step (model {})'
                    .format(time_now(), indexes[-1]))

        # main loop
        for current_epoch in range(start_train_epoch,
                                   config.total_train_epochs):

            # save model (checkpoint before the epoch runs)
            base.save_model(current_epoch)

            # train
            base.lr_scheduler.step(current_epoch)
            _, results = train_an_epoch(config, base, loaders)
            logger('Time: {};  Epoch: {};  {}'.format(time_now(),
                                                      current_epoch, results))

            # test on both datasets every 40 epochs
            # NOTE(review): `current_epoch + 1 >= 0` is always true here;
            # only the 40-epoch periodicity has any effect.
            if (current_epoch + 1) % 40 == 0 and current_epoch + 1 >= 0:
                market_map, market_rank = test(config, base, loaders, 'market')
                duke_map, duke_rank = test(config, base, loaders, 'duke')
                logger(
                    'Time: {},  Dataset: Market  \nmAP: {} \nRank: {}'.format(
                        time_now(), market_map, market_rank))
                logger('Time: {},  Dataset: Duke  \nmAP: {} \nRank: {}'.format(
                    time_now(), duke_map, duke_rank))
                logger('')

    elif config.mode == 'test':  # test mode
        # resume from the resume_test_epoch
        if config.resume_test_epoch >= 0:
            base.resume_model(config.resume_test_epoch)
        # test on both datasets and log mAP / CMC ranks
        market_map, market_rank = test(config, base, loaders, 'market')
        duke_map, duke_rank = test(config, base, loaders, 'duke')
        logger('Time: {},  Dataset: Market  \nmAP: {} \nRank: {}'.format(
            time_now(), market_map, market_rank))
        logger('Time: {},  Dataset: Duke  \nmAP: {} \nRank: {}'.format(
            time_now(), duke_map, duke_rank))
        logger('')

    elif config.mode == 'visualize':  # visualization mode
        # resume from the resume_visualize_epoch
        if config.resume_visualize_epoch >= 0:
            base.resume_model(config.resume_visualize_epoch)
        # render ranking lists for both datasets
        visualize_ranking_list(config, base, loaders, 'market')
        visualize_ranking_list(config, base, loaders, 'duke')