def compute_feature_over_db():
    entries = session.query(Entry).all()
    total = len(entries)
    for i, d in enumerate(entries):  # type: Entry
        print(i, total)
        # Run the Xception feature extractor on each thumbnail and store
        # the prediction on the entry as a JSON string.
        pred = xception_process(d.thumbnail_path)
        d.xception_string = json.dumps(pred)
    session.commit()
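A minimal driver sketch (an addition, mirroring the try/rollback pattern of code example #11 below):

try:
    compute_feature_over_db()
except Exception as e:
    session.rollback()
    raise e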
Code example #2
def compute_feature_over_db():
    cl = KMeanPaletteClassifier()
    entries = session.query(Entry).all()
    total = len(entries)
    for i, d in enumerate(entries):  # type: Entry
        img = cv2.imread(d.thumbnail_path)
        if img is None:  # cv2.imread returns None for unreadable files
            continue
        # Fit the k-means palette classifier on the thumbnail and store the
        # resulting color labels on the entry as a JSON string.
        pred = cl.fit(img)[0]
        print(i, total)
        d.color_labels = json.dumps(pred)

    session.commit()
Code example #3
async def fetch_new_articles_with_filter(aiohttp_session: ClientSession,
                                         scraper_filter: ScrapFilters) -> list:
    result = []
    page_number = 0
    has_new_posts = True
    while has_new_posts:
        page_number += 1
        articles = await fetch_articles_from_search_page(
            aiohttp_session=aiohttp_session,
            params=scraper_filter.path,
            page_number=page_number)
        if len(articles) == 0:
            break
        article_ids = [a['id'] for a in articles]

        # Drop articles that are already recorded in PostedArticles.
        old_articles = session.query(PostedArticles.article_id) \
            .filter(PostedArticles.article_id.in_(article_ids)) \
            .all()
        old_article_ids = {a.article_id for a in old_articles}
        new_articles = [a for a in articles if a['id'] not in old_article_ids]
        # Keep paginating only while a full search page consisted entirely
        # of unseen articles; otherwise we have caught up with old posts.
        has_new_posts = (len(new_articles) == len(articles)
                         and len(article_ids) == ARTICLES_COUNT_ON_SEARCH_PAGE)
        result += new_articles
        print(f'Processed page {page_number}')
    return result
Code example #4
async def add_filter(command_arg: str, user_id: int):
    sender_user = session.query(Users).filter(
        Users.telegram_user_id == user_id).first()
    if not sender_user:
        sender_user = Users(telegram_user_id=user_id)
        session.add(sender_user)
        session.flush()  # assign sender_user.id before it is referenced below
    session.add(ScrapFilters(user_id=sender_user.id, path=command_arg))
Code example #5
def compute_feature_over_db(func):
    entries = session.query(Entry).all()
    total = len(entries)
    cl = KMeanPaletteClassifier()

    for i, d in enumerate(entries):  # type: Entry
        if i % 10 == 0:
            print(i, total, np.round(i / total * 100, 2), "%")
        frame = cv2.imread(d.thumbnail_path)
        if frame is None:  # unreadable thumbnail; skip before using it
            continue

        pred = xception_process(d.thumbnail_path)
        d.xception_string = json.dumps(pred)

        pred = cl.fit(frame)[0]
        d.color_labels = json.dumps(pred)

        func(d, frame)
    session.commit()
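A hedged usage sketch: `func` receives each Entry together with its decoded
frame. The `store_histogram` callback and the `histogram_string` column are
hypothetical, not part of the original project:

def store_histogram(entry, frame):
    # Grayscale histogram of the frame, stored as JSON (hypothetical column).
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
    entry.histogram_string = json.dumps(hist.flatten().tolist())

compute_feature_over_db(store_histogram)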
Code example #6
async def list_filters(chat_id: int):
    filters = session.query(ScrapFilters).all()
    text = '\n\n'.join(f.path for f in filters) if filters else 'filters list is empty'
    async with aiohttp.ClientSession() as aiohttp_session:
        await send_message(aiohttp_session=aiohttp_session,
                           chat_id=chat_id,
                           text=text)
Code example #7
async def notify_users_about_article(article, aiohttp_session: ClientSession):
    if len(article['images']) == 0:
        log.warning(f'skipped article with id {article["id"]} without images')
        return

    caption = build_caption(article)
    if len(caption) > 1024:  # Telegram's caption length limit
        # Trim the article text so the rebuilt caption fits, leaving room
        # for the three-character ellipsis.
        new_description = article['text'][0:-(len(caption) - 1024 + 3)] + '...'
        caption = build_caption({**article, 'text': new_description})
    # Telegram media groups accept at most 10 items.
    images = article['images'][:10]

    media_list = [{
        'type': 'photo',
        'media': ('https://lunappimg.appspot.com/lun-ua/414/336/'
                  f'images-cropped/{image["image_id"]}.jpg')
    } for image in images]

    # The first item carries the caption for the whole media group.
    media_list[0] = {**media_list[0], 'caption': caption, 'parse_mode': 'HTML'}
    users = session.query(Users).all()
    for user in users:
        await send_media_group(aiohttp_session, user.id, media_list)
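A hedged sketch of what the send_media_group helper might look like against
the Bot API's sendMediaGroup method (BOT_TOKEN and the helper body are
assumptions; the original implementation is not shown):

async def send_media_group(aiohttp_session, chat_id, media_list):
    url = f'https://api.telegram.org/bot{BOT_TOKEN}/sendMediaGroup'
    async with aiohttp_session.post(url, json={'chat_id': chat_id,
                                               'media': media_list}) as resp:
        resp.raise_for_status()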
Code example #8
async def main():
    scrap_filters = session.query(ScrapFilters).all()

    async with aiohttp.ClientSession() as aiohttp_session:
        new_articles = []
        for scrap_filter in scrap_filters:
            new_articles += await fetch_new_articles_with_filter(
                aiohttp_session, scrap_filter)

        if new_articles:
            print('Sending notifications...')
        for article in new_articles:
            await notify_users_about_article(article, aiohttp_session)
            await asyncio.sleep(0.20)  # throttle to respect Telegram rate limits

        if new_articles:
            rows = [PostedArticles(article_id=a['id']) for a in new_articles]
            session.add_all(rows)
            session.commit()
        print('Done.')
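Presumably run with the standard asyncio entry point (an assumption; the
snippet does not include it):

if __name__ == '__main__':
    asyncio.run(main())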
Code example #9
    # Fragment: the enclosing loop over movies and the try block that pairs
    # with this handler are not shown in the snippet.
    except Exception as e:
        print(e)
        continue

    # Sample roughly 30 frames across the clip; guard against a zero step.
    step = max(1, int((end - start) / 30))
    print(start, end)
    images = []
    for i in range(start, end, step):
        cap.set(cv2.CAP_PROP_POS_FRAMES, i)
        ret, frame = cap.read()
        if frame is None:
            print("Frame is None", curr_movie)
            continue
        else:
            frame = resize_with_aspect(frame, 300)
            # cv2.imshow("out", frame)
            # cv2.waitKey(30)
            images.append(frame[:, :, ::-1])  # BGR -> RGB for imageio
    e = session.query(Entry).filter(
        Entry.movie_name == curr_movie_name).filter(
            Entry.frame_pos == center_f).one_or_none()
    if e is None:
        print("No entry for", curr_movie_name, center_f)
        continue
    with imageio.get_writer("data/gifs/" + str(e.id) + ".gif",
                            mode='I') as writer:
        for f in images:
            writer.append_data(f)

# session.commit()

# for filename in filenames:
#     images.append(imageio.imread(filename))
# imageio.mimsave('/path/to/movie.gif', images)
Code example #10
from src.database import Entry, session
import pickle

import pandas as pd

dir_path = "data"
embeddings_path = dir_path + '/caption_embeddings.pkl'
with open(embeddings_path, 'rb') as file:
    _caption_embeddings = pickle.load(file)

csv_path = 'data/captions.csv'
csv = pd.read_csv(csv_path)
_CAPTIONS = csv['caption'].tolist()
_IMAGE_IDS = csv['thumbnail_id'].tolist()

print(len(_CAPTIONS))
entries = session.query(Entry).all()

res_emb = []
d = dict()
try:
    for e in entries:  # type: Entry
        # Key entries by thumbnail file name so captions can be matched
        # back to them by image id.
        d[e.thumbnail_path.split("/")[2]] = e
    for (c, p, emb) in zip(_CAPTIONS, _IMAGE_IDS, _caption_embeddings):
        print(c, p)
        d[p].caption = c
        res_emb.append((d[p].id, emb))
        print(p)
    session.commit()
except Exception as e:
    session.rollback()
    raise e
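A hedged follow-up, assuming the (entry id, embedding) pairs collected in
res_emb are meant to be written back out (the path and format are guesses):

with open(dir_path + '/entry_caption_embeddings.pkl', 'wb') as f:  # hypothetical output path
    pickle.dump(res_emb, f)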
Code example #11
def compute_feature_over_db():
    cl = KMeanPaletteClassifier()
    entries = session.query(Entry).all()
    total = len(entries)
    for i, d in enumerate(entries):  # type: Entry
        img = cv2.imread(d.thumbnail_path)
        if img is None:  # cv2.imread returns None for unreadable files
            continue
        pred = cl.fit(img)[0]
        print(i, total)
        d.color_labels = json.dumps(pred)

    session.commit()


# hdf5_writer.set_path("data/test-features.hdf5", mode="r+")
# hdf5_writer.initialize_dataset("xception_features", shape=(10*10*2048, ), dtype=np.float16)
try:
    compute_feature_over_db()
    hdf5_writer.on_close()
except Exception as e:
    session.rollback()
    raise e

# TESTING #
from random import sample
t = session.query(Entry).all()
t = sample(t, 100)

for k in t:  #type:Entry
    print(k)
    cv2.imshow("out", cv2.imread(k.thumbnail_path))
    print(k.get_colors())
    cv2.waitKey()
Code example #12
def identity(payload):
    # Flask-JWT-style identity handler: payload['identity'] holds the user
    # id stored when the token was issued.
    user_id = payload['identity']
    user = session.query(User).get(user_id)
    return user.id if user else None
Code example #13
def authenticate(username, password):
    # NOTE: compares a plain-text password; real code should store and
    # check password hashes instead. one_or_none() yields None on a failed
    # login instead of raising NoResultFound.
    user = session.query(User).filter(User.username == username).filter(
        User.password == password).one_or_none()

    return user
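These two functions look like Flask-JWT callbacks; a minimal wiring sketch
under that assumption (the app setup and SECRET_KEY are illustrative):

from flask import Flask
from flask_jwt import JWT

app = Flask(__name__)
app.config['SECRET_KEY'] = 'change-me'
jwt = JWT(app, authenticate, identity)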
Code example #14
def get(**kwargs) -> Optional[Conversation]:
    # .first() returns a single Conversation or None, not a list.
    return session.query(Conversation).filter_by(**kwargs).first()
Code example #15
async def del_filter(command_args: str):
    # LIKE without wildcards behaves as an exact match on the path.
    filters = session.query(ScrapFilters)\
        .filter(ScrapFilters.path.like(command_args))\
        .all()
    for scraper_filter in filters:
        session.delete(scraper_filter)
Code example #16
File: server.py Project: Prior99/go2
def get_player(id):
    return session.query(Player).get(id)
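Note that Query.get() is considered legacy as of SQLAlchemy 1.4; the
equivalent modern call is Session.get():

def get_player(id):
    return session.get(Player, id)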