Example #1
def get_email():
    try:
        db = DBInterface()
        emails = json.loads(db.get_email(option=1))['emails']
        emails = ','.join(emails)

        with open(EMAILPATH, "w") as f:
            print("Email list updated.")
            f.write(emails)

    except Exception:
        print("Unable to fetch emails from database server.")
Example #2
class ImageRecognitionWorker:
    worker = None
    db = None

    def __init__(self):
        self.worker = GearmanWorker(['gearman.emag-bot.com'])
        self.worker.register_task('imgrecon', self.ImageRecognition)

        self.db = DBInterface()

    def Work(self):
        self.worker.work()

    def ComputeHistogram(self, url):
        ima = ImageAnalyser()
        ima.LoadImage(url, PathLocation.ONLINE)
        contour = ima.FindContour()
        im_e = ima.ExtractContour(contour)
        #ima.ShowImage(im_e)
        hist = ima.GetHueHistogram(im_e)

        return hist

    def ImageRecognition(self, worker, job):
        print("Got job: " + job.data)
        data = json.loads(job.data)
        hist = self.ComputeHistogram(data['url'])

        db_entries = self.db.QueryForLabels(data['labels'])

        # parallel lists: best correlation scores in [0], matching labels (row[4]) in [1]
        accepted_entries = [[], []]
        for row in db_entries:
            #print(row)
            # skip rows without a stored histogram
            if row[3] is None or row[3] == '':
                continue

            # parse the stored histogram (kept separate from the job payload in `data`)
            stored = json.loads(row[3])
            stored = np.array([[d] for d in stored], dtype=np.float32)
            res = cv2.compareHist(hist, stored, cv2.HISTCMP_CORREL)

            # keep only the best score per label
            if row[4] in accepted_entries[1]:
                idx = accepted_entries[1].index(row[4])
                if res > accepted_entries[0][idx]:
                    accepted_entries[0][idx] = res
            else:
                accepted_entries[0].append(res)
                accepted_entries[1].append(row[4])

        # labels sorted by best correlation score, highest first
        ret = [
            x for _, x in sorted(zip(accepted_entries[0], accepted_entries[1]),
                                 reverse=True)
        ]
        print(ret)

        return json.dumps(ret)
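
A minimal usage sketch, assuming the class above is importable and the Gearman server is reachable; Work() blocks and serves 'imgrecon' jobs until interrupted:

if __name__ == '__main__':
    worker = ImageRecognitionWorker()
    worker.Work()  # blocks; handles 'imgrecon' jobs as they arrive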
Example #3
    def makeTree(self):
        db = DBInterface(self, self.dbname)
        basedict = {}

        #############################################################
        idDefault = FileNode("PictureInfo", "ID", db,
                             IDFormatConverter("PictureInfo"))
        idSpecials = None
        idTree = DBNode("ID", "PictureInfo", "ID", db,
                        IDFormatConverter("PictureInfo"), idDefault,
                        idSpecials)
        basedict["ByID"] = idTree
        #############################################################

        #############################################################
        #ratingDefault = idTree
        #ratingSpecials = None
        #ratingTree = DBAggregateNode("rating",
        #                             "rating",
        #                             db,
        #                             SimpleFormatConverter("rating"),
        #                             ratingDefault,
        #                             ratingSpecials)
        #basedict["ByRating"] = ratingTree
        ##############################################################

        #############################################################
        #virtualDefault = idTree
        #virtualSpecials = None
        #virtualTree = DBNode("virtual",
        #                     "virtual",
        #                     db,
        #                     SimpleFormatConverter("virtual"),
        #                     virtualDefault,
        #                     virtualSpecials )
        #basedict["Virtual"] = virtualTree
        #############################################################

        ########### NOT DONE ##############
        #basedict["ByStar"] = starTree
        #
        ###################################

        self.tree = BaseNode(basedict)
Example #4
    def makeTree(self):
        db = DBInterface(self, self.dbname)

        #############################################################
        titleDefault = FileNode("VideoInfo", "title", db,
                                TitleFormatConverter("VideoInfo"))
        titleSpecials = None
        titleTree = DBNode("title", "VideoInfo", "title", db,
                           TitleFormatConverter("VideoInfo"), titleDefault,
                           titleSpecials)
        basedict = {}
        basedict["ByTitle"] = titleTree
        #############################################################

        #############################################################
        ratingDefault = titleTree
        ratingSpecials = None
        ratingTree = DBNode("rating", "VideoInfo", "rating", db,
                            SimpleFormatConverter("VideoInfo", "rating"),
                            ratingDefault, ratingSpecials)
        basedict["ByRating"] = ratingTree
        #############################################################

        #############################################################
        virtualDefault = titleTree
        virtualSpecials = None
        virtualTree = DBNode("virtual", "VideoInfo", "virtual", db,
                             SimpleFormatConverter("VideoInfo", "virtual"),
                             virtualDefault, virtualSpecials)
        basedict["Virtual"] = virtualTree
        #############################################################

        ########### NOT DONE ##############
        #basedict["ByStar"] = starTree
        #
        ###################################

        self.tree = BaseNode(basedict)
Example #5
import time
import flightStats
import reportStats
from datetime import datetime, timedelta, timezone
from DBInterface import DBInterface
from CrewInterface import CrewInterface
import visualization
import pandas as pd

pd.set_option('display.width', 320)
pd.set_option('display.max_columns', 30)
pd.set_option('mode.chained_assignment', None)

url_su = 'https://admin-su.crewplatform.aero/'
url_fv = 'https://admin-fv.crewplatform.aero/'
DB = DBInterface()

while True:
    start_date = datetime.now(timezone.utc)
    table_list = DB.get_list_of_tables()
    interface_su = CrewInterface(url_su)
    interface_fv = CrewInterface(url_fv)

    if 'flights_su' in table_list:
        old_flights_su = DB.read_table('flights_su', 'departureDate')
        df_flights_su = flightStats.update_flights(interface_su,
                                                   old_flights_su,
                                                   filter_numbers=3000)
    else:
        df_flights_su = flightStats.get_flights_table(interface_su,
                                                      start_date +
Example #6
"""
This sample demonstrates a simple skill built with the Amazon Alexa Skills Kit.
The Intent Schema, Custom Slots, and Sample Utterances for this skill, as well
as testing instructions are located at http://amzn.to/1LzFrj6

For additional samples, visit the Alexa Skills Kit Getting Started guide at
http://amzn.to/1LGWsLG
"""

from __future__ import print_function
from DBInterface import DBInterface

connection = DBInterface('connection.txt')

# --------------- Helpers that build all of the responses ----------------------


def build_speechlet_response(title, output, reprompt_text, should_end_session):
    return {
        'outputSpeech': {
            'type': 'PlainText',
            'text': output
        },
        'card': {
            'type': 'Simple',
            'title': "SessionSpeechlet - " + title,
            'content': "SessionSpeechlet - " + output
        },
        'reprompt': {
            'outputSpeech': {
                'type': 'PlainText',
                'text': reprompt_text
            }
        },
        'shouldEndSession': should_end_session
    }
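
The same AWS sample pairs this helper with a response envelope builder; a minimal sketch of that companion:

def build_response(session_attributes, speechlet_response):
    return {
        'version': '1.0',
        'sessionAttributes': session_attributes,
        'response': speechlet_response
    }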
Example #7
class VKIntegrator:
    def __init__(self):
        self.vkp = VKParser()
        self.dbi = DBInterface()

    def update_group_members(self, groups=None):
        # load the default list of groups whose members to check, if none are passed
        # (None default avoids the mutable-default-argument pitfall)
        if not groups:
            groups = self.dbi.get_default_groups_to_update_members()

        self.dbi.groups_members_truncate_staging()

        counter = 1
        for g in groups:
            print('loading group', counter, 'of', len(groups))
            counter += 1

            for retry in range(3):
                try:
                    members = pd.DataFrame({
                        'group_id': g,
                        'user_id': self.vkp.get_group_members(g)
                    })
                    break
                except ValueError as err:
                    print(err)
                    print('retry this group in 3 sec..')
                    time.sleep(3)
            else:
                # all retries failed: skip this group rather than
                # re-uploading stale data from the previous iteration
                print('giving up on group', g, 'after 3 retries')
                continue

            # upload group members to staging
            print('uploading group', g, 'members to sql staging')
            self.dbi.groups_members_add_to_staging(members)

        # write only enters/exits to dbo using SQL sproc
        print('merging staging->dbo inside SQL')
        self.dbi.groups_members_merge()

        print('group members updated')

    def e1_extend_groups(self):
        print('executing e1_extend_groups sproc')
        connection_string = 'mssql+pyodbc://localhost\\SQLEXPRESS/VK?driver=SQL+Server'
        engine = sqlalchemy.create_engine(connection_string)
        engine.execution_options(autocommit=True).execute(
            'exec VK.ext.e1_extend_groups @basic_group = 144657300')

    def e1_get_extended_groups(self):
        conn = pyodbc.connect(
            r'Driver={SQL Server};Server=.\SQLEXPRESS;Database=VK;Trusted_Connection=yes;'
        )
        groups_df = pd.read_sql(
            'select group_id, run_time '
            'from ext.e1_vw_extended_groups ', conn)
        print('got', groups_df.shape[0],
              'extended groups for extension with run time',
              groups_df['run_time'][0])
        return list(groups_df['group_id'])

    def get_interesting_users(self, source_vw='dbo.vw_interesting_users'):
        conn = pyodbc.connect(
            r'Driver={SQL Server};Server=.\SQLEXPRESS;Database=VK;Trusted_Connection=yes;'
        )
        users = pd.read_sql('select user_id from ' + source_vw, conn)
        return list(users['user_id'])

    def update_users_groups(self, users):
        upload_to_sql_block_size = 1000  # push results to SQL after every 1000 processed users

        connection_string = 'mssql+pyodbc://localhost\\SQLEXPRESS/VK?driver=SQL+Server'
        engine = sqlalchemy.create_engine(connection_string)

        for b in range(0, len(users), upload_to_sql_block_size):
            print('Users groups block extraction launched. processed', b,
                  'users of', len(users))
            users_block = users[b:b + upload_to_sql_block_size]
            users_groups = self.vkp.get_users_groups(users_block)

            # upload users_groups to staging
            print('uploading block with users_groups to sql')
            users_groups.to_sql(schema='staging',
                                name='users_groups',
                                con=engine,
                                index=False,
                                if_exists='replace')

            # write only enters/exits to dbo using SQL sproc
            print('merging state diff inside SQL')
            engine.execution_options(autocommit=True).execute(
                'exec VK.discovering.merge_users_groups')

            print('block of users_groups uploaded to SQL')

        print('update_users_groups finished')

    def update_groups(self, max_groups_to_update=200000):
        conn = pyodbc.connect(
            r'Driver={SQL Server};Server=.\SQLEXPRESS;Database=VK;Trusted_Connection=yes;'
        )
        groups = pd.read_sql(
            'select top ' + str(max_groups_to_update) + ' group_id '
            'from dbo.vw_groups  '
            'order by users_with_grp desc', conn)
        groups_list = list(groups['group_id'])

        upload_to_sql_block_size = 2000
        for b in range(0, len(groups_list), upload_to_sql_block_size):
            print('Group info block extraction launched. processed', b,
                  'groups of', len(groups_list))
            block = groups_list[b:b + upload_to_sql_block_size]

            groups_info = self.vkp.get_groups_info(block)

            # upload group info to staging
            print('uploading block with group info to sql')
            connection_string = 'mssql+pyodbc://localhost\\SQLEXPRESS/VK?driver=SQL+Server'
            engine = sqlalchemy.create_engine(connection_string)
            groups_info.to_sql(schema='staging',
                               name='groups',
                               con=engine,
                               index=False,
                               if_exists='replace')

            # write only enters/exits to dbo using SQL sproc
            print('merging state diff inside SQL')
            engine.execution_options(
                autocommit=True).execute('exec VK.discovering.merge_groups')

            print('block of group info uploaded to SQL')

    def post_photos(self, source_id, source_album, to_group, since_dt):
        # source_id = user_id or minus group_id
        times_to_post = [[6, 44], [7, 44], [11, 44], [12, 44], [17, 44],
                         [19, 44], [20, 44], [21, 44], [22, 44], [23, 44]]

        photos = self.vkp.get_photos(source_id, source_album)
        exclude = []
        print('WARNING! excluded', len(set(photos).intersection(exclude)),
              'photos')
        photos = [x for x in photos if x not in exclude]
        print('uploading', len(photos), 'photos')

        first_dt = datetime.strptime(since_dt, '%Y-%m-%d')
        days_counter = 0
        for day in range(0, len(photos), len(times_to_post)):
            post_date = first_dt + timedelta(days=days_counter)
            days_counter += 1

            daily_photos = photos[day:day + len(times_to_post)]

            for photo in range(len(daily_photos)):
                post_time = post_date.replace(hour=times_to_post[photo][0],
                                              minute=times_to_post[photo][1])
                print(post_time, daily_photos[photo])
                self.vkp.wall_post(
                    owner_id=-to_group,
                    from_group=1,  # post on behalf of the group
                    attachments='photo' + daily_photos[photo],
                    publish_date=post_time.timestamp())
                time.sleep(0.3)  # to avoid ban
            if days_counter % 3 == 0:
                print(
                    'long 25 sec sleep to avoid captcha, and meaningless photo api call'
                )
                time.sleep(25)
                self.vkp.get_photos(source_id, source_album)

    def scan_walls(self, source_ids=None):
        # source_ids - list of sources: either -group_id or user_id

        # look at all posts from the last max_post_age_days; older posts are not refreshed
        max_post_age_days = 10
        x_dates_ago = datetime.now().date() - timedelta(days=max_post_age_days)
        oldest_post_unix_time = time.mktime(x_dates_ago.timetuple())

        # if no source list is given, take it from the SQL view
        if not source_ids:
            source_ids = self.dbi.get_default_walls_to_scan()

        # posts are added to staging as the loop runs and merged in one pass,
        # so staging must be cleared first
        self.dbi.posts_truncate_staging()
        self.dbi.likes_and_reposts_truncate_staging()

        # walk every wall
        for source_id in source_ids:
            print('scanning wall posts of source', source_id)
            recent_posts = self.vkp.get_recent_posts(source_id,
                                                     oldest_post_unix_time)

            # walk every post on the wall, collecting likes and reposts
            posts_list = list(recent_posts['post_id'])
            print('scanning likes and reposts for received posts of source',
                  source_id)
            likes_and_reposts = self.vkp.get_likes_and_reposts(
                source_id, posts_list)

            # store posts only after all their likes and reposts were fetched
            # successfully, so a crash mid-run leaves staging mergeable without
            # corrupting data
            print('loading posts and likes and repost to staging..')
            self.dbi.posts_add_to_staging(recent_posts)
            self.dbi.likes_and_reposts_to_staging(likes_and_reposts)

        # all data has landed in SQL staging; merge it into dbo
        self.dbi.posts_merge()
        self.dbi.likes_and_reposts_merge()
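
A minimal usage sketch of the class above (hypothetical driver; assumes the VK API and the SQL Server connection are configured):

integrator = VKIntegrator()
integrator.update_group_members()           # refresh membership of the default groups
users = integrator.get_interesting_users()  # user ids from dbo.vw_interesting_users
integrator.update_users_groups(users)       # sync each user's groups in blocks of 1000
integrator.scan_walls()                     # collect recent posts, likes and reposts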
Example #8
    def makeTree(self):
        db = DBInterface(self, self.dbname)
        table = "AudioInfo"
        songDefault = FileNode(table, "title", db,
                               TrackTitleFormatConverter(table))
        songSpecials = None
        songTree = DBNode("song", table, "title", db,
                          TrackTitleFormatConverter(table), songDefault,
                          songSpecials)

        albumDefault = songTree
        albumSpecials = None
        albumTree = DBNode("album", table, "album", db,
                           SimpleFormatConverter(table, "album"), albumDefault,
                           albumSpecials)

        artistDefault = albumTree
        artistSpecials = None
        artistTree = DBNode("artist", table, "artist", db,
                            SimpleFormatConverter(table, "artist"),
                            artistDefault, artistSpecials)

        timeDefault = artistTree
        timeSpecials = None
        timeTree = DBNode("time", table, "year", db,
                          SimpleFormatConverter(table, "year"), timeDefault,
                          timeSpecials)
        timeAggregateSpecials = {
            "Unknown": timeDefault,
            "0": timeDefault
        }
        timeAggregate = DBAggregateNode("time_aggregator", table, "year", db,
                                        DecadeAggregator(),
                                        DecadeConverter(table, "year"),
                                        timeTree, timeAggregateSpecials)

        genreDefault = artistTree
        genreSpecials = {"Soundtrack": albumTree}
        genreTree = DBNode("genre", table, "genre", db,
                           SimpleFormatConverter(table, "genre"), genreDefault,
                           genreSpecials)

        allSongDefault = FileNode(table, "title", db,
                                  ArtistTitleFormatConverter(table))
        allSongSpecials = None
        allSongTree = DBNode("song", table, "title", db,
                             ArtistTitleFormatConverter(table), allSongDefault,
                             allSongSpecials)
        allSongTreeSpecials = None
        allSongAggregate = DBAggregateNode(
            "song", table, "title", db, FirstDigitAggregator(),
            AlphabeticalConverter(table, "title"), allSongTree,
            allSongTreeSpecials)

        basedict = {}
        basedict["ByGenre"] = genreTree
        basedict["ByArtist"] = artistTree
        ##########################################
        # Use this to not have a year sort
        #basedict["ByTime"] = timeTree
        ##########################################
        basedict["ByTime"] = timeAggregate

        ##########################################
        # Use this to not have an alphabetical sort
        # basedict["BySong"] = allSongTree
        ##########################################
        basedict["BySong"] = allSongAggregate

        self.tree = BaseNode(basedict)
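
The makeTree examples all build the same pattern: a chain of browsing views in which each DBNode falls back to the next (genre -> artist -> album -> song), while a specials mapping overrides the default hop (here, "Soundtrack" jumps straight to albums). A minimal sketch of that idea with hypothetical stand-in classes, not the actual DBNode implementation:

class Node:
    """Simplified stand-in for DBNode: resolves a key to the next view."""

    def __init__(self, name, default=None, specials=None):
        self.name = name
        self.default = default          # next view in the fallback chain
        self.specials = specials or {}  # per-key overrides

    def next_view(self, key):
        # special keys jump to a designated subtree; everything else
        # descends into the default child
        return self.specials.get(key, self.default)

song = Node("song")
album = Node("album", default=song)
artist = Node("artist", default=album)
genre = Node("genre", default=artist, specials={"Soundtrack": album})

assert genre.next_view("Rock") is artist
assert genre.next_view("Soundtrack") is album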