Esempio n. 1
0
 def __init__(self, client=None, db=None, regex_function=False):
     """Set up the message store: optional client, backing DB, and column schema."""
     if client:
         self.client = client
     # Every column is stored as TEXT, so build the schema from one key list.
     self.columns = dict.fromkeys(
         [
             'author',
             'author.id',
             'author.name',
             'author.nick',
             'clean_content',
             'channel.name',
             'message_id',
             'attachment_urls',
             'pinned',
             'reactions',
             'raw_mentions',
             'raw_channel_mentions',
             'raw_role_mentions',
             'created_at',
             'edited_at',
             'jump_url',
         ],
         'TEXT',
     )
     self.db = Database(db=db)
     self.db_name = db
     if regex_function:
         # Register a REGEXP(pattern, value) SQL function backed by self.regexp.
         self.db.conn.create_function('REGEXP', 2, self.regexp)
Esempio n. 2
0
def main():
    """Run the rule pipeline: load rules.json, build the fetch query for the
    rule at RULE_NUMBER, then apply the rule's action to the matching ids.
    """
    # `with` guarantees the file handle is closed (the original leaked it),
    # and json.load reads the stream directly.
    with open('rules.json') as json_file:
        rules = json.load(json_file)

    selected_rule = rules.get(RULE_NUMBER)

    sub_rules = selected_rule.get('rules')
    action = selected_rule.get('action')
    overall_predicate = selected_rule.get('overall_predicate')

    sql_fetch_query = create_sql_fetch_query(sub_rules, overall_predicate)

    db = Database(DB_HOST, DB_USER, DB_PASSWORD, DB_NAME)

    # Python 3 print calls — the original Python 2 print statements are
    # syntax errors under Python 3, which the rest of this file targets.
    print("Fetching data from database\n")
    data = db.fetch(sql_fetch_query)

    msg_ids = [i.get('msg_id') for i in data]

    print("Taking {} action\n".format(action))
    take_action(action, msg_ids)

    print("Done\n")
Esempio n. 3
0
def get_user_info(user_id: int, sid: str = None) -> dict:
    """Return all computed behaviour metrics for one user, mining raw data first
    if the user has not been processed yet."""
    mined = {row["UserID"] for row in Database.query('SELECT * FROM "MinedUsers"')}

    if user_id not in mined:
        mine_user_info(user_id, sid)

    # get user days count in dataset (except weekends)
    total_days = Database.query_row(
        'select "TotalDays" from  "MinedUsers" where "UserID" = %s',
        (user_id, ))['TotalDays']

    return {
        'user_responsibility': __calculate_user_responsibility(user_id),
        'user_sociability': __calculate_user_sociability(user_id, total_days),
        'user_procrastination': __calculate_user_procrastination(
            user_id, total_days),
        'user_often_leaving': __calculate_user_leaving(user_id),
        'user_punctuality': __calculate_user_punctuality(user_id),
        'user_leaving_state': __calculate_user_leaving_state(user_id),
    }
Esempio n. 4
0
def main():
    """Ensure TABLE_NAME exists in the configured database, then load data into it."""
    db = Database(DB_HOST, DB_USER, DB_PASSWORD, DB_NAME)

    if not db.is_table_exist(TABLE_NAME):
        create_table(db, TABLE_NAME)

    insert_data(db, TABLE_NAME)
    # Python 3 print call — the original Python 2 print statement is a
    # syntax error under Python 3.
    print("Done\n")
Esempio n. 5
0
def worker(queue):
    """Consume batches of steamids from `queue` and persist their player summaries.

    Runs forever. Each batch is retried until both the fetch and the DB insert
    succeed; failures are logged and the same batch is retried.
    """
    global request_count
    db = Database()
    while True:
        steamids = queue.get()
        while True:
            try:
                summaries = get_player_summaries(steamids, timeout=30)
                db.insert_profiles(summaries)
                request_count += 1
                # BUG FIX: the original never broke out of the retry loop, so a
                # successful batch was re-fetched and re-inserted forever and no
                # further queue items were ever consumed.
                break
            except Exception:
                # narrowed from bare `except:` so KeyboardInterrupt/SystemExit
                # still terminate the worker
                traceback.print_exc()
Esempio n. 6
0
def __calculate_user_responsibility(user_id: int) -> int:
    '''
    Calculates the user's responsibility score.

    :param user_id: user identifier
    :type user_id: int
    :return: responsibility score (scaled to roughly 0-10; -1 when no
        overwork or plan data exists for the user)
    :rtype: int
    '''

    # !Settings
    # max overwork hours per day (setting)
    MAX_OVERWORK = 2

    # get user plan percent
    user_plan_percent = Database.query_row(
        'SELECT "PlanPercent" from "UserPlanPercent" WHERE "UserID" = %s LIMIT 1',
        (user_id, ))

    if user_plan_percent:
        user_plan_percent = user_plan_percent["PlanPercent"]

    # get user overwork
    user_overwork = Database.query_row(
        """
            SELECT sum("Overwork") AS "TotalOverwork", count("UserID") as "TotalDays"
            FROM "UserOverwork"
            WHERE "UserID" = %s
            group by "UserID"
        """, (user_id, ))

    # if can't find overwork and plan info return unknown value
    if not user_overwork and not user_plan_percent:
        return -1

    # if find only user plan info return plan percent
    elif user_plan_percent and not user_overwork:
        return int(user_plan_percent / 10)

    # if find user overwork info
    elif user_overwork:
        # calculate overwork percents
        # NOTE(review): "TotalOverwork" is used via .total_seconds(), so it is
        # presumably an interval/timedelta column — confirm against the schema.
        total_max_overwork = timedelta(hours=MAX_OVERWORK *
                                       user_overwork["TotalDays"])
        overwork_percents = (user_overwork['TotalOverwork'].total_seconds() /
                             total_max_overwork.total_seconds()) * 100
        # cap at 100% before scaling
        overwork_percents = 100 if overwork_percents > 100 else overwork_percents

        if user_plan_percent:
            # average of overwork percent and plan percent, scaled to 0-10
            return int((overwork_percents + user_plan_percent) / 20)

        return int(overwork_percents / 10)
Esempio n. 7
0
def __calculate_user_sociability(user_id: int, total_days: int) -> int:
    """
    Calculates the user's sociability score.

    :param user_id: user identifier
    :type user_id: int
    :param total_days: number of days for which statistics were collected
    :type total_days: int
    :return: sociability score (0-10; -1 when no communication data exists)
    :rtype: int
    """

    # !Settings
    # max user communication minutes per day (setting)
    MAX_COMMUNICATION = 45

    # Total time spent on messaging and SBIS calls. The category names are
    # runtime values stored in the DB and must remain in Russian.
    user_communication = Database.query_row(
        """
        select sum("WastedTime") as "WastedTime"
        from  "UserActivity"
        where "UserID" = %s and "Category" in ('Обмен сообщениями (IM, Почта)', 'Звонки СБИС')
        """, (user_id, ))

    if user_communication["WastedTime"]:
        max_user_communication = timedelta(minutes=MAX_COMMUNICATION *
                                           total_days)
        # ratio of actual communication time to the allowed maximum, 0-10 scale
        result = int(
            round(user_communication['WastedTime'].total_seconds() /
                  max_user_communication.total_seconds() * 10))
        # cap the score at 10
        return 10 if result > 10 else result
    else:
        return -1
Esempio n. 8
0
def __calculate_user_leaving(user_id: int) -> bool:
    '''
    Calculates whether the user leaves the office often.

    :param user_id: user identifier
    :type user_id: int
    :return: True when the average daily leaving count exceeds MAX_LEAVING,
        False otherwise; None when no location data exists for the user
        (note: the declared -> bool does not cover that None case)
    :rtype: bool
    '''

    # !Settings
    # max user leavings per day (setting)
    MAX_LEAVING = 3

    user_leavings = Database.query_row(
        """
        select avg("LeavingCount") as "AvgLeavings"
        from (
            select "DateTime"::date as "Date" , count("Status") as "LeavingCount"
            from "UserLocation"
            where "UserID" = %s and "Status" = 0
            group by "Date"
        ) as "LeavingPerDay"
        """, (user_id, ))
    if user_leavings['AvgLeavings']:
        # the comparison already yields a bool — no `True if ... else False` needed
        return int(user_leavings["AvgLeavings"]) > MAX_LEAVING
    else:
        return None
Esempio n. 9
0
def __calculate_user_procrastination(user_id: int, total_days: int) -> int:
    '''
    Calculates the user's procrastination score.

    :param user_id: user identifier
    :type user_id: int
    :param total_days: number of days for which statistics were collected
    :type total_days: int
    :return: procrastination score (0-10; 0 when no non-useful activity found)
    :rtype: int
    '''

    # !Settings
    # max user procrastination minutes per day (setting)
    MAX_PROCRASTINATION = 45

    # total time spent on activities flagged as not useful ("Useful" = -1)
    user_procrastination = Database.query_row(
        """
        select sum("WastedTime") as "WastedTime" from "UserActivity"
        where "UserID" = %s and "Useful" = -1
        """, (user_id, ))
    if user_procrastination['WastedTime']:
        total_procrastenation = timedelta(minutes=MAX_PROCRASTINATION *
                                          total_days)
        # ratio of wasted time to the allowed maximum, on a 0-10 scale
        result = int(
            round(user_procrastination['WastedTime'].total_seconds() /
                  total_procrastenation.total_seconds() * 10))

        # cap the score at 10
        return 10 if result > 10 else result

    else:
        return 0
def __get_user_activity(user_id: int, sid: str, datelist: list = None):
    """
    Fetches the user's activity data and stores it in the database.

    :param user_id: identifier of the user to collect statistics for
    :type user_id: int
    :param sid: session id passed to the Saby RPC calls
    :type sid: str
    :param datelist: list of dates to collect statistics for, defaults to None
    :param datelist: list, optional
    """

    insert_query = """
        INSERT INTO "UserActivity"("UserID", "Date", "Category", "Useful", "WastedTime")
        VALUES (%s, %s, %s, %s, %s);
    """
    if not datelist:
        datelist = __get_date_range(date.today())

    for cur_date in datelist:

        # RPC keyword names are part of the remote API and must stay in Russian.
        rpc_result = SabyInvoker.invoke('Report.PersonProductivityStatistic',
                                        sid,
                                        Фильтр=SabyFormatsBuilder.build_record(
                                            {
                                                "Date": cur_date,
                                                "Person": user_id
                                            }),
                                        Сортировка=None,
                                        Навигация=None,
                                        ДопПоля=[])

        # keep only entries that have a 'Parent@' value set
        person_activities = [
            activity for activity in rpc_result['rec'] if activity['Parent@']
        ] if rpc_result else []

        for activity in person_activities:
            # write user activity
            Database.query(
                insert_query,
                (user_id, cur_date, activity['Name'], activity['Useful'],
                 __convert_magic_string(activity['Duration'])))

        # get user calls
        out_calls = __get_user_out_calls(user_id, cur_date, sid)

        # calls are stored as their own category; "Звонки СБИС" is a runtime
        # DB value and must remain in Russian
        Database.query(
            insert_query,
            (user_id, cur_date, "Звонки СБИС", 0, out_calls['duration']))
def __get_user_neural_data(user_id: int, sid: str):
    """
    Collects call and overwork aggregates for the user and stores them in
    "UsersNeuralData".

    Compares the most recent month against the two months preceding it;
    presumably consumed as features by the neural leaving-state predictor —
    confirm against NeuralNetwork's input layout.

    :param user_id: user identifier
    :param sid: session id passed to the Saby RPC calls
    """
    # get data from last month
    last_days_range = __get_date_range(date.today(), 1)
    last_count, last_time = __get_user_calls(user_id, last_days_range, sid)
    last_overwork = __get_user_overwork(user_id, last_days_range, sid)
    # get data from first 2 months
    first_start_day = date.today() - relativedelta(months=1)
    first_days_range = __get_date_range(first_start_day, 2)
    first_count, first_time = __get_user_calls(user_id, first_days_range, sid)
    first_overwork = __get_user_overwork(user_id, first_days_range, sid)

    Database.query(
        """
        INSERT INTO "UsersNeuralData"(
            "UserID", "UserFirstCalls", "UserLastCalls", "UserFirstDuration", "UserLastDuration", "UserFirstOverwork", "UserLastOverwork"
        )
        VALUES (%s, %s, %s, %s, %s, %s, %s);
        """, (user_id, first_count, last_count, first_time, last_time,
              first_overwork, last_overwork))
def __get_user_location_and_overwork(user_id: int,
                                     sid: str,
                                     datelist: list = None):
    """
    Fetches the user's location data and stores it in the database.

    :param user_id: identifier of the user to collect statistics for
    :type user_id: int
    :param sid: session id passed to the Saby RPC calls
    :type sid: str
    :param datelist: list of dates to collect statistics for, defaults to None
    :param datelist: list, optional
    """
    if not datelist:
        datelist = __get_date_range(date.today())

    for cur_date in datelist:

        rpc_result = __get_user_day_location(user_id, cur_date, sid)
        # keep only entrance events; the 'Описание' key and 'entrance' value
        # come from the RPC payload and must remain unchanged
        entrances = [
            entity for entity in rpc_result['activity_detail']['rec']
            if entity['Описание'] == 'entrance'
        ]

        for entrance in entrances:
            Database.query(
                """
                INSERT INTO "UserLocation" ("UserID", "DateTime", "Status")
                VALUES (%s, %s, %s);
                """, (user_id, entrance["ВремяНачало"], entrance["Действие"]))

        # look at the overall activity summary for the day
        activity_summary = rpc_result.get('activity_summary')

        # scheduled working time for the day
        need_time_str = activity_summary.get('ВремяРаботыГрафик', '00:00:00')
        # time the employee actually worked
        fact_time_str = activity_summary.get('ВремяРаботы', '00:00:00')

        Database.query(
            """
            INSERT INTO "UserOverwork" ("UserID", "Date", "Overwork")
            VALUES (%s, %s, %s);
            """, (user_id, cur_date,
                  __calculate_overwork(need_time_str, fact_time_str)))
def mine_user_info(user_id: int, sid: str):
    """
    Collects all statistics for one user and marks the user as mined.

    Runs every mining step (location/overwork, activity, plan percent,
    neural features), records the user in "MinedUsers" with the number of
    mined days, then commits the whole batch in one transaction.

    :param user_id: user identifier
    :param sid: session id passed to the Saby RPC calls
    """
    dates = __get_date_range(date.today())
    # Get user location and overwork
    __get_user_location_and_overwork(user_id, sid, dates)

    # Get user activity
    __get_user_activity(user_id, sid, dates)

    # Get user plan percent
    __get_user_plan_percent(user_id, sid)

    # Prepare neural dataset
    __get_user_neural_data(user_id, sid)
    # Add user id in mined persons
    Database.query(
        """
            INSERT INTO "MinedUsers"("UserID", "TotalDays")
            VALUES (%s, %s);
        """, (user_id, len(dates)))

    # Apply database changes
    Database.commit_changes()
def __get_user_plan_percent(user_id: int, sid: str, month_count: int = 3):
    """
    Fetches the user's plan-completion percentage for the period and stores it.

    :param user_id: identifier of the user to collect statistics for
    :type user_id: int
    :param sid: session id passed to the Saby RPC calls
    :type sid: str
    :param month_count: number of months to collect statistics for, defaults to 3
    :type month_count: int, optional
    """
    today = date.today()
    # first day of the month, month_count months back ...
    start_date = date(year=today.year, month=today.month,
                      day=1) - relativedelta(months=month_count)
    # ... through the last day of the previous month
    end_date = date(year=today.year, month=today.month,
                    day=1) - relativedelta(days=1)

    # RPC method and keyword names are part of the remote Saby API (Russian).
    rpc_result = SabyInvoker.invoke('ПланРабот.ПунктыНаКарточкеСотрудника',
                                    sid,
                                    Фильтр=SabyFormatsBuilder.build_record({
                                        "ДатаНачала":
                                        str(start_date),
                                        "ДатаОкончания":
                                        str(end_date),
                                        "ФильтрПериод":
                                        "Период",
                                        "ЧастноеЛицо":
                                        user_id
                                    }),
                                    Сортировка=None,
                                    Навигация=None,
                                    ДопПоля=[])
    percent_structure = rpc_result.get('outcome', None) if rpc_result else None

    if percent_structure:
        Database.query(
            """
                INSERT INTO "UserPlanPercent"("UserID", "PlanPercent")
                VALUES (%s, %s);
                """, (user_id, percent_structure['Процент']))
Esempio n. 15
0
class GetUserInfoRepository:
    """Read-only lookup of Discord users joined with their profile-picture resource."""

    # initialize GetUserInfoRepository
    def __init__(self):
        self.db = Database()

    # retrieve user from DB
    def get_user(self, userid):
        """Return the first row matching `userid` as a dict."""
        dt = self.db.select([
            'DiscordUserID', 'UserName', 'UserHash', 'Currency', 'LastDaily',
            'Link AS ProfilePictureURL'
        ], 'discordusers LEFT OUTER JOIN resources ON resources.ResourceID = discordusers.ResourceID',
                            # SECURITY FIX: parameterized query — the original
                            # concatenated userid straight into the SQL string
                            # (injection risk), unlike the sibling repositories
                            # which all use '%s' placeholders with a value list.
                            'discorduserid = %s', [userid])
        # FIXME(review): eval(str(...)) round-trips the row through Python
        # source text — fragile and unsafe on untrusted data; prefer
        # returning dt.getRows()[0] directly.
        return eval(str(dt.getRows()[0]))
Esempio n. 16
0
class DTORepository:
    """Generic table CRUD on top of Database; entities are plain dicts whose
    keys are column names."""

    # initialize DTORepository
    def __init__(self):
        self.db = Database()

    def __getKeys(self, entity):
        # column names are simply the entity's keys (the original built this
        # list with a manual append loop)
        return list(entity.keys())

    # returns the table passed
    def selectAll(self, table):
        """Return every row of `table`."""
        dt = self.db.select(['*'], table)
        # removed leftover 'asdf' debug prints
        # FIXME(review): eval(str(...)) is fragile/unsafe; prefer returning
        # structured rows from Database directly.
        return eval(str(dt))

    # retrieve information for the get id method on the controller
    def insert(self, table, entity):
        """Insert `entity` into `table`, using its keys as column names."""
        props = self.__getKeys(entity)
        return self.db.insertOne(table, props, entity)

    def select(self, table, IDColumn, ID):
        """Return rows of `table` where `IDColumn` equals `ID` (parameterized)."""
        dt = self.db.select(['*'], table, f"{IDColumn} = %s", [ID])
        # removed leftover debug print
        return eval(str(dt))

    def update(self, table, entity, where = '', values = []):
        """Update rows matching `where`/`values` with the entity's columns."""
        props = self.__getKeys(entity)
        return self.db.update(table, props, entity, where, values)

    def delete(self, table, where = '', values = []):
        """Delete rows matching `where`/`values` and return the result."""
        dt = self.db.delete(table, where, values)
        return eval(str(dt))
Esempio n. 17
0
class CommonRepository:
    """Shared currency-adjustment helpers used by the other repositories."""

    def __init__(self):
        self.db = Database()

    def add_currency_to_table(self, table, idcolumn, id, amount):
        """Add `amount` (may be negative) to the Currency column of the row
        identified by `idcolumn` = `id`."""
        row = self.db.select(['Currency'], table, f"{idcolumn} = %s", [id]).getRows()[0]
        updated_total = int(row['Currency']) + int(amount)
        self.db.update(
            table,
            ['Currency'],
            {'Currency': updated_total},
            f"{idcolumn} = %s",
            [id],
        )

    # adds amount to the specified raffle's currency pool
    def add_currency_to_raffle(self, raffleid, amount):
        self.add_currency_to_table('raffles', 'RaffleID', raffleid, amount)

    # adds amount to the specified user's currency
    def add_to_user_currency(self, discorduserid, amount):
        self.add_currency_to_table('discordusers', 'DiscordUserID', discorduserid, amount)

    # subtracts amount from the specified user's currency
    def subtract_from_user_currency(self, discorduserid, amount):
        self.add_to_user_currency(discorduserid, -1 * amount)
Esempio n. 18
0
def worker(queue):
    """Process (listing_id, listing_link) pairs forever, storing listing activities.

    Every pair is re-queued after processing so listings are polled repeatedly;
    the global counters track successes, key errors, and unexpected failures.
    """
    global request_count, keyerror_count, unexpected_error_count
    db = Database()
    while True:
        listing_id, listing_link = queue.get()
        try:
            for activity in get_activities(listing_id):
                # buy-order events carry no usable listing data — skip them
                if activity["type"] in ("BuyOrderCancel", "BuyOrderMulti"):
                    continue
                listing = Listing(
                    # link format: .../<game>/<item_name> — TODO confirm
                    game=int(listing_link.split('/')[5]),
                    item_name=unquote(listing_link.split('/')[6]),
                    price=parse_price(activity["price"]),
                    owner_name=activity["persona_seller"] or activity["persona_buyer"],
                    owner_avatar=activity["avatar_seller"] or activity["avatar_buyer"]
                )
                db.insert_listing(listing)
            request_count += 1
        except KeyError:
            keyerror_count += 1
        except Exception:
            # narrowed from bare `except:` so KeyboardInterrupt/SystemExit
            # still terminate the worker
            unexpected_error_count += 1
            traceback.print_exc()
        queue.put((listing_id, listing_link))
Esempio n. 19
0
def __calculate_user_leaving_state(user_id: int) -> int:
    """
    Predicts the user's leaving-state score with the neural model.

    :param user_id: user identifier
    :return: model prediction scaled to 0-10; -1 on any failure
    """
    try:
        # feature row must match the "UsersNeuralData" column order written
        # by __get_user_neural_data
        user_neural_data = Database.query_row(
            """
            select "UserFirstCalls", "UserLastCalls", "UserFirstDuration", "UserLastDuration", "UserFirstOverwork", "UserLastOverwork"
            from "UsersNeuralData"
            where "UserID" = %s
            """, (user_id, ))
        XpredictInputData = numpy.array([list(user_neural_data)])
        print(XpredictInputData)
        with NeuralNetwork.graph.as_default():
            return int(
                round(
                    NeuralNetwork.model.predict(XpredictInputData)[0][0] * 10))
    except BaseException as exc:
        # NOTE(review): BaseException also swallows KeyboardInterrupt and
        # SystemExit — presumably intended as a catch-all around the model
        # call, but confirm; `except Exception` would be safer.
        print(exc)
        return -1
class CollectiblesRepository:
    """Collectible lookup and purchase flow for Discord users."""

    # initialize CollectiblesRepository
    def __init__(self):
        self.db = Database()
        self._commonRepository = CommonRepository()

    def get_collectibles_for_user(self, DiscordUserID):
        """Return (collectible rows, status) for one user."""
        try:
            # FIXME(review): eval(str(...)) round-trips rows through Python
            # source text — fragile; prefer returning the rows directly.
            result = eval(str(self.db.select(['*'], 'discordusercollectibles'
                + ' JOIN collectibles ON collectibles.CollectibleID = discordusercollectibles.CollectibleID',
                'discordusercollectibles.DiscordUserID = %s', [DiscordUserID])))
            return result, StatusCodes.OK
        except Exception:
            # narrowed from bare `except:` so KeyboardInterrupt propagates
            return 'error in get_collectibles_for_user', StatusCodes.INTERNAL_SERVER_ERROR

    def purchase(self, rDiscordUserID, rCollectibleName):
        """Buy `rCollectibleName` for the user, charging their currency.

        Returns a (message, status) tuple for every outcome: success,
        insufficient funds, duplicate purchase, unknown collectible,
        unknown user, or internal error.
        """
        try:
            userCurrenyQuery = self.db.select(['Currency'], 'discordusers', 'DiscordUserID = %s', [rDiscordUserID]).getRows()
            if len(userCurrenyQuery) > 0:
                userCurrency = userCurrenyQuery[0]['Currency']
                collectibleQuery = self.db.select(['*'], 'collectibles', 'Name = %s', [rCollectibleName]).getRows()
                if len(collectibleQuery) > 0:
                    collectibleItem = collectibleQuery[0]
                    if len(self.db.select(['*'], 'discordusercollectibles', 'DiscordUserID = %s AND CollectibleID = %s', [rDiscordUserID, collectibleItem['CollectibleID']]).getRows()) == 0:
                        # make sure the user has enough currency to purchase this collectible
                        collectiblePrice = int(collectibleItem['Currency'])
                        if userCurrency >= collectiblePrice:
                            # insert into DiscorduserCollectibles table
                            self.db.insertOne('discordusercollectibles', ['DiscordUserID', 'CollectibleID', 'Date'], {
                                'DiscordUserID': rDiscordUserID,
                                'CollectibleID': collectibleItem['CollectibleID'],
                                'Date': str(datetime.datetime.now())
                            })
                            # decrement the user's currency
                            # (removed leftover 'line 40'/'line 42' debug prints)
                            self._commonRepository.subtract_from_user_currency(rDiscordUserID, collectiblePrice)
                            # return OK
                            return f'Successfully purchased {rCollectibleName}', StatusCodes.OK
                        else:
                            # the user does not have enough currency to purchase this collectible
                            return 'Insufficient funds', StatusCodes.IM_A_TEAPOT
                    else:
                        # the user has already purchased this collectible
                        return 'the user has already purchased this collectible', StatusCodes.CONFLICT
                else:
                    # no collectibles in the DB with a matching name
                    return f"Could not find a collectible with name '{rCollectibleName}'", StatusCodes.NOT_FOUND
            else:
                # BUG FIX: the original fell through here and implicitly
                # returned None when the user was missing, breaking callers
                # that unpack the (message, status) tuple
                return f"Could not find a user with id '{rDiscordUserID}'", StatusCodes.NOT_FOUND
        except Exception:
            # some error has occurred
            return '', StatusCodes.INTERNAL_SERVER_ERROR
Esempio n. 21
0
def __calculate_user_punctuality(user_id: int) -> int:
    """
    Calculates the user's punctuality score.

    Clusters the user's daily first-arrival times and counts how many days
    fall within MAX_DEVIATION minutes of the cluster centre.

    :param user_id: user identifier
    :return: punctuality score (0-10; -1 when no arrival data exists)
    """
    # !Settings
    # max deviation minutes per day (setting)
    MAX_DEVIATION = 30

    # earliest arrival ("Status" = 1) time per day for this user
    user_incoming = Database.query(
        """
        With "Dates" as (
            select "DateTime"::date as "Date"
            from "UserLocation"
            Where "UserID" = %s and "Status" = 1
            group by "Date"
        )
        select dates."Date", min(main."DateTime"::time) as "ComingTime"
        from "UserLocation" as main
        inner join "Dates" as dates
            on dates."Date" = main."DateTime"::date
        group by dates."Date"
        order by dates."Date"
        """, (user_id, ))
    if user_incoming:
        # get user incoming times as seconds
        timelist = [row['ComingTime'] for row in user_incoming]
        timelist = [
            timedelta(hours=x.hour, minutes=x.minute,
                      seconds=x.second).total_seconds() for x in timelist
        ]

        # convert max deviation to seconds
        time_max_deviation = timedelta(minutes=MAX_DEVIATION).total_seconds()

        # calculate average arrival time (single-cluster k-means centre)
        # with the allowed deviation window around it
        k_mean = kmeans(timelist, 1)[0][0]
        max_time = k_mean + time_max_deviation
        min_time = k_mean - time_max_deviation

        # find punctual points
        punctual = [
            time for time in timelist if time > min_time and time < max_time
        ]
        return int(round(len(punctual) / len(timelist) * 10))

    return -1
Esempio n. 22
0
#!/usr/bin/env python3
# Flask front-end for browsing stored Steam market listings.
import os
import re

from flask import Flask, request, render_template, redirect

from helpers import Database, Listing, get_player_summaries

app = Flask(__name__)
# single module-level DB handle shared by all request handlers
db = Database()


@app.route("/")
def index():
    user_address = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
    # # user_ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)

    # print(f'\n \n \n \n{user_address} \n \n \n \n')
    if user_address != '178.236.57.82':
        return render_template('restriction.html', ip_addr=user_address)

    else:

        minprice = request.args.get("minprice", 30)
        maxprice = request.args.get("maxprice", 2000)
        csgo = request.args.get("disable_csgo", "off") != "on"
        dota = request.args.get("disable_dota2", "off") != "on"
        tf = request.args.get("disable_tf2", "off") != "on"
        minprice = int(minprice)
        maxprice = int(maxprice)
Esempio n. 23
0
 def __init__(self):
     """Create the repository with its own Database connection."""
     self.db = Database()
Esempio n. 24
0
class ProfileRepository:
    """Builds the profile-page URL for a Discord user, bundling the user's
    collectibles and social-media links as query parameters."""

    # initialize ProfileRepository
    def __init__(self):
        self.db = Database()

    def trim(self, string):
        """Strip a trailing query string: everything from the first '?' on."""
        return string[:string.index('?')] if '?' in string else string

    def dtToMapStr(self, dt):
        """Serialize [{'k': ..., 'v': ...}, ...] rows into the custom
        '!!!!!'-delimited map string the profile page expects; 'None' when empty."""
        result = ''
        counter = 0
        for row in dt:
            result += f"{row['k']}!!!!!{self.trim(row['v'])}"
            counter += 1
            # '!!!!!!!!!!' separates entries; omitted after the last one
            result += '!!!!!!!!!!' if counter < len(dt) else ''
        return 'None' if result == '' else result

    def get_url(self, discorduserid):
        """Return (profile URL, status) for the user, or an error tuple."""
        try:
            # SELECT discordusers.DiscordUserID, discordusers.UserName, discordusers.UserHash,
            # discordusers.Currency, discordusers.LastDaily, resources.Link AS ProfilePictureURL
            # FROM discordusers
            # JOIN resources ON discordusers.ResourceID = resources.ResourceID
            # WHERE discorduserid = '309176098590294026';
            dt = self.db.select([
                'discordusers.DiscordUserID', 'discordusers.UserName',
                'discordusers.UserHash', 'discordusers.Currency',
                'discordusers.LastDaily', 'resources.Link AS ProfilePictureURL'
            ], 'discordusers JOIN resources ON discordusers.ResourceID = resources.ResourceID',
                                'DiscordUserID = %s', [discorduserid])
            if len(dt.getRows()) > 0:
                # FIXME(review): eval(str(...)) round-trips rows through Python
                # source text — fragile; prefer using the row objects directly.
                discorduser = eval(str(dt.getRows()[0]))
                # SELECT DISTINCT collectibles.Name, resources.Link
                # FROM discordusercollectibles
                # JOIN collectibles ON discordusercollectibles.CollectibleID = collectibles.CollectibleID
                # JOIN resources ON collectibles.ResourceID = resources.ResourceID
                # WHERE discorduserid = '309176098590294026';
                collectibles = []
                dt = self.db.select(
                    ['DISTINCT collectibles.Name AS k', 'resources.Link AS v'],
                    '''discordusercollectibles
                    JOIN collectibles ON discordusercollectibles.CollectibleID = collectibles.CollectibleID
                    JOIN resources ON collectibles.ResourceID = resources.ResourceID''',
                    'discorduserid = %s', [discorduserid])
                if len(dt.getRows()) > 0:
                    collectibles = eval(str(dt))

                # SELECT DISTINCT socialmedias.Title, resources.Link
                # FROM discordusersocialmedias
                # JOIN socialmedias ON discordusersocialmedias.SocialMediaID = socialmedias.SocialMediaID
                # JOIN resources ON socialmedias.ResourceID = resources.ResourceID
                # WHERE discorduserid = '309176098590294026';
                socialmedias = []
                dt = self.db.select([
                    'DISTINCT socialmedias.Title AS k', 'resources.Link AS v'
                ], '''discordusersocialmedias
                    JOIN socialmedias ON discordusersocialmedias.SocialMediaID = socialmedias.SocialMediaID
                    JOIN resources ON socialmedias.ResourceID = resources.ResourceID''',
                                    'discorduserid = %s', [discorduserid])
                if len(dt.getRows()) > 0:
                    socialmedias = eval(str(dt))

                collectiblesmap = self.dtToMapStr(collectibles)
                socialmediasmap = self.dtToMapStr(socialmedias)
                url = ""
                url += f"app/profile?Currency={discorduser['Currency']}&DiscordUserID={discorduser['DiscordUserID']}&"
                url += f"LastDaily={discorduser['LastDaily']}&ProfilePictureURL={self.trim(discorduser['ProfilePictureURL'])}&"
                url += f"UserName={discorduser['UserName']}&UserHash={discorduser['UserHash']}&"
                url += f"Collectibles={collectiblesmap}&SocialMedias={socialmediasmap}"
                return url, StatusCodes.OK
            else:
                return 'user not found', StatusCodes.NOT_FOUND
        except Exception as e:
            # BUG FIX: the original did `eval(str(e))`, which tries to execute
            # the exception message as Python and itself raises on almost any
            # message text — return the message string instead.
            return str(e), StatusCodes.INTERNAL_SERVER_ERROR
 def __init__(self):
     """Create the repository with its own Database handle and a
     CommonRepository for shared currency operations."""
     self.db = Database()
     self._commonRepository = CommonRepository()
Esempio n. 26
0
def agent(game,
          n_ep,
          n_mcts,
          max_ep_len,
          lr,
          c,
          gamma,
          data_size,
          batch_size,
          temp,
          n_hidden_layers,
          n_hidden_units,
          stochastic=False,
          eval_freq=-1,
          eval_episodes=100,
          alpha=0.6,
          out_dir='../',
          pre_process=None,
          visualize=False):
    """Outer training loop: alternate MCTS-driven data collection with
    network training on the collected (state, value, policy) targets.

    Args:
        game: environment id passed to ``make_game``.
        n_ep: number of training episodes.
        n_mcts: MCTS simulations per environment step.
        max_ep_len: maximum environment steps per episode.
        lr: network learning rate.
        c: UCT exploration constant.
        gamma: discount factor used by the MCTS backups.
        data_size: replay database capacity.
        batch_size: training batch size.
        temp: visit-count temperature when extracting the root policy.
        n_hidden_layers: number of hidden layers in the network.
        n_hidden_units: units per hidden layer.
        stochastic: use the stochastic (DPW) MCTS variant.
        eval_freq: evaluate every ``eval_freq`` episodes; <= 0 disables.
        eval_episodes: number of greedy evaluation episodes.
        alpha: progressive-widening parameter (stochastic variant only).
        out_dir: directory for score dumps and the saved model.
        pre_process: optional callable run once before training.
        visualize: render the MCTS tree after each search.

    Returns:
        Tuple ``(episode_returns, timepoints, a_best, seed_best, R_best,
        offline_scores)``; ``a_best``/``seed_best`` are ``None`` when no
        episode was run.
    """
    if pre_process is not None:
        pre_process()

    # tf.reset_default_graph()

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    episode_returns = []  # storage
    timepoints = []
    # Environments
    Env = make_game(game)
    is_atari = is_atari_game(Env)
    mcts_env = make_game(game) if is_atari else None
    online_scores = []
    offline_scores = []
    mcts_params = dict(gamma=gamma)
    if stochastic:
        mcts_params['alpha'] = alpha
        mcts_maker = MCTSStochastic
    else:
        mcts_maker = MCTS

    D = Database(max_size=data_size, batch_size=batch_size)
    model = Model(Env=Env,
                  lr=lr,
                  n_hidden_layers=n_hidden_layers,
                  n_hidden_units=n_hidden_units)
    t_total = 0  # total steps
    R_best = -np.Inf
    # Pre-initialize so the return statement is well-defined even when
    # n_ep == 0 (previously these were unbound -> NameError).
    a_best = None
    seed_best = None

    with tf.Session() as sess:
        model.sess = sess
        sess.run(tf.global_variables_initializer())

        for ep in range(n_ep):
            if eval_freq > 0 and ep % eval_freq == 0:  #and ep > 0
                print(
                    'Evaluating policy for {} episodes!'.format(eval_episodes))
                seed = np.random.randint(1e7)  # draw some Env seed
                Env.seed(seed)
                s = Env.reset()
                mcts = mcts_maker(root_index=s,
                                  root=None,
                                  model=model,
                                  na=model.action_dim,
                                  **mcts_params)
                env_wrapper = EnvEvalWrapper()
                env_wrapper.mcts = mcts
                starting_states = []

                def reset_env():
                    s = Env.reset()
                    env_wrapper.mcts = mcts_maker(root_index=s,
                                                  root=None,
                                                  model=model,
                                                  na=model.action_dim,
                                                  **mcts_params)
                    starting_states.append(s)
                    if env_wrapper.curr_probs is not None:
                        env_wrapper.episode_probabilities.append(
                            env_wrapper.curr_probs)
                    env_wrapper.curr_probs = []
                    return s

                def forward(a, s, r):
                    env_wrapper.mcts.forward(a, s, r)
                    #pass

                env_wrapper.reset = reset_env
                env_wrapper.step = lambda x: Env.step(x)
                env_wrapper.forward = forward
                env_wrapper.episode_probabilities = []
                env_wrapper.curr_probs = None

                def pi_wrapper(ob):
                    # Greedy policy for evaluation: search, then act argmax.
                    if not is_atari:
                        mcts_env = None
                    env_wrapper.mcts.search(n_mcts=n_mcts,
                                            c=c,
                                            Env=Env,
                                            mcts_env=mcts_env)
                    state, pi, V = env_wrapper.mcts.return_results(temp=0)
                    #pi = model.predict_pi(s).flatten()
                    env_wrapper.curr_probs.append(pi)
                    a = np.argmax(pi)
                    return a

                rews, lens = eval_policy(pi_wrapper,
                                         env_wrapper,
                                         n_episodes=eval_episodes,
                                         verbose=True)
                offline_scores.append([
                    np.min(rews),
                    np.max(rews),
                    np.mean(rews),
                    np.std(rews),
                    len(rews),
                    np.mean(lens)
                ])
                np.save(out_dir + '/offline_scores.npy', offline_scores)
            start = time.time()
            s = Env.reset()
            R = 0.0  # Total return counter
            a_store = []
            seed = np.random.randint(1e7)  # draw some Env seed
            Env.seed(seed)
            if is_atari:
                mcts_env.reset()
                mcts_env.seed(seed)
            # Guard eval_freq > 0: with the default -1, `ep % -1 == 0` holds
            # every episode (spurious prints), and 0 would divide by zero.
            if eval_freq > 0 and ep % eval_freq == 0:
                print("Collecting %d episodes" % eval_freq)
            mcts = mcts_maker(
                root_index=s,
                root=None,
                model=model,
                na=model.action_dim,
                **mcts_params)  # the object responsible for MCTS searches
            for t in range(max_ep_len):
                # MCTS step
                if not is_atari:
                    mcts_env = None
                mcts.search(n_mcts=n_mcts, c=c, Env=Env,
                            mcts_env=mcts_env)  # perform a forward search
                if visualize:
                    mcts.visualize()
                state, pi, V = mcts.return_results(
                    temp)  # extract the root output
                D.store((state, V, pi))

                # Make the true step
                a = np.random.choice(len(pi), p=pi)
                a_store.append(a)
                s1, r, terminal, _ = Env.step(a)
                R += r
                t_total += n_mcts  # total number of environment steps (counts the mcts steps)

                if terminal:
                    break
                else:
                    mcts.forward(a, s1, r)

            # Finished episode
            episode_returns.append(R)  # store the total episode return
            online_scores.append(R)
            timepoints.append(
                t_total)  # store the timestep count of the episode return
            store_safely(out_dir, 'result', {
                'R': episode_returns,
                't': timepoints
            })
            np.save(out_dir + '/online_scores.npy', online_scores)

            if R > R_best:
                a_best = a_store
                seed_best = seed
                R_best = R

            # Train
            D.reshuffle()
            try:
                for epoch in range(1):
                    for sb, Vb, pib in D:
                        model.train(sb, Vb, pib)
            except Exception as e:
                # Report the real failure instead of swallowing it silently
                # behind a placeholder message.
                print("Training step failed:", e)
            model.save(out_dir + 'model')
    # Return results
    return episode_returns, timepoints, a_best, seed_best, R_best, offline_scores
Esempio n. 27
0
def agent(game, n_ep, n_mcts, max_ep_len, lr, c, gamma, data_size, batch_size,
          temp, n_hidden_layers, n_hidden_units):
    """Outer training loop (basic variant, deterministic MCTS only).

    Repeatedly collects one episode of MCTS search targets, stores them in
    the replay database, and trains the network on one reshuffled pass.

    Args:
        game: environment id passed to ``make_game``.
        n_ep: number of training episodes.
        n_mcts: MCTS simulations per environment step.
        max_ep_len: maximum environment steps per episode.
        lr: network learning rate.
        c: UCT exploration constant.
        gamma: discount factor for the MCTS backups.
        data_size: replay database capacity.
        batch_size: training batch size.
        temp: visit-count temperature when extracting the root policy.
        n_hidden_layers: number of hidden layers in the network.
        n_hidden_units: units per hidden layer.

    Returns:
        Tuple ``(episode_returns, timepoints, a_best, seed_best, R_best)``;
        ``a_best``/``seed_best`` are ``None`` when no episode was run.
    """
    # tf.reset_default_graph()
    episode_returns = []  # storage
    timepoints = []
    # Environments
    Env = make_game(game)
    is_atari = is_atari_game(Env)
    mcts_env = make_game(game) if is_atari else None

    D = Database(max_size=data_size, batch_size=batch_size)
    model = Model(Env=Env,
                  lr=lr,
                  n_hidden_layers=n_hidden_layers,
                  n_hidden_units=n_hidden_units)
    t_total = 0  # total steps
    R_best = -np.Inf
    # Pre-initialize so the return statement is well-defined even when
    # n_ep == 0 (previously these were unbound -> NameError).
    a_best = None
    seed_best = None

    with tf.Session() as sess:
        model.sess = sess
        sess.run(tf.global_variables_initializer())
        for ep in range(n_ep):
            start = time.time()
            s = Env.reset()
            R = 0.0  # Total return counter
            a_store = []
            seed = np.random.randint(1e7)  # draw some Env seed
            Env.seed(seed)
            if is_atari:
                mcts_env.reset()
                mcts_env.seed(seed)

            mcts = MCTS(
                root_index=s,
                root=None,
                model=model,
                na=model.action_dim,
                gamma=gamma)  # the object responsible for MCTS searches
            for t in range(max_ep_len):
                # MCTS step
                mcts.search(n_mcts=n_mcts, c=c, Env=Env,
                            mcts_env=mcts_env)  # perform a forward search
                state, pi, V = mcts.return_results(
                    temp)  # extract the root output
                D.store((state, V, pi))

                # Make the true step
                a = np.random.choice(len(pi), p=pi)
                a_store.append(a)
                s1, r, terminal, _ = Env.step(a)
                R += r
                t_total += n_mcts  # total number of environment steps (counts the mcts steps)

                if terminal:
                    break
                else:
                    mcts.forward(a, s1)

            # Finished episode
            episode_returns.append(R)  # store the total episode return
            timepoints.append(
                t_total)  # store the timestep count of the episode return
            store_safely(os.getcwd(), 'result', {
                'R': episode_returns,
                't': timepoints
            })

            if R > R_best:
                a_best = a_store
                seed_best = seed
                R_best = R
            print('Finished episode {}, total return: {}, total time: {} sec'.
                  format(ep, np.round(R, 2), np.round((time.time() - start),
                                                      1)))
            # Train
            D.reshuffle()
            for epoch in range(1):
                for sb, Vb, pib in D:
                    model.train(sb, Vb, pib)
    # Return results
    return episode_returns, timepoints, a_best, seed_best, R_best
Esempio n. 28
0
def agent(game,
          n_ep,
          n_mcts,
          max_ep_len,
          lr,
          c,
          gamma,
          data_size,
          batch_size,
          temp,
          n_hidden_layers,
          n_hidden_units,
          stochastic=False,
          eval_freq=-1,
          eval_episodes=100,
          alpha=0.6,
          n_epochs=100,
          c_dpw=1,
          numpy_dump_dir='../',
          pre_process=None,
          visualize=False,
          game_params=None,
          parallelize_evaluation=False,
          mcts_only=False,
          particles=0,
          show_plots=False,
          n_workers=1,
          use_sampler=False,
          budget=np.inf,
          unbiased=False,
          biased=False,
          max_workers=100,
          variance=False,
          depth_based_bias=False,
          scheduler_params=None,
          out_dir=None,
          render=False,
          second_version=False,
          third_version=False):
    """Outer training/evaluation loop supporting several MCTS variants.

    Selects one MCTS implementation based on the flags (plain, stochastic
    DPW, open-loop unbiased particle filtering, or one of three biased
    particle-filtering versions), then alternates periodic policy
    evaluation with MCTS-driven data collection and network training.

    Key arguments (the remainder mirror the simpler ``agent`` variants):
        game / game_params: environment id and keyword arguments for
            ``make_game``; ``game_params`` defaults to an empty dict.
        eval_freq / eval_episodes: evaluation cadence (<= 0 disables) and
            episode count; ``parallelize_evaluation`` runs it in parallel.
        mcts_only: skip the policy-improvement (training) phase entirely.
        particles / unbiased / biased / second_version / third_version:
            select and configure the particle-filtering MCTS variants.
        use_sampler / n_workers: optional parallel state sampler.
        out_dir: when given, parameters are dumped there and the evaluation
            model file is placed there.

    Returns:
        Tuple ``(episode_returns, timepoints, a_best, seed_best, R_best,
        offline_scores)``.
    """
    # Avoid the shared-mutable-default pitfall: each call gets a fresh dict.
    if game_params is None:
        game_params = {}
    visualizer = None

    # if particles:
    #     parallelize_evaluation = False  # Cannot run parallelized evaluation with particle filtering

    if not mcts_only:
        from mcts import MCTS
        from mcts_dpw import MCTSStochastic
    elif particles:
        if unbiased:
            from particle_filtering.ol_uct import OL_MCTS
        elif biased:
            if second_version:
                from particle_filtering.pf_uct_2 import PFMCTS2 as PFMCTS
            elif third_version:
                from particle_filtering.pf_uct_3 import PFMCTS3 as PFMCTS
            else:
                from particle_filtering.pf_uct import PFMCTS
        else:
            from particle_filtering.pf_mcts_edo import PFMCTS
    else:
        from pure_mcts.mcts import MCTS
        from pure_mcts.mcts_dpw import MCTSStochastic

    if parallelize_evaluation:
        print("The evaluation will be parallel")

    parameter_list = {
        "game": game,
        "n_ep": n_ep,
        "n_mcts": n_mcts,
        "max_ep_len": max_ep_len,
        "lr": lr,
        "c": c,
        "gamma": gamma,
        "data_size": data_size,
        "batch_size": batch_size,
        "temp": temp,
        "n_hidden_layers": n_hidden_layers,
        "n_hidden_units": n_hidden_units,
        "stochastic": stochastic,
        "eval_freq": eval_freq,
        "eval_episodes": eval_episodes,
        "alpha": alpha,
        "n_epochs": n_epochs,
        "out_dir": numpy_dump_dir,
        "pre_process": pre_process,
        "visualize": visualize,
        "game_params": game_params,
        "n_workers": n_workers,
        "use_sampler": use_sampler,
        "variance": variance,
        "depth_based_bias": depth_based_bias,
        "unbiased": unbiased,
        "second_version": second_version,
        'third_version': third_version
    }
    if out_dir is not None:
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        with open(os.path.join(out_dir, "parameters.txt"), 'w') as d:
            d.write(json.dumps(parameter_list))
    #logger = Logger(parameter_list, game, show=show_plots)

    if DEBUG_TAXI:
        from utils.visualization.taxi import TaxiVisualizer
        with open(game_params["grid"]) as f:
            m = f.readlines()
            matrix = []
            for r in m:
                row = []
                for ch in r.strip('\n'):
                    row.append(ch)
                matrix.append(row)
            visualizer = TaxiVisualizer(matrix)
            f.close()
            exit()

    if pre_process is not None:
        pre_process()

    # numpy_dump_dir = logger.numpy_dumps_dir
    #
    # if not os.path.exists(numpy_dump_dir):
    #     os.makedirs(numpy_dump_dir)

    episode_returns = []  # storage
    timepoints = []

    # Environments
    if game == 'Trading-v0':
        game_params['save_dir'] = out_dir  #logger.save_dir
    Env = make_game(game, game_params)
    num_actions = Env.action_space.n
    sampler = None
    if use_sampler and not (unbiased or biased):

        def make_pi(action_space):
            def pi(s):
                return np.random.randint(low=0, high=action_space.n)

            return pi

        def make_env():
            return make_game(game, game_params)

        sampler = ParallelSampler(make_pi=make_pi,
                                  make_env=make_env,
                                  n_particles=particles,
                                  n_workers=n_workers,
                                  seed=10)

    is_atari = is_atari_game(Env)
    mcts_env = make_game(game, game_params) if is_atari else None
    online_scores = []
    offline_scores = []

    # Setup the parameters for generating the search environments

    if game == "RaceStrategy-v1":
        mcts_maker, mcts_params, c_dpw = load_race_agents_config(
            'envs/configs/race_strategy_full.json', gamma)

    else:
        mcts_params = dict(gamma=gamma)
        if particles:
            if not (biased or unbiased):
                mcts_params['particles'] = particles
                mcts_params['sampler'] = sampler
                # The plain particle-filtering variant imported above; this
                # assignment was missing and left mcts_maker unbound.
                mcts_maker = PFMCTS
            elif biased:
                mcts_params['alpha'] = alpha
                mcts_maker = PFMCTS

            mcts_params['depth_based_bias'] = depth_based_bias
            if unbiased:
                mcts_params['variance'] = variance
                mcts_maker = OL_MCTS

        elif stochastic:
            mcts_params['alpha'] = alpha
            mcts_params['depth_based_bias'] = depth_based_bias
            mcts_maker = MCTSStochastic
        else:
            mcts_maker = MCTS

    # Prepare the database for storing training data to be sampled
    db = Database(max_size=data_size, batch_size=batch_size)

    # TODO extract dimensions to avoid allocating model
    # Setup the model
    model_params = {
        "Env": Env,
        "lr": lr,
        "n_hidden_layers": n_hidden_layers,
        "n_hidden_units": n_hidden_units,
        "joint_networks": True
    }

    model_wrapper = ModelWrapper(**model_params)

    t_total = 0  # total steps
    R_best = -np.Inf
    a_best = None
    seed_best = None

    # Variables for storing values to be plotted
    avgs = []
    stds = []

    # Run the episodes
    for ep in range(n_ep):

        if DEBUG_TAXI:
            visualizer.reset()

        ##### Policy evaluation step #####
        if eval_freq > 0 and ep % eval_freq == 0:  # and ep > 0
            print(
                '--------------------------------\nEvaluating policy for {} episodes!'
                .format(eval_episodes))
            seed = np.random.randint(1e7)  # draw some Env seed
            Env.seed(seed)
            s = Env.reset()

            if parallelize_evaluation:
                penv = None
                pgame = {
                    "game_maker": make_game,
                    "game": game,
                    "game_params": game_params
                }
            else:
                penv = Env
                pgame = None

            model_file = os.path.join(out_dir, "model.h5")

            # model_wrapper.save(model_file)

            if game == "RaceStrategy-v1":
                env_wrapper = RaceWrapper(s,
                                          mcts_maker,
                                          model_file,
                                          model_params,
                                          mcts_params,
                                          is_atari,
                                          n_mcts,
                                          budget,
                                          mcts_env,
                                          c_dpw,
                                          temp,
                                          env=penv,
                                          game_maker=pgame,
                                          mcts_only=mcts_only,
                                          scheduler_params=scheduler_params)
            else:
                env_wrapper = Wrapper(s,
                                      mcts_maker,
                                      model_file,
                                      model_params,
                                      mcts_params,
                                      is_atari,
                                      n_mcts,
                                      budget,
                                      mcts_env,
                                      c_dpw,
                                      temp,
                                      env=penv,
                                      game_maker=pgame,
                                      mcts_only=mcts_only,
                                      scheduler_params=scheduler_params)

            # Run the evaluation
            if parallelize_evaluation:
                total_reward, reward_per_timestep, lens, action_counts = \
                    parallelize_eval_policy(env_wrapper, n_episodes=eval_episodes, verbose=False, max_len=max_ep_len,
                                            max_workers=max_workers, out_dir=out_dir)
            else:
                total_reward, reward_per_timestep, lens, action_counts = \
                    eval_policy(env_wrapper, n_episodes=eval_episodes, verbose=False, max_len=max_ep_len,
                                visualize=visualize, out_dir=out_dir, render=render)

            # offline_scores.append([np.min(rews), np.max(rews), np.mean(rews), np.std(rews),
            #                        len(rews), np.mean(lens)])

            offline_scores.append(
                [total_reward, reward_per_timestep, lens, action_counts])

            #np.save(numpy_dump_dir + '/offline_scores.npy', offline_scores)

            # Store and plot data
            avgs.append(np.mean(total_reward))
            stds.append(np.std(total_reward))

            #logger.plot_evaluation_mean_and_variance(avgs, stds)

        ##### Policy improvement step #####

        if not mcts_only:

            start = time.time()
            s = start_s = Env.reset()
            R = 0.0  # Total return counter
            a_store = []
            seed = np.random.randint(1e7)  # draw some Env seed
            Env.seed(seed)
            if is_atari:
                mcts_env.reset()
                mcts_env.seed(seed)

            if eval_freq > 0 and ep % eval_freq == 0:
                print("\nCollecting %d episodes" % eval_freq)
            mcts = mcts_maker(
                root_index=s,
                root=None,
                model=model_wrapper,
                na=model_wrapper.action_dim,
                **mcts_params)  # the object responsible for MCTS searches

            print("\nPerforming MCTS steps\n")

            ep_steps = 0
            start_targets = []

            for st in range(max_ep_len):

                # max(1, ...) avoids a modulo-by-zero when max_ep_len < 10.
                print_step = max(1, max_ep_len // 10)
                if st % print_step == 0:
                    print('Step ' + str(st + 1) + ' of ' + str(max_ep_len))

                # MCTS step
                if not is_atari:
                    mcts_env = None
                mcts.search(n_mcts=n_mcts, c=c, Env=Env,
                            mcts_env=mcts_env)  # perform a forward search

                if visualize:
                    mcts.visualize()

                state, pi, V = mcts.return_results(
                    temp)  # extract the root output

                # Save targets for starting state to debug
                if np.array_equal(start_s, state):
                    if DEBUG:
                        print("Pi target for starting state:", pi)
                    start_targets.append((V, pi))
                db.store((state, V, pi))

                # Make the true step
                a = np.random.choice(len(pi), p=pi)
                a_store.append(a)

                s1, r, terminal, _ = Env.step(a)

                # Perform command line visualization if necessary
                if DEBUG_TAXI:
                    olds, olda = copy.deepcopy(s1), copy.deepcopy(a)
                    visualizer.visualize_taxi(olds, olda)
                    print("Reward:", r)

                R += r
                t_total += n_mcts  # total number of environment steps (counts the mcts steps)
                ep_steps = st + 1

                if terminal:
                    break  # Stop the episode if we encounter a terminal state
                else:
                    mcts.forward(a, s1, r)  # Otherwise proceed

            # Finished episode
            if DEBUG:
                print("Train episode return:", R)
                print("Train episode actions:", a_store)
            episode_returns.append(R)  # store the total episode return
            online_scores.append(R)
            timepoints.append(
                t_total)  # store the timestep count of the episode return
            #store_safely(numpy_dump_dir, '/result', {'R': episode_returns, 't': timepoints})
            #np.save(numpy_dump_dir + '/online_scores.npy', online_scores)

            if DEBUG or True:
                print(
                    'Finished episode {} in {} steps, total return: {}, total time: {} sec'
                    .format(ep, ep_steps, np.round(R, 2),
                            np.round((time.time() - start), 1)))
            # Plot the online return over training episodes

            #logger.plot_online_return(online_scores)

            if R > R_best:
                a_best = a_store
                seed_best = seed
                R_best = R

            print()

            # Train only if the model has to be used
            if not mcts_only:
                # Train
                try:
                    print("\nTraining network")
                    ep_V_loss = []
                    ep_pi_loss = []

                    for _ in range(n_epochs):
                        # Reshuffle the dataset at each epoch
                        db.reshuffle()

                        batch_V_loss = []
                        batch_pi_loss = []

                        # Batch training
                        for sb, Vb, pib in db:

                            if DEBUG:
                                print("sb:", sb)
                                print("Vb:", Vb)
                                print("pib:", pib)

                            loss = model_wrapper.train(sb, Vb, pib)

                            batch_V_loss.append(loss[1])
                            batch_pi_loss.append(loss[2])

                        ep_V_loss.append(mean(batch_V_loss))
                        ep_pi_loss.append(mean(batch_pi_loss))

                    # Plot the loss over training epochs

                    #logger.plot_loss(ep, ep_V_loss, ep_pi_loss)

                except Exception as e:
                    print("Something wrong while training:", e)

                # model.save(out_dir + 'model')

                # Plot the loss over different episodes
                #logger.plot_training_loss_over_time()

                pi_start = model_wrapper.predict_pi(start_s)
                V_start = model_wrapper.predict_V(start_s)

                print("\nStart policy: ", pi_start)
                print("Start value:", V_start)

                #logger.log_start(ep, pi_start, V_start, start_targets)

    # Return results
    if use_sampler:
        sampler.close()
    return episode_returns, timepoints, a_best, seed_best, R_best, offline_scores
Esempio n. 29
0
 def __init__(self, source_type, name=''):
     """Optionally open a database-backed source.

     ``self.db`` is set only when ``source_type`` is ``'database'`` and a
     non-empty ``name`` is supplied; otherwise no attribute is created.
     """
     if source_type == 'database' and name:
         self.db = Database(name)
class CurrencyRepository:

    # initialize CurrencyRepository
    def __init__(self):
        """Create the repository with its own Database connection."""
        self.db = Database()

    def insertTransaction(self, fromID, toID, time, amount):
        self.db.insertOne(
            'currencytransactions',
            ['FromDiscordUserID', 'ToDiscordUserID', 'Date', 'Amount'], {
                'FromDiscordUserID': fromID,
                'ToDiscordUserID': toID,
                'Date': time,
                'Amount': amount
            })

    def daily(self, discordUserID, amount):
        try:
            dt = self.db.select(['*'], 'discordusers', 'DiscordUserID = %s',
                                [discordUserID])
            user = dt.getRows()[0] if len(dt.getRows()) == 1 else None
            if user != None:
                now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                timeUntilNextDaily = 0
                if user['LastDaily'] == None:
                    timeUntilNextDaily = 60 * 60 * 24
                else:
                    nowtime = datetime.datetime.now().timestamp()
                    thentime = datetime.datetime.fromisoformat(
                        str(user['LastDaily'])).timestamp()
                    timeUntilNextDaily = int(thentime + (60 * 60 * 24) -
                                             nowtime)
                if timeUntilNextDaily < 0:
                    userJSON = eval(str(user))
                    userJSON['LastDaily'] = now
                    userJSON['Currency'] = int(userJSON['Currency']) + amount
                    self.insertTransaction(0, discordUserID, now, amount)
                    self.db.update('discordusers', ['Currency', 'LastDaily'],
                                   userJSON, 'DiscordUserID = %s',
                                   [discordUserID])
                    return True
                else:
                    return timeUntilNextDaily * 1000
            else:
                return False
        except:
            return False

    def transfer(self, senderID, receiverID, amount):
        try:
            if amount > 0:
                dt1 = self.db.select(['*'], 'discordusers',
                                     'DiscordUserID = %s', [senderID])
                dt2 = self.db.select(['*'], 'discordusers',
                                     'DiscordUserID = %s', [receiverID])
                sendingUser = dt1.getRows()[0] if len(
                    dt1.getRows()) == 1 else None
                receivingUser = dt2.getRows()[0] if len(
                    dt2.getRows()) == 1 else None
                if sendingUser != None and receivingUser != None:
                    now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                    sendingUserJSON = eval(str(sendingUser))
                    receivingUserJSON = eval(str(receivingUser))
                    sendingUserJSON['Currency'] = int(
                        sendingUserJSON['Currency']) - amount
                    receivingUserJSON['Currency'] = int(
                        receivingUserJSON['Currency']) + amount
                    self.insertTransaction(senderID, receiverID, now, amount)
                    self.db.update('discordusers', ['Currency'],
                                   sendingUserJSON, 'DiscordUserID = %s',
                                   [senderID])
                    self.db.update('discordusers', ['Currency'],
                                   receivingUserJSON, 'DiscordUserID = %s',
                                   [receiverID])
                    return True
                else:
                    return False
            else:
                return 'You cannot transfer a negative amount.'
        except:
            return False