Example #1
    async def _run_event(self, event, *args, **kwargs):
        try:
            self._dispatch(Plugin.dispatch_raw, event, *args, **kwargs)
        except Exception as e:
            logger.error('Error dispatching raw event: {}'.format(e))

        try:
            await getattr(self, event)(*args, **kwargs)
        except asyncio.CancelledError:
            pass
        except Exception:
            try:
                await self.on_error(event, *args, **kwargs)
            except asyncio.CancelledError:
                pass
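For context, the same error-isolation pattern can be exercised on its own. A minimal, runnable sketch (the MiniClient class, on_message handler and plain logging setup are illustrative assumptions, not part of the example above):

import asyncio
import logging

logger = logging.getLogger(__name__)


class MiniClient:
    """Toy event runner: the handler is awaited, cancellation is swallowed,
    and any other exception is routed to on_error instead of killing the loop."""

    async def on_message(self, text):
        raise ValueError("boom")  # simulate a failing handler

    async def on_error(self, event, *args, **kwargs):
        logger.exception("Unhandled exception in event %s", event)

    async def _run_event(self, event, *args, **kwargs):
        try:
            await getattr(self, event)(*args, **kwargs)
        except asyncio.CancelledError:
            pass
        except Exception:
            try:
                await self.on_error(event, *args, **kwargs)
            except asyncio.CancelledError:
                pass


asyncio.run(MiniClient()._run_event("on_message", "hello"))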
Example #2
    def __read_circle_count(self, filename, identifier, ratio, communicator, xcord=False, crop=False, click=False,
                            canny=False, secondratio=False):
        logger.debug("__read_circle_count: Reading circles")

        try:
            screenshot_read = cv2.imread(filename)
        except Exception:
            logger.error("Screenshot corrupted :(")
            return -1

        if screenshot_read is None:
            logger.error("Screenshot corrupted :(")
            return -1

        height, width, _ = screenshot_read.shape

        if crop:
            screenshot_read = screenshot_read[height - height // 4:height,
                                              width // 2 - width // 8:width // 2 + width // 8]

        logger.debug("__read_circle_count: Determined screenshot scale: " +
                     str(height) + " x " + str(width))
        gray = cv2.cvtColor(screenshot_read, cv2.COLOR_BGR2GRAY)
        # detect circles in the image

        if not secondratio:
            radMin = int((width / float(ratio) - 3) / 2)
            radMax = int((width / float(ratio) + 3) / 2)
        else:
            radMin = int((width / float(ratio) - 3) / 2)
            radMax = int((width / float(secondratio) + 3) / 2)
        if canny:
            gray = cv2.GaussianBlur(gray, (3, 3), 0)
            gray = cv2.Canny(gray, 100, 50, apertureSize=3)

        logger.debug("__read_circle_count: Detect radius of circle: Min " +
                     str(radMin) + " Max " + str(radMax))
        circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, width / 8, param1=100, param2=15, minRadius=radMin,
                                   maxRadius=radMax)
        circle = 0
        # ensure at least some circles were found
        if circles is not None:
            # convert the (x, y) coordinates and radius of the circles to integers
            circles = np.round(circles[0, :]).astype("int")
            # loop over the (x, y) coordinates and radius of the circles
            for (x, y, r) in circles:

                if not xcord:
                    circle += 1
                    if click:
                        logger.debug(
                            '__read_circle_count: found Circle - click it')
                        communicator.click(
                            width / 2, ((int(height) - int(height / 4.5))) + y)
                        time.sleep(2)
                else:
                    if x >= (width / 2) - 100 and x <= (width / 2) + 100 and y >= (height - (height / 3)):
                        circle += 1
                        if click:
                            logger.debug(
                                '__read_circle_count: found Circle - click it')
                            communicator.click(
                                width / 2, ((int(height) - int(height / 4.5))) + y)
                            time.sleep(2)

            logger.debug(
                "__read_circle_count: Determined screenshot to have " + str(circle) + " Circle.")
            return circle
        else:
            logger.debug(
                "__read_circle_count: Determined screenshot to have 0 Circle")
            return -1
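The detection core above is cv2.HoughCircles with a radius window of roughly (width / ratio ± 3) / 2. A trimmed, self-contained sketch of just that step (count_circles and the synthetic test image are hypothetical, not part of the project):

import cv2
import numpy as np


def count_circles(gray_image, ratio):
    """Count circles whose diameter is roughly width / ratio (+/- 3 px)."""
    height, width = gray_image.shape[:2]
    rad_min = int((width / float(ratio) - 3) / 2)
    rad_max = int((width / float(ratio) + 3) / 2)
    circles = cv2.HoughCircles(gray_image, cv2.HOUGH_GRADIENT, 1, width / 8,
                               param1=100, param2=15,
                               minRadius=rad_min, maxRadius=rad_max)
    if circles is None:
        return 0
    return len(np.round(circles[0, :]).astype("int"))


# Example with a synthetic image containing one filled circle
canvas = np.zeros((200, 200), dtype=np.uint8)
cv2.circle(canvas, (100, 100), 24, 255, -1)
print(count_circles(canvas, ratio=4))  # expect roughly 1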
Example #3
def trash_image_matching(screen_img):
    clicklist: List[Trash] = []
    screen = cv2.imread(screen_img)

    if screen is None:
        logger.error('trash_image_matching: {} appears to be corrupted', str(screen_img))
        return None

    # print (screen.shape[:2])
    screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)

    trash = cv2.imread('utils/trashcan.png', 0)

    height, width = screen.shape
    _quest_x = get_delete_quest_coords(width)
    _inventory_x = get_delete_item_coords(width)

    if trash.mean() == 255 or trash.mean() == 0:
        return clicklist

    if width <= 1080 and width > 720:
        sc_from = 0.5
        sc_till = 1
    elif width == 720:
        sc_from = 0.5
        sc_till = 0.7
    elif width == 1440:
        sc_from = 0.5
        sc_till = 1.5
    else:
        sc_from = 0.1
        sc_till = 2

    for scale in np.linspace(sc_from, sc_till, 15)[::-1]:

        resized = imutils.resize(
            trash, width=int(trash.shape[1] * scale))
        (tH, tW) = resized.shape[:2]

        last_y_coord = 0
        res = cv2.matchTemplate(screen, resized, cv2.TM_CCOEFF_NORMED)
        threshold = 0.5
        loc = np.where(res >= threshold)
        boxcount = 0
        for pt in zip(*loc[::-1]):
            if pt[0] > width/4*3 and pt[1] < height/5*4:
                x_coord = int(pt[0] + tW / 2)
                y_coord = int(pt[1] + tH / 2)

                if last_y_coord > 0:
                    if last_y_coord + 100 > y_coord or last_y_coord - 100 > y_coord:
                        if (_inventory_x - 50 < x_coord < _inventory_x + 50) or \
                                (_quest_x - 50 < x_coord < _quest_x + 50):
                            last_y_coord = y_coord
                    else:
                        if (_inventory_x - 50 < x_coord < _inventory_x + 50) or \
                                (_quest_x - 50 < x_coord < _quest_x + 50):
                            clicklist.append(Trash(x_coord, y_coord))
                            last_y_coord = y_coord
                            # cv2.rectangle(screen, pt, (pt[0] + tW, pt[1] + tH), (128, 128, 128), 2)
                else:
                    if (_inventory_x - 50 < x_coord < _inventory_x + 50) or \
                            (_quest_x - 50 < x_coord < _quest_x + 50):
                        clicklist.append(Trash(x_coord, y_coord))
                        last_y_coord = y_coord
                        # cv2.rectangle(screen, pt, (pt[0] + tW, pt[1] + tH), (128, 128, 128), 2)
                boxcount += 1

        # cv2.namedWindow("output", cv2.WINDOW_KEEPRATIO)
        # cv2.imshow("output", screen)
        # cv2.waitKey(0)

        if boxcount >= 1: break

    return clicklist
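At its core this routine is multi-scale template matching: resize the template, run cv2.matchTemplate, and keep every location above a correlation threshold. A minimal standalone sketch of that idea (find_template, the scales, the threshold and the synthetic images are illustrative assumptions):

import cv2
import numpy as np


def find_template(screen_gray, template_gray, scales=(1.0, 0.8, 0.6), threshold=0.5):
    """Return (x, y) centers of template matches, trying several template sizes."""
    hits = []
    for scale in scales:
        resized = cv2.resize(template_gray, None, fx=scale, fy=scale)
        t_h, t_w = resized.shape[:2]
        if t_h > screen_gray.shape[0] or t_w > screen_gray.shape[1]:
            continue  # template larger than the screen at this scale
        res = cv2.matchTemplate(screen_gray, resized, cv2.TM_CCOEFF_NORMED)
        ys, xs = np.where(res >= threshold)
        hits.extend((int(x + t_w / 2), int(y + t_h / 2)) for x, y in zip(xs, ys))
        if hits:
            break  # stop at the first scale that produced matches
    return hits


# Example with synthetic data: a small pattern pasted onto a black screen
template = np.zeros((40, 40), dtype=np.uint8)
template[10:30, 10:30] = 255
screen = np.zeros((300, 300), dtype=np.uint8)
screen[100:140, 150:190] = template
print(find_template(screen, template)[:1])  # roughly [(170, 120)]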
Example #4
    def process_update_queue(self):
        logger.info("Starting Device Job processor")
        time.sleep(10)
        while True:
            try:
                jobstatus = jobReturn.UNKNOWN
                try:
                    item = self._update_queue.get()
                except Empty:
                    time.sleep(2)
                    continue

                if item not in self._log:
                    continue

                id_ = item
                origin = self._log[str(id_)]['origin']
                file_ = self._log[str(id_)]['file']
                counter = self._log[str(id_)]['counter']
                jobtype = self._log[str(id_)]['jobtype']
                waittime = self._log[str(id_)].get('waittime', 0)
                processtime = self._log[str(id_)].get('processingdate', None)
                globalid = self._log[str(id_)]['globalid']
                redo = self._log[str(id_)].get('redo', False)

                laststatus = self._globaljoblog[globalid]['laststatus']
                lastjobid = self._globaljoblog[globalid].get('lastjobid', 0)
                startwithinit = self._globaljoblog[globalid].get(
                    'startwithinit', False)

                if laststatus is not None and laststatus == 'faulty' and  \
                        self._globaljoblog[globalid].get('autojob', False):
                    # breakup job because last job in chain is faulty
                    logger.error(
                        'Breakup job {} on device {} - File/Job: {} - previous job in chain was broken (ID: {})'
                        .format(str(jobtype), str(origin), str(file_),
                                str(id_)))
                    self.write_status_log(str(id_),
                                          field='status',
                                          value='terminated')
                    self.send_webhook(id_=id_, status=jobReturn.TERMINATED)
                    continue

                if (laststatus is None or laststatus == 'future') and not startwithinit and processtime is None and \
                        self._globaljoblog[globalid].get('autojob', False):
                    logger.debug(
                        'Autojob (no init run) {} on device {} - File/Job: {} - queued to real starttime (ID: {})'
                        .format(str(jobtype), str(origin), str(file_),
                                str(id_)))
                    # just schedule job - not process the first time
                    processtime = datetime.timestamp(datetime.now(
                    ) + timedelta(
                        minutes=self._globaljoblog[globalid].get('algo', 0) +
                        waittime))
                    self.write_status_log(str(id_),
                                          field='processingdate',
                                          value=processtime)

                    self._globaljoblog[globalid]['lastjobid'] = id_
                    self._globaljoblog[globalid]['laststatus'] = 'future'

                    self.add_job(globalid=globalid,
                                 origin=origin,
                                 file=file_,
                                 id_=id_,
                                 type=jobtype,
                                 counter=counter,
                                 status='future',
                                 waittime=waittime,
                                 processtime=processtime,
                                 redo=redo)

                    continue

                if (laststatus is None or laststatus
                        == 'success') and waittime > 0 and processtime is None:
                    # set sleeptime for this job
                    logger.debug(
                        'Job {} on device {} - File/Job: {} - queued to real starttime (ID: {})'
                        .format(str(jobtype), str(origin), str(file_),
                                str(id_)))

                    self._log[str(id_)]['processingdate'] = datetime.timestamp(
                        datetime.now() + timedelta(minutes=waittime))

                    self._globaljoblog[globalid]['lastjobid'] = id_
                    self._globaljoblog[globalid]['laststatus'] = 'success'

                    self.add_job(globalid=globalid,
                                 origin=origin,
                                 file=file_,
                                 id_=id_,
                                 type=jobtype,
                                 counter=counter,
                                 status='future',
                                 waittime=waittime,
                                 processtime=processtime,
                                 redo=redo)

                    continue

                if laststatus is not None and laststatus in ('pending', 'future', 'failure', 'interrupted',
                                                             'not connected') and lastjobid != id_ \
                        and processtime is None:
                    logger.debug(
                        'Job {} on device {} - File/Job: {} - queued because the previous job in the chain '
                        'has not been processed yet (ID: {})'.format(
                            str(jobtype), str(origin), str(file_), str(id_)))
                    # skipping because the previous job in the chain has not been processed yet
                    self.add_job(globalid=globalid,
                                 origin=origin,
                                 file=file_,
                                 id_=id_,
                                 type=jobtype,
                                 counter=counter,
                                 status='future',
                                 waittime=waittime,
                                 processtime=processtime,
                                 redo=redo)

                    continue

                if processtime is not None and datetime.fromtimestamp(
                        processtime) > datetime.now():
                    time.sleep(1)
                    logger.debug(
                        'Job {} on device {} - File/Job: {} - queued because processtime is in the future (ID: {})'
                        .format(str(jobtype), str(origin), str(file_),
                                str(id_)))
                    self.add_job(globalid=globalid,
                                 origin=origin,
                                 file=file_,
                                 id_=id_,
                                 type=jobtype,
                                 counter=counter,
                                 status='future',
                                 waittime=waittime,
                                 processtime=processtime,
                                 redo=redo)

                    continue

                if id_ in self._log:
                    self._current_job_id = id_

                    if 'processingdate' in self._log[id_]:
                        self.write_status_log(str(id_),
                                              field='processingdate',
                                              delete=True)

                    logger.info(
                        "Job for {} (File/Job: {}) started (ID: {})".format(
                            str(origin), str(file_), str(id_)))
                    self.write_status_log(str(id_),
                                          field='status',
                                          value='processing')
                    self.write_status_log(str(id_),
                                          field='lastprocess',
                                          value=int(time.time()))

                    errorcount = 0

                    while jobstatus != jobReturn.SUCCESS and errorcount < 3:

                        temp_comm = self._websocket.get_origin_communicator(
                            origin)

                        if temp_comm is None:
                            errorcount += 1
                            logger.error(
                                'Cannot start job {} on device {} - File/Job: {} - Device not connected (ID: {})'
                                .format(str(jobtype), str(origin), str(file_),
                                        str(id_)))
                            self._globaljoblog[globalid][
                                'laststatus'] = 'not connected'
                            self.write_status_log(str(id_),
                                                  field='laststatus',
                                                  value='not connected')
                            self._globaljoblog[globalid]['lastjobid'] = id_
                            jobstatus = jobReturn.NOCONNECT
                            time.sleep(5)

                        else:
                            # stop worker
                            self._websocket.set_job_activated(origin)
                            self.write_status_log(str(id_),
                                                  field='status',
                                                  value='starting')
                            try:
                                if self.start_job_type(item, jobtype,
                                                       temp_comm):
                                    logger.info(
                                        'Job {} could be executed successfully - Device {} - File/Job {} (ID: {})'
                                        .format(str(jobtype), str(origin),
                                                str(file_), str(id_)))
                                    self.write_status_log(str(id_),
                                                          field='status',
                                                          value='success')
                                    self.write_status_log(str(id_),
                                                          field='laststatus',
                                                          value='success')
                                    self._globaljoblog[globalid][
                                        'laststatus'] = 'success'
                                    self._globaljoblog[globalid][
                                        'lastjobid'] = id_
                                    jobstatus = jobReturn.SUCCESS

                                else:
                                    logger.error(
                                        'Job {} could not be executed successfully - Device {} - File/Job {} (ID: {})'
                                        .format(str(jobtype), str(origin),
                                                str(file_), str(id_)))
                                    errorcount += 1
                                    self._globaljoblog[globalid][
                                        'laststatus'] = 'failure'
                                    self.write_status_log(str(id_),
                                                          field='laststatus',
                                                          value='failure')
                                    self._globaljoblog[globalid][
                                        'lastjobid'] = id_
                                    jobstatus = jobReturn.FAILURE

                                # start worker
                                self._websocket.set_job_deactivated(origin)

                            except:
                                logger.error(
                                    'Job {} could not be executed successfully (fatal error) '
                                    '- Device {} - File/Job {} (ID: {})'.
                                    format(str(jobtype), str(origin),
                                           str(file_), str(id_)))
                                errorcount += 1
                                self._globaljoblog[globalid][
                                    'laststatus'] = 'interrupted'
                                self.write_status_log(str(id_),
                                                      field='status',
                                                      value='interrupted')
                                self._globaljoblog[globalid]['lastjobid'] = id_
                                jobstatus = jobReturn.FAILURE

                    # check jobstatus and re-add if possible
                    if jobstatus != jobReturn.SUCCESS and not (
                            jobstatus == jobReturn.NOCONNECT
                            and self._args.job_restart_notconnect > 0):
                        logger.error(
                            "Job for {} (File/Job: {} - Type {}) failed 3 times in row - aborting (ID: {})"
                            .format(str(origin), str(file_), str(jobtype),
                                    str(id_)))
                        self._globaljoblog[globalid]['laststatus'] = 'faulty'
                        self.write_status_log(str(id_),
                                              field='status',
                                              value='faulty')

                        if redo and self._globaljoblog[globalid].get(
                                'redoonerror', False):
                            logger.info(
                                'Re-add this automatic job for {} (File/Job: {} - Type {}) (ID: {})'
                                .format(str(origin), str(file_), str(jobtype),
                                        str(id_)))
                            self.restart_job(id_=id_)
                            self._globaljoblog[globalid]['lastjobid'] = id_
                            self._globaljoblog[globalid][
                                'laststatus'] = 'success'

                    elif jobstatus == jobReturn.SUCCESS and redo:
                        logger.info(
                            'Re-add this automatic job for {} (File/Job: {} - Type {}) (ID: {})'
                            .format(str(origin), str(file_), str(jobtype),
                                    str(id_)))
                        self.restart_job(id_=id_)

                    elif jobstatus == jobReturn.NOCONNECT and self._args.job_restart_notconnect > 0:
                        logger.error(
                            "Job for {} (File/Job: {} - Type {}) failed 3 times in row - requeued it (ID: {})"
                            .format(str(origin), str(file_), str(jobtype),
                                    str(id_)))
                        processtime = datetime.timestamp(
                            datetime.now() + timedelta(
                                minutes=self._args.job_restart_notconnect))
                        self.write_status_log(str(id_),
                                              field='processingdate',
                                              value=processtime)

                        self._globaljoblog[globalid]['lastjobid'] = id_
                        self._globaljoblog[globalid]['laststatus'] = 'future'

                        self.add_job(globalid=globalid,
                                     origin=origin,
                                     file=file_,
                                     id_=id_,
                                     type=jobtype,
                                     counter=counter,
                                     status='future',
                                     waittime=waittime,
                                     processtime=processtime,
                                     redo=redo)

                    self.send_webhook(id_=id_, status=jobstatus)

                    self._current_job_id = 0
                    errorcount = 0
                    time.sleep(10)

            except KeyboardInterrupt as e:
                logger.info(
                    "process_update_queue received keyboard interrupt, stopping"
                )
                if self.t_updater is not None:
                    self.t_updater.join()
                break

            time.sleep(5)
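Stripped of the MAD-specific bookkeeping, the method above is a classic "consume a queue forever and retry each item up to three times" worker. A hedged skeleton of that pattern (all names below are hypothetical); note that the sketch passes a timeout to Queue.get(), since a blocking get() never raises Empty, which is why the except Empty branch in the original is effectively dead code:

import time
from queue import Queue, Empty

MAX_RETRIES = 3


def process_queue(update_queue: Queue, handler):
    """Pull items forever; give each item up to MAX_RETRIES attempts."""
    while True:
        try:
            item = update_queue.get(timeout=2)   # timeout keeps the loop responsive
        except Empty:
            continue

        errors = 0
        done = False
        while not done and errors < MAX_RETRIES:
            try:
                done = bool(handler(item))       # handler returns True on success
            except Exception:
                done = False
            if not done:
                errors += 1
                time.sleep(1)                    # brief backoff before retrying

        update_queue.task_done()


# Example usage (normally run in its own daemon thread):
# from threading import Thread
# q = Queue(); q.put("job-1")
# Thread(target=process_queue, args=(q, my_handler), daemon=True).start()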
Example #5
def unsafe(action, *args, **kwargs):
    try:
        action(*args, **kwargs)
    except:
        logger.error(format_exc())
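A usage sketch for this wrapper, including the traceback import it relies on (the flaky function and the plain logging setup are illustrative; the sketch narrows the bare except to Exception):

import logging
from traceback import format_exc

logger = logging.getLogger(__name__)


def unsafe(action, *args, **kwargs):
    """Run action and log the full traceback instead of propagating."""
    try:
        action(*args, **kwargs)
    except Exception:
        logger.error(format_exc())


def flaky(path):
    raise FileNotFoundError(path)


unsafe(flaky, "/tmp/missing.txt")  # logs the traceback, execution continues
print("still running")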
Example #6
    def start_update(self):

        if self._version < 1:
            logger.info('Execute Update for Version 1')
            # Adding quest_reward for PMSF ALT
            if self._dbwrapper._check_column_exists('trs_quest',
                                                    'quest_reward') == 0:
                alter_query = (
                    "ALTER TABLE trs_quest "
                    "ADD quest_reward VARCHAR(500) NULL AFTER quest_condition")
                try:
                    self._dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

            # Adding quest_task = ingame quest conditions
            if self._dbwrapper._check_column_exists('trs_quest',
                                                    'quest_task') == 0:
                alter_query = (
                    "ALTER TABLE trs_quest "
                    "ADD quest_task VARCHAR(150) NULL AFTER quest_reward")
                try:
                    self._dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

            # Adding form column for rm / monocle if it does not exist
            if self._application_args.db_method == "rm":
                alter_query = ("ALTER TABLE raid "
                               "ADD form smallint(6) DEFAULT NULL")
                column_exist = self._dbwrapper._check_column_exists(
                    'raid', 'form')
            elif self._application_args.db_method == "monocle":
                alter_query = ("ALTER TABLE raids "
                               "ADD form smallint(6) DEFAULT NULL")
                column_exist = self._dbwrapper._check_column_exists(
                    'raids', 'form')
            else:
                logger.error("Invalid db_method in config. Exiting")
                sys.exit(1)

            if column_exist == 0:
                try:
                    self._dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

        if self._version < 2:
            alter_query = ("ALTER TABLE trs_quest "
                           "CHANGE quest_reward "
                           "quest_reward VARCHAR(1000) NULL DEFAULT NULL")
            try:
                self._dbwrapper.execute(alter_query, commit=True)
            except Exception as e:
                logger.info("Unexpected error: {}", e)
        if self._version < 3:
            if self._application_args.db_method == "monocle":
                # Add Weather Index
                alter_query = (
                    "ALTER TABLE weather ADD UNIQUE s2_cell_id (s2_cell_id) USING BTREE"
                )
                try:
                    self._dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

                # Change Mon Unique Index
                alter_query = (
                    "ALTER TABLE sightings DROP INDEX timestamp_encounter_id_unique"
                )
                try:
                    self._dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

                alter_query = (
                    "ALTER TABLE sightings DROP INDEX encounter_id;")
                try:
                    self._dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

                alter_query = ("CREATE TABLE sightings_temp LIKE sightings;")
                try:
                    self._dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

                alter_query = (
                    "ALTER TABLE sightings_temp ADD UNIQUE(encounter_id);")
                try:
                    self._dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

                alter_query = (
                    "INSERT IGNORE INTO sightings_temp SELECT * FROM sightings ORDER BY id;"
                )
                try:
                    self._dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

                alter_query = (
                    "RENAME TABLE sightings TO backup_sightings, sightings_temp TO sightings;"
                )
                try:
                    self._dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

                alter_query = ("DROP TABLE backup_sightings;")
                try:
                    self._dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

        if self._version < 7:
            alter_query = ("ALTER TABLE trs_status "
                           "ADD lastPogoReboot varchar(50) NULL DEFAULT NULL")
            column_exist = self._dbwrapper._check_column_exists(
                'trs_status', 'lastPogoReboot')
            if column_exist == 0:
                try:
                    self._dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

            alter_query = ("ALTER TABLE trs_status "
                           "ADD globalrebootcount int(11) NULL DEFAULT '0'")
            column_exist = self._dbwrapper._check_column_exists(
                'trs_status', 'globalrebootcount')
            if column_exist == 0:
                try:
                    self._dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

            alter_query = ("ALTER TABLE trs_status "
                           "ADD globalrestartcount int(11) NULL DEFAULT '0'")
            column_exist = self._dbwrapper._check_column_exists(
                'trs_status', 'globalrestartcount')
            if column_exist == 0:
                try:
                    self._dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

            alter_query = ("ALTER TABLE trs_status CHANGE lastPogoRestart "
                           "lastPogoRestart VARCHAR(50) NULL DEFAULT NULL")
            try:
                self._dbwrapper.execute(alter_query, commit=True)
            except Exception as e:
                logger.info("Unexpected error: {}", e)

            if self._application_args.db_method == "monocle":
                alter_query = (
                    "alter table sightings add column costume smallint(6) default 0"
                )
                column_exist = self._dbwrapper._check_column_exists(
                    'sightings', 'costume')
                if column_exist == 0:
                    try:
                        self._dbwrapper.execute(alter_query, commit=True)
                    except Exception as e:
                        logger.info("Unexpected error: {}", e)

            alter_query = (
                "ALTER TABLE trs_status "
                "CHANGE currentPos currentPos VARCHAR(50) NULL DEFAULT NULL, "
                "CHANGE lastPos lastPos VARCHAR(50) NULL DEFAULT NULL, "
                "CHANGE routePos routePos INT(11) NULL DEFAULT NULL, "
                "CHANGE routeMax routeMax INT(11) NULL DEFAULT NULL, "
                "CHANGE rebootingOption rebootingOption TEXT NULL, "
                "CHANGE rebootCounter rebootCounter INT(11) NULL DEFAULT NULL, "
                "CHANGE routemanager routemanager VARCHAR(255) NULL DEFAULT NULL, "
                "CHANGE lastProtoDateTime lastProtoDateTime VARCHAR(50), "
                "CHANGE lastPogoRestart lastPogoRestart VARCHAR(50), "
                "CHANGE init init TEXT NULL, "
                "CHANGE restartCounter restartCounter TEXT NULL")
            try:
                self._dbwrapper.execute(alter_query, commit=True)
            except Exception as e:
                logger.info("Unexpected error: {}", e)

        if self._version < 8:
            alter_query = ("ALTER TABLE trs_quest "
                           "ADD quest_template VARCHAR(100) NULL DEFAULT NULL "
                           "AFTER quest_reward")
            try:
                self._dbwrapper.execute(alter_query, commit=True)
            except Exception as e:
                logger.exception("Unexpected error: {}", e)

        if self._version < 9:
            alter_query = (
                "UPDATE trs_quest "
                "SET quest_condition=REPLACE(quest_condition,'\\\','\"'),"
                " quest_reward=REPLACE(quest_reward,'\\\','\"')")
            try:
                self._dbwrapper.execute(alter_query, commit=True)
            except Exception as e:
                logger.exception("Unexpected error: {}", e)
        self.set_version(current_version)
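Each migration step above follows the same guarded pattern: check whether the column already exists, then run the ALTER TABLE inside a try/except. A hedged helper sketch of that pattern (add_column_if_missing is hypothetical; the db object is assumed to expose check_column_exists and execute like the wrapper used above, and loguru-style {} formatting is assumed for the logger):

from loguru import logger


def add_column_if_missing(db, table, column, ddl):
    """Apply the ALTER TABLE only when the column is not there yet.

    db is assumed to expose check_column_exists(table, column) -> int
    and execute(query, commit=...) like the wrapper in the example above.
    """
    if db.check_column_exists(table, column) != 0:
        logger.debug("{}.{} already exists - skipping", table, column)
        return
    try:
        db.execute(ddl, commit=True)
    except Exception as e:
        logger.info("Unexpected error: {}", e)


# Mirrors the version-1 step above:
# add_column_if_missing(dbwrapper, 'trs_quest', 'quest_reward',
#                       "ALTER TABLE trs_quest "
#                       "ADD quest_reward VARCHAR(500) NULL AFTER quest_condition")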
Example #7
    async def run(self):
        try:
            self.listen(self.port)
            logger.info("Web server listening on port {}".format(self.port))
        except Exception as e:
            logger.error("Could not start web server: {}".format(e))
Example #8
    def __check_raid_line(self, filename, identifier, communicator, leftSide=False, clickinvers=False):
        logger.debug("__check_raid_line: Reading lines")
        if leftSide:
            logger.debug("__check_raid_line: Check nearby open ")
        try:
            screenshot_read = cv2.imread(filename)
        except Exception:
            logger.error("Screenshot corrupted :(")
            return False
        if screenshot_read is None:
            logger.error("Screenshot corrupted :(")
            return False

        if self.__read_circle_count(os.path.join('', filename), identifier, float(11), communicator, xcord=False, crop=True,
                                    click=False, canny=True) == -1:
            logger.debug("__check_raid_line: Not active")
            return False

        height, width, _ = screenshot_read.shape
        screenshot_read = screenshot_read[int(height / 2) - int(height / 3):int(height / 2) + int(height / 3),
                                        int(0):int(width)]
        gray = cv2.cvtColor(screenshot_read, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)
        logger.debug("__check_raid_line: Determined screenshot scale: " +
                     str(height) + " x " + str(width))
        edges = cv2.Canny(gray, 50, 150, apertureSize=3)
        maxLineLength = width / 3.30 + width * 0.03
        logger.debug("__check_raid_line: MaxLineLength:" + str(maxLineLength))
        minLineLength = width / 6.35 - width * 0.03
        logger.debug("__check_raid_line: MinLineLength:" + str(minLineLength))
        maxLineGap = 50

        lines = cv2.HoughLinesP(edges, rho=1, theta=math.pi / 180, threshold=70, minLineLength=minLineLength,
                                maxLineGap=2)
        if lines is None:
            return False
        for line in lines:
            for x1, y1, x2, y2 in line:
                if not leftSide:
                    if y1 == y2 and (x2 - x1 <= maxLineLength) and (
                            x2 - x1 >= minLineLength) and x1 > width / 2 and x2 > width / 2 and y1 < (height / 2):
                        logger.debug("__check_raid_line: Raid-tab is active - Line lenght: " + str(
                            x2 - x1) + "px Coords - X: " + str(x1) + " " + str(x2) + " Y: " + str(y1) + " " + str(y2))
                        return True
                    # else: logger.debug("__check_raid_line: Raid-tab is not active - Line lenght: " + str(x2-x1) + "px
                    # Coords - X: " + str(x1) + " " + str(x2) + " Y: " + str(y1) + " " + str(y2)) return False
                else:
                    if y1 == y2 and (x2 - x1 <= maxLineLength) and (
                            x2 - x1 >= minLineLength) and ((x1 < width / 2 and x2 < width / 2) or (x1 < width / 2 and x2 > width / 2)) and y1 < (height / 2):
                        logger.debug(
                            "__check_raid_line: Nearby is active - but not Raid-Tab")
                        if clickinvers:
                            xRaidTab = int(width - (x2 - x1))
                            yRaidTab = int(
                                (int(height / 2) - int(height / 3) + y1) * 0.9)
                            logger.debug('__check_raid_line: open Raid-Tab')
                            communicator.click(xRaidTab, yRaidTab)
                            time.sleep(3)
                        return True
                    # else:
                    # logger.debug("__check_raid_line: Nearby not active - but maybe Raid-tab")
                    # return False
        logger.debug("__check_raid_line: Not active")
        return False
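For reference, a compact standalone sketch of the probabilistic Hough transform step used above, with a synthetic test image (the fixed minLineLength is an illustrative simplification; the real method derives its bounds from the screen width):

import math

import cv2
import numpy as np

# Draw one horizontal line on a black canvas and detect it again.
canvas = np.zeros((200, 400), dtype=np.uint8)
cv2.line(canvas, (50, 100), (250, 100), 255, 2)

edges = cv2.Canny(canvas, 50, 150, apertureSize=3)
lines = cv2.HoughLinesP(edges, rho=1, theta=math.pi / 180, threshold=70,
                        minLineLength=100, maxLineGap=2)

if lines is not None:
    for line in lines:
        for x1, y1, x2, y2 in line:
            if y1 == y2:  # horizontal segment, as in __check_raid_line
                print("horizontal line of length", x2 - x1, "px at y =", y1)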
Example #9
    async def __register(self, websocket_client_connection):
        try:
            origin = str(
                websocket_client_connection.request_headers.get_all("Origin")
                [0])
        except IndexError:
            logger.warning(
                "Client from {} tried to connect without Origin header",
                str(websocket_client_connection.remote_address))
            return False
        if not self.__data_manager.is_device_active(origin):
            return (
                False,
                'Origin %s is currently paused.  Unpause through MADmin to begin working'
                % origin)
        logger.info("Client {} registering", str(origin))
        if self.__mapping_manager is None or origin not in self.__mapping_manager.get_all_devicemappings(
        ).keys():
            logger.warning(
                "Register attempt of unknown origin: {}. "
                "Have you forgot to hit 'APPLY SETTINGS' in MADmin?".format(
                    origin))
            return False

        if origin in self.__users_connecting:
            logger.info("Client {} is already connecting".format(origin))
            return False

        auths = self.__mapping_manager.get_auths()
        if auths:
            try:
                authBase64 = str(
                    websocket_client_connection.request_headers.get_all(
                        "Authorization")[0])
            except IndexError:
                logger.warning(
                    "Client from {} tried to connect without auth header",
                    str(
                        websocket_client_connection.request_headers.get_all(
                            "Origin")[0]))
                return False

        async with self.__users_mutex:
            logger.debug("Checking if {} is already present", str(origin))
            if origin in self.__current_users:
                logger.warning(
                    "Worker with origin {} is already running, killing the running one and have client reconnect",
                    str(origin))
                self.__current_users.get(origin)[1].stop_worker()
                ## todo: do this better :D
                logger.debug(
                    "Old worker thread is still alive - waiting 20 seconds")
                await asyncio.sleep(20)
                logger.info("Reconnect ...")
                return

            self.__users_connecting.append(origin)

        # reset the previous fail counter if it exists
        await self.__reset_fail_counter(origin)
        try:
            if auths and authBase64 and not check_auth(authBase64, self.args,
                                                       auths):
                logger.warning(
                    "Invalid auth details received from {}",
                    str(
                        websocket_client_connection.request_headers.get_all(
                            "Origin")[0]))
                return False
            logger.info("Starting worker {}".format(origin))
            if self._configmode:
                worker = WorkerConfigmode(
                    self.args,
                    origin,
                    self,
                    walker=None,
                    mapping_manager=self.__mapping_manager,
                    mitm_mapper=self.__mitm_mapper,
                    db_wrapper=self.__db_wrapper,
                    routemanager_name=None)
                logger.debug("Starting worker for {}", str(origin))
                new_worker_thread = Thread(name='worker_%s' % origin,
                                           target=worker.start_worker)
                async with self.__users_mutex:
                    self.__current_users[origin] = [
                        new_worker_thread, worker, websocket_client_connection,
                        0
                    ]
                return True

            last_known_state = {}
            client_mapping = self.__mapping_manager.get_devicemappings_of(
                origin)
            devicesettings = self.__mapping_manager.get_devicesettings_of(
                origin)
            logger.info("Setting up routemanagers for {}", str(origin))

            if client_mapping.get("walker", None) is not None:
                if devicesettings is not None and "walker_area_index" not in devicesettings:
                    logger.debug("Initializing devicesettings")
                    self.__mapping_manager.set_devicesetting_value_of(
                        origin, 'walker_area_index', 0)
                    self.__mapping_manager.set_devicesetting_value_of(
                        origin, 'finished', False)
                    self.__mapping_manager.set_devicesetting_value_of(
                        origin, 'last_action_time', None)
                    self.__mapping_manager.set_devicesetting_value_of(
                        origin, 'last_cleanup_time', None)
                    self.__mapping_manager.set_devicesetting_value_of(
                        origin, 'job', False)
                    await asyncio.sleep(
                        1
                    )  # give the settings a moment... (dirty "workaround" against race condition)
                walker_index = devicesettings.get('walker_area_index', 0)

                if walker_index > 0:
                    # check status of last area
                    if not devicesettings.get('finished', False):
                        logger.info(
                            'Something wrong with last round - get back to old area'
                        )
                        walker_index -= 1
                        self.__mapping_manager.set_devicesetting_value_of(
                            origin, 'walker_area_index', walker_index)
                        # devicesettings['walker_area_index'] = walker_index

                walker_area_array = client_mapping["walker"]
                walker_settings = walker_area_array[walker_index]

                # pre-check walker settings
                while not pre_check_value(
                        walker_settings) and walker_index - 1 <= len(
                            walker_area_array):
                    walker_area_name = walker_area_array[walker_index][
                        'walkerarea']
                    logger.info(
                        '{} not using area {} - Walkervalue out of range',
                        str(origin),
                        str(
                            self.__mapping_manager.routemanager_get_name(
                                walker_area_name)))
                    if walker_index >= len(walker_area_array) - 1:
                        logger.error(
                            'Could not find any working area at this time - check your mappings for device: {}',
                            str(origin))
                        walker_index = 0
                        self.__mapping_manager.set_devicesetting_value_of(
                            origin, 'walker_area_index', walker_index)
                        walker_settings = walker_area_array[walker_index]
                        await websocket_client_connection.close()
                        return
                    walker_index += 1
                    self.__mapping_manager.set_devicesetting_value_of(
                        origin, 'walker_area_index', walker_index)
                    walker_settings = walker_area_array[walker_index]

                devicesettings = self.__mapping_manager.get_devicesettings_of(
                    origin)
                logger.debug("Checking walker_area_index length")
                if (devicesettings.get("walker_area_index", None) is None
                        or devicesettings['walker_area_index'] >=
                        len(walker_area_array)):
                    # check if the array is smaller than expected - e.g. due to on-the-fly changes in mappings.json
                    self.__mapping_manager.set_devicesetting_value_of(
                        origin, 'walker_area_index', 0)
                    self.__mapping_manager.set_devicesetting_value_of(
                        origin, 'finished', False)
                    walker_index = 0

                walker_area_name = walker_area_array[walker_index][
                    'walkerarea']

                if walker_area_name not in self.__mapping_manager.get_all_routemanager_names(
                ):
                    await websocket_client_connection.close()
                    raise WrongAreaInWalker()

                logger.debug('Devicesettings {}: {}', str(origin),
                             devicesettings)
                logger.info(
                    '{} using walker area {} [{}/{}]', str(origin),
                    str(
                        self.__mapping_manager.routemanager_get_name(
                            walker_area_name)), str(walker_index + 1),
                    str(len(walker_area_array)))
                walker_routemanager_mode = self.__mapping_manager.routemanager_get_mode(
                    walker_area_name)
                self.__mapping_manager.set_devicesetting_value_of(
                    origin, 'walker_area_index', walker_index + 1)
                self.__mapping_manager.set_devicesetting_value_of(
                    origin, 'finished', False)
                if walker_index >= len(walker_area_array) - 1:
                    self.__mapping_manager.set_devicesetting_value_of(
                        origin, 'walker_area_index', 0)

                # set global mon_iv
                routemanager_settings = self.__mapping_manager.routemanager_get_settings(
                    walker_area_name)
                if routemanager_settings is not None:
                    client_mapping['mon_ids_iv'] =\
                        self.__mapping_manager.get_monlist(routemanager_settings.get("mon_ids_iv", None),
                                                           walker_area_name)
            else:
                walker_routemanager_mode = None

            if "last_location" not in devicesettings:
                devicesettings['last_location'] = Location(0.0, 0.0)

            logger.debug("Setting up worker for {}", str(origin))
            worker = None
            if walker_routemanager_mode is None:
                pass
            elif walker_routemanager_mode in [
                    "raids_mitm", "mon_mitm", "iv_mitm"
            ]:
                worker = WorkerMITM(
                    self.args,
                    origin,
                    last_known_state,
                    self,
                    routemanager_name=walker_area_name,
                    mitm_mapper=self.__mitm_mapper,
                    mapping_manager=self.__mapping_manager,
                    db_wrapper=self.__db_wrapper,
                    pogo_window_manager=self.__pogoWindowManager,
                    walker=walker_settings)
            elif walker_routemanager_mode in ["pokestops"]:
                worker = WorkerQuests(
                    self.args,
                    origin,
                    last_known_state,
                    self,
                    routemanager_name=walker_area_name,
                    mitm_mapper=self.__mitm_mapper,
                    mapping_manager=self.__mapping_manager,
                    db_wrapper=self.__db_wrapper,
                    pogo_window_manager=self.__pogoWindowManager,
                    walker=walker_settings)
            elif walker_routemanager_mode in ["idle"]:
                worker = WorkerConfigmode(
                    self.args,
                    origin,
                    self,
                    walker=walker_settings,
                    mapping_manager=self.__mapping_manager,
                    mitm_mapper=self.__mitm_mapper,
                    db_wrapper=self.__db_wrapper,
                    routemanager_name=walker_area_name)
            else:
                logger.error("Mode not implemented")
                sys.exit(1)

            if worker is None:
                logger.error(
                    "Invalid walker mode for {}. Closing connection".format(
                        str(origin)))
                await websocket_client_connection.close()
            else:
                logger.debug("Starting worker for {}", str(origin))
                new_worker_thread = Thread(name='worker_%s' % origin,
                                           target=worker.start_worker)

                new_worker_thread.daemon = True
                async with self.__users_mutex:
                    self.__current_users[origin] = [
                        new_worker_thread, worker, websocket_client_connection,
                        0
                    ]
                new_worker_thread.start()
        except WrongAreaInWalker:
            logger.error('Unknown Area in Walker settings - check config')
            await websocket_client_connection.close()
        except Exception:
            logger.opt(exception=True).error(
                "Other unhandled exception during registration of {}.", origin)
            await websocket_client_connection.close()
        finally:
            async with self.__users_mutex:
                self.__users_connecting.remove(origin)
            await asyncio.sleep(5)
        return True
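The registration path ultimately ends the same way in every branch: build a worker for the origin, store it in the shared registry under the users mutex, and start it on a daemon thread. A framework-free sketch of that final step (the Worker class and register function below are stand-ins, not the project's WorkerMITM/WorkerQuests):

import asyncio
from threading import Thread


class Worker:
    """Stand-in worker exposing the start_worker() entry point used above."""

    def __init__(self, origin):
        self.origin = origin

    def start_worker(self):
        print("worker for", self.origin, "running")


async def register(origin, current_users, users_mutex):
    worker = Worker(origin)
    thread = Thread(name='worker_%s' % origin, target=worker.start_worker)
    thread.daemon = True                  # don't block interpreter shutdown
    async with users_mutex:               # guard the shared registry
        current_users[origin] = [thread, worker, None, 0]
    thread.start()
    return True


async def main():
    current_users = {}
    await register("device-1", current_users, asyncio.Lock())
    await asyncio.sleep(0.1)              # give the daemon thread time to run


asyncio.run(main())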
Example #10
    device_mappings = mapping_parser.get_devicemappings()
    routemanagers = mapping_parser.get_routemanagers()
    auths = mapping_parser.get_auths()
    return (device_mappings, routemanagers, auths)


if __name__ == "__main__":
    # TODO: globally destroy all threads upon sys.exit() for example
    install_thread_excepthook()

    if args.db_method == "rm":
        db_wrapper = RmWrapper(args)
    elif args.db_method == "monocle":
        db_wrapper = MonocleWrapper(args)
    else:
        logger.error("Invalid db_method in config. Exiting")
        sys.exit(1)
    db_wrapper.create_hash_database_if_not_exists()
    db_wrapper.check_and_create_spawn_tables()
    db_wrapper.create_quest_database_if_not_exists()
    db_wrapper.create_status_database_if_not_exists()
    db_wrapper.create_usage_database_if_not_exists()
    version = MADVersion(args, db_wrapper)
    version.get_version()

    if args.clean_hash_database:
        logger.info('Cleanup Hash Database and www_hash folder')
        db_wrapper.delete_hash_table('999', '')
        for file in glob.glob("ocr/www_hash/*.jpg"):
            os.remove(file)
        sys.exit(0)
Example #11
        deps = f.readlines()
        try:
            pkg_resources.require(deps)
        except pkg_resources.VersionConflict as version_error:
            logger.error(
                "Some dependencies aren't met. Required: {} (Installed: {})",
                version_error.req, version_error.dist)
            sys.exit(1)


if __name__ == "__main__":
    check_dependencies()

    if not os.path.exists(args.mappings):
        logger.error(
            "Couldn't find configuration file. Please run 'configmode.py' instead, if this is the first time starting MAD."
        )
        sys.exit(1)

    # TODO: globally destroy all threads upon sys.exit() for example
    install_thread_excepthook()

    db_wrapper, db_wrapper_manager = DbFactory.get_wrapper(args)
    wrong_modes = db_wrapper.running_mysql_modes()
    if len(wrong_modes) > 0:
        logger.error(
            "Your MySQL/MariaDB sql_mode settings needs an adjustment.")
        logger.error("Please drop those settings: {}.", ", ".join(wrong_modes))
        logger.error(
            "More info: https://mad-docs.readthedocs.io/en/latest/common-issues/faq/#sql-mode-error-mysql-strict-mode-mysql-mode"
        )
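A self-contained sketch of the dependency check shown above, assuming a requirements.txt-style file and the loguru logger; it also handles pkg_resources.DistributionNotFound, which the original snippet does not catch:

import sys

import pkg_resources
from loguru import logger


def check_dependencies(requirements_path="requirements.txt"):
    with open(requirements_path) as f:
        deps = f.readlines()
    try:
        pkg_resources.require(deps)
    except pkg_resources.VersionConflict as version_error:
        logger.error("Some dependencies aren't met. Required: {} (Installed: {})",
                     version_error.req, version_error.dist)
        sys.exit(1)
    except pkg_resources.DistributionNotFound as missing:
        logger.error("Missing dependency: {}", missing.req)
        sys.exit(1)


if __name__ == "__main__":
    check_dependencies()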
Example #12
    def start_update(self):

        if self._version < 1:
            logger.info('Execute Update for Version 1')
            # Adding quest_reward for PMSF ALT
            if self.dbwrapper.check_column_exists('trs_quest',
                                                  'quest_reward') == 0:
                alter_query = (
                    "ALTER TABLE trs_quest "
                    "ADD quest_reward VARCHAR(500) NULL AFTER quest_condition")
                try:
                    self.dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

            # Adding quest_task = ingame quest conditions
            if self.dbwrapper.check_column_exists('trs_quest',
                                                  'quest_task') == 0:
                alter_query = (
                    "ALTER TABLE trs_quest "
                    "ADD quest_task VARCHAR(150) NULL AFTER quest_reward")
                try:
                    self.dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

            # Adding form column if it doesn't exist
            if self._application_args.db_method == "rm":
                alter_query = ("ALTER TABLE raid "
                               "ADD form smallint(6) DEFAULT NULL")
                column_exist = self.dbwrapper.check_column_exists(
                    'raid', 'form')
            else:
                logger.error("Invalid db_method in config. Exiting")
                sys.exit(1)

            if column_exist == 0:
                try:
                    self.dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

        if self._version < 2:
            alter_query = ("ALTER TABLE trs_quest "
                           "CHANGE quest_reward "
                           "quest_reward VARCHAR(1000) NULL DEFAULT NULL")
            try:
                self.dbwrapper.execute(alter_query, commit=True)
            except Exception as e:
                logger.info("Unexpected error: {}", e)
        if self._version < 7:
            alter_query = ("ALTER TABLE trs_status "
                           "ADD lastPogoReboot varchar(50) NULL DEFAULT NULL")
            column_exist = self.dbwrapper.check_column_exists(
                'trs_status', 'lastPogoReboot')
            if column_exist == 0:
                try:
                    self.dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

            alter_query = ("ALTER TABLE trs_status "
                           "ADD globalrebootcount int(11) NULL DEFAULT '0'")
            column_exist = self.dbwrapper.check_column_exists(
                'trs_status', 'globalrebootcount')
            if column_exist == 0:
                try:
                    self.dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

            alter_query = ("ALTER TABLE trs_status "
                           "ADD globalrestartcount int(11) NULL DEFAULT '0'")
            column_exist = self.dbwrapper.check_column_exists(
                'trs_status', 'globalrestartcount')
            if column_exist == 0:
                try:
                    self.dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

            alter_query = ("ALTER TABLE trs_status CHANGE lastPogoRestart "
                           "lastPogoRestart VARCHAR(50) NULL DEFAULT NULL")
            try:
                self.dbwrapper.execute(alter_query, commit=True)
            except Exception as e:
                logger.info("Unexpected error: {}", e)

            alter_query = (
                "ALTER TABLE trs_status "
                "CHANGE currentPos currentPos VARCHAR(50) NULL DEFAULT NULL, "
                "CHANGE lastPos lastPos VARCHAR(50) NULL DEFAULT NULL, "
                "CHANGE routePos routePos INT(11) NULL DEFAULT NULL, "
                "CHANGE routeMax routeMax INT(11) NULL DEFAULT NULL, "
                "CHANGE rebootingOption rebootingOption TEXT NULL, "
                "CHANGE rebootCounter rebootCounter INT(11) NULL DEFAULT NULL, "
                "CHANGE routemanager routemanager VARCHAR(255) NULL DEFAULT NULL, "
                "CHANGE lastProtoDateTime lastProtoDateTime VARCHAR(50), "
                "CHANGE lastPogoRestart lastPogoRestart VARCHAR(50), "
                "CHANGE init init TEXT NULL, "
                "CHANGE restartCounter restartCounter TEXT NULL")
            try:
                self.dbwrapper.execute(alter_query, commit=True)
            except Exception as e:
                logger.info("Unexpected error: {}", e)

        if self._version < 8:
            alter_query = ("ALTER TABLE trs_quest "
                           "ADD quest_template VARCHAR(100) NULL DEFAULT NULL "
                           "AFTER quest_reward")
            column_exist = self.dbwrapper.check_column_exists(
                'trs_quest', 'quest_template')
            if column_exist == 0:
                try:
                    self.dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.exception("Unexpected error: {}", e)

        if self._version < 9:
            alter_query = (
                "UPDATE trs_quest "
                "SET quest_condition=REPLACE(quest_condition,'\\\"','\"'),"
                " quest_reward=REPLACE(quest_reward,'\\\"','\"')")
            try:
                self.dbwrapper.execute(alter_query, commit=True)
            except Exception as e:
                logger.exception("Unexpected error: {}", e)

        if self._version < 10:
            query = ("CREATE TABLE IF NOT EXISTS trs_s2cells ( "
                     "id bigint(20) unsigned NOT NULL, "
                     "level int(11) NOT NULL, "
                     "center_latitude double NOT NULL, "
                     "center_longitude double NOT NULL, "
                     "updated int(11) NOT NULL, "
                     "PRIMARY KEY (id)) ")
            try:
                self.dbwrapper.execute(query, commit=True)
            except Exception as e:
                logger.exception("Unexpected error: {}", e)

        if self._version < 11:
            query = ("ALTER TABLE trs_stats_detect_raw "
                     "ADD is_shiny TINYINT(1) NOT NULL DEFAULT '0' "
                     "AFTER count")
            column_exist = self.dbwrapper.check_column_exists(
                'trs_stats_detect_raw', 'is_shiny')
            if column_exist == 0:
                try:
                    self.dbwrapper.execute(query, commit=True)
                except Exception as e:
                    logger.exception("Unexpected error: {}", e)

        if self._version < 12:
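            # (Re)create the typeworker and shiny indexes; if an index already exists it is dropped and re-added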
            query = ("ALTER TABLE trs_stats_detect_raw "
                     "ADD INDEX typeworker (worker, type_id)")
            index_exist = self.dbwrapper.check_index_exists(
                'trs_stats_detect_raw', 'typeworker')

            if index_exist >= 1:
                query = (
                    "ALTER TABLE trs_stats_detect_raw DROP INDEX typeworker, ADD INDEX typeworker (worker, type_id)"
                )
            try:
                self.dbwrapper.execute(query, commit=True)
            except Exception as e:
                logger.exception("Unexpected error: {}", e)

            query = ("ALTER TABLE trs_stats_detect_raw "
                     "ADD INDEX shiny (is_shiny)")
            index_exist = self.dbwrapper.check_index_exists(
                'trs_stats_detect_raw', 'shiny')

            if index_exist >= 1:
                query = (
                    "ALTER TABLE trs_stats_detect_raw DROP INDEX shiny, ADD INDEX shiny (is_shiny)"
                )
            try:
                self.dbwrapper.execute(query, commit=True)
            except Exception as e:
                logger.exception("Unexpected error: {}", e)

        if self._version < 13:
            # Add currentSleepTime column for worker status
            if self.dbwrapper.check_column_exists('trs_status',
                                                  'currentSleepTime') == 0:
                query = ("ALTER TABLE trs_status "
                         "ADD currentSleepTime INT(11) NOT NULL DEFAULT 0")
                try:
                    self.dbwrapper.execute(query, commit=True)
                except Exception as e:
                    logger.exception("Unexpected error: {}", e)

        if self._version < 14:
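            # Migrate the flat mappings.json layout to the indexed format ({'index': ..., 'entries': {...}}) with /api/... references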
            update_order = [
                'monivlist', 'auth', 'devicesettings', 'areas', 'walker',
                'devices'
            ]
            old_data = {}
            new_data = {}
            cache = {}
            target = '%s.bk' % (self._application_args.mappings, )
            try:
                shutil.copy(self._application_args.mappings, target)
            except IOError:
                logger.exception('Unable to clone configuration. Exiting')
                sys.exit(1)
            with open(self._application_args.mappings, 'rb') as fh:
                old_data = json.load(fh)

            if "migrated" in old_data and old_data["migrated"] is True:
                with open(self._application_args.mappings, 'w') as outfile:
                    json.dump(old_data, outfile, indent=4, sort_keys=True)
            else:
                walkerarea = 'walkerarea'
                walkerarea_ind = 0
                for key in update_order:
                    try:
                        entries = old_data[key]
                    except Exception:
                        entries = []
                    cache[key] = {}
                    index = 0
                    new_data[key] = {'index': index, 'entries': {}}
                    if key == 'walker':
                        new_data[walkerarea] = {'index': index, 'entries': {}}

                    for entry in entries:
                        if key == 'monivlist':
                            cache[key][entry['monlist']] = index
                        elif key == 'devicesettings':
                            cache[key][entry['devicepool']] = index
                        elif key == 'areas':
                            cache[key][entry['name']] = index
                            try:
                                mon_list = entry['settings']['mon_ids_iv']
                                if type(mon_list) is list:
                                    monlist_ind = new_data['monivlist'][
                                        'index']
                                    new_data['monivlist']['entries'][index] = {
                                        'monlist': 'Update List',
                                        'mon_ids_iv': mon_list
                                    }
                                    entry['settings'][
                                        'mon_ids_iv'] = '/api/monivlist/%s' % (
                                            monlist_ind)
                                    new_data['monivlist']['index'] += 1
                                else:
                                    try:
                                        name = mon_list
                                        uri = '/api/monivlist/%s' % (
                                            cache['monivlist'][name])
                                        entry['settings']['mon_ids_iv'] = uri
                                    except Exception:
                                        # No name match. Maybe an old record, so let's toss it
                                        del entry['settings']['mon_ids_iv']
                            except KeyError:
                                # Monlist is not defined for the area
                                pass
                            except Exception:
                                # No monlist specified
                                pass
                        elif key == 'walker':
                            cache[key][entry['walkername']] = index
                            valid_areas = []
                            if 'setup' in entry:
                                for ind, area in enumerate(entry['setup']):
                                    try:
                                        area['walkerarea'] = '/api/area/%s' % (
                                            cache['areas'][area['walkerarea']],
                                        )
                                    except KeyError:
                                        # The area no longer exists.  Remove from the path
                                        pass
                                    else:
                                        new_data[walkerarea]['entries'][
                                            walkerarea_ind] = area
                                        valid_areas.append(
                                            '/api/walkerarea/%s' %
                                            walkerarea_ind)
                                        walkerarea_ind += 1
                                entry['setup'] = valid_areas
                                new_data[walkerarea]['index'] = walkerarea_ind
                            else:
                                entry['setup'] = []
                        elif key == 'devices':
                            if 'pool' in entry:
                                try:
                                    entry['pool'] = '/api/devicesetting/%s' % (
                                        cache['devicesettings'][entry['pool']],
                                    )
                                except Exception:
                                    if entry['pool'] is not None:
                                        logger.error(
                                            'DeviceSettings {} is not valid',
                                            entry['pool'])
                                    del entry['pool']
                            try:
                                entry['walker'] = '/api/walker/%s' % (
                                    cache['walker'][entry['walker']], )
                            except Exception:
                                # The walker no longer exists.  Skip the device
                                continue
                        new_data[key]['entries'][index] = entry
                        index += 1
                    new_data[key]['index'] = index

                new_data['migrated'] = True

                with open(self._application_args.mappings, 'w') as outfile:
                    json.dump(new_data, outfile, indent=4, sort_keys=True)

        self.set_version(current_version)
Example No. 13
0
    with open(args.mappings, 'w') as outfile:
        json.dump(newfile, outfile, indent=4, sort_keys=True)


def start_madmin(args, db_wrapper: DbWrapperBase, ws_server,
                 mapping_manager: MappingManager):
    from madmin.madmin import madmin_start
    madmin_start(args, db_wrapper, ws_server, mapping_manager)


if __name__ == "__main__":
    logger.info('Starting MAD config mode - please wait')
    filename = os.path.join('configs', 'config.ini')
    if not os.path.exists(filename):
        logger.error(
            'config.ini file not found - check the configs folder and copy the .example file'
        )
        sys.exit(1)

    filename = args.mappings
    if not os.path.exists(filename):
        generate_mappingjson()

    db_wrapper, db_wrapper_manager = DbFactory.get_wrapper(args)

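    # Make sure all MAD-specific databases and tables exist before config mode starts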
    db_wrapper.create_hash_database_if_not_exists()
    db_wrapper.check_and_create_spawn_tables()
    db_wrapper.create_quest_database_if_not_exists()
    db_wrapper.create_status_database_if_not_exists()
    db_wrapper.create_usage_database_if_not_exists()
    db_wrapper.create_statistics_databases_if_not_exists()
Example No. 14
0
    def clear_box(self, delayadd):
        stop_inventory_clear = Event()
        stop_screen_clear = Event()
        logger.info('Cleanup Box')
        not_allow = ('Gift', 'Geschenk', 'Glücksei', 'Glucks-Ei', 'Glücks-Ei', 'Lucky Egg', 'FrenchNameForLuckyEgg',
                     'Cadeau', 'Appareil photo', 'Wunderbox', 'Mystery Box', 'Boîte Mystère')
        x, y = self._resocalc.get_close_main_button_coords(self)[0], self._resocalc.get_close_main_button_coords(self)[
            1]
        self._communicator.click(int(x), int(y))
        time.sleep(1 + int(delayadd))
        x, y = self._resocalc.get_item_menu_coords(
                self)[0], self._resocalc.get_item_menu_coords(self)[1]
        self._communicator.click(int(x), int(y))
        time.sleep(2 + int(delayadd))
        _data_err_counter = 0
        _pos = 1
        text_x1, text_x2, text_y1, text_y2 = self._resocalc.get_delete_item_text(
                self)
        x, y = self._resocalc.get_delete_item_coords(
                self)[0], self._resocalc.get_delete_item_coords(self)[1]
        click_x1, click_x2, click_y = self._resocalc.get_swipe_item_amount(self)[0], \
                                      self._resocalc.get_swipe_item_amount(self)[1], \
                                      self._resocalc.get_swipe_item_amount(self)[2]
        delrounds = 0
        first_round = True
        delete_allowed = False
        error_counter = 0

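        # Loop over delete rounds until enough items were cleared or a stop event aborts the cleanup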
        while int(delrounds) <= 8 and not stop_inventory_clear.is_set():

            trash = 0
            if not first_round and not delete_allowed:
                error_counter += 1
                if error_counter > 3:
                    stop_inventory_clear.set()
                logger.warning('Found no item to delete: {}', str(error_counter))
                self._communicator.touchandhold(int(200), int(300), int(200), int(100))
                time.sleep(2)

            trashcancheck = self._get_trash_positions()
            if trashcancheck is None:
                logger.error('Could not find any trashcan - abort')
                return
            logger.info("Found {} trashcan(s) on screen", len(trashcancheck))
            first_round = False
            delete_allowed = False
            stop_screen_clear.clear()

            while int(trash) <= len(trashcancheck) - 1 and not stop_screen_clear.is_set():
                check_y_text_starter = int(trashcancheck[trash].y)
                check_y_text_ending = int(trashcancheck[trash].y) + self._resocalc.get_inventory_text_diff(self)

                try:
                    item_text = self._pogoWindowManager.get_inventory_text(self.get_screenshot_path(),
                                                                       self._id, text_x1, text_x2, check_y_text_ending,
                                                                       check_y_text_starter)

                    logger.info("Found item {}", str(item_text))
                    if item_text in not_allow:
                        logger.info('Item is not allowed to be deleted - checking the next one')
                        trash += 1
                    else:
                        logger.info('Item can be deleted - deleting it')
                        self._communicator.click(int(trashcancheck[trash].x), int(trashcancheck[trash].y))
                        time.sleep(1 + int(delayadd))

                        self._communicator.touchandhold(
                            click_x1, click_y, click_x2, click_y)
                        time.sleep(1)

                        delx, dely = self._resocalc.get_confirm_delete_item_coords(self)[0], \
                                     self._resocalc.get_confirm_delete_item_coords(self)[1]
                        curTime = time.time()
                        self._communicator.click(int(delx), int(dely))

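                        # Wait up to 35s for proto 4 to confirm the deletion was processed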
                        data_received = self._wait_for_data(
                            timestamp=curTime, proto_to_wait_for=4, timeout=35)

                        if data_received != LatestReceivedType.UNDEFINED:
                            if data_received == LatestReceivedType.CLEAR:
                                delrounds += 1
                                stop_screen_clear.set()
                                delete_allowed = True
                        else:
                            logger.error('Unknown error clearing out {}', str(item_text))
                            stop_screen_clear.set()
                            stop_inventory_clear.set()

                except UnicodeEncodeError:
                    logger.warning('Found text that could not be encoded - aborting box cleanup')
                    stop_inventory_clear.set()
                    stop_screen_clear.set()

        x, y = self._resocalc.get_close_main_button_coords(self)[0], self._resocalc.get_close_main_button_coords(self)[
            1]
        self._communicator.click(int(x), int(y))
        time.sleep(1 + int(delayadd))
        return True
Example No. 15
0
    def get_trash_click_positions(self, filename):
        if not os.path.isfile(filename):
            logger.error("get_trash_click_positions: {} does not exist", str(filename))
            return None

        return self.__thread_pool.apply_async(trash_image_matching, (filename,)).get()
Example No. 16
0
    def __internal_look_for_button(self, filename, ratiomin, ratiomax, communicator, upper):
        logger.debug("lookForButton: Reading lines")
        disToMiddleMin = None
        try:
            screenshot_read = cv2.imread(filename)
            gray = cv2.cvtColor(screenshot_read, cv2.COLOR_BGR2GRAY)
        except Exception:
            logger.error("Screenshot corrupted :(")
            return False

        if screenshot_read is None:
            logger.error("Screenshot corrupted :(")
            return False

        height, width, _ = screenshot_read.shape
        _widthold = float(width)
        logger.debug("lookForButton: Determined screenshot scale: " +
                     str(height) + " x " + str(width))

        # resize for better line quality
        # gray = cv2.resize(gray, (0,0), fx=width*0.001, fy=width*0.001)
        height, width = gray.shape
        factor = width / _widthold

        gray = cv2.GaussianBlur(gray, (3, 3), 0)
        edges = cv2.Canny(gray, 50, 200, apertureSize=3)
        # checking for all possible button lines

        maxLineLength = (width / ratiomin) + (width * 0.18)
        logger.debug("lookForButton: MaxLineLength:" + str(maxLineLength))
        minLineLength = (width / ratiomax) - (width * 0.02)
        logger.debug("lookForButton: MinLineLength:" + str(minLineLength))

        kernel = np.ones((2, 2), np.uint8)
        # kernel = np.zeros(shape=(2, 2), dtype=np.uint8)
        edges = cv2.morphologyEx(edges, cv2.MORPH_GRADIENT, kernel)

        lineCount = 0
        lines = cv2.HoughLinesP(edges, rho=1, theta=math.pi / 180, threshold=70, minLineLength=minLineLength,
                                maxLineGap=5)
        if lines is None:
            return False

        lines = self.check_lines(lines, height)

        _last_y = 0
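        # Keep only horizontal lines of plausible button length whose centre sits near the middle of the screen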
        for line in lines:
            line = [line]
            for x1, y1, x2, y2 in line:

                if y1 == y2 and x2 - x1 <= maxLineLength and x2 - x1 >= minLineLength \
                        and y1 > height / 3 \
                        and (x2 - x1) / 2 + x1 < width / 2 + 50 and (x2 - x1) / 2 + x1 > width / 2 - 50:

                    lineCount += 1
                    disToMiddleMin_temp = y1 - (height / 2)
                    if upper:
                        if disToMiddleMin is None:
                            disToMiddleMin = disToMiddleMin_temp
                            click_y = y1 + 50
                            _last_y = y1
                            _x1 = x1
                            _x2 = x2
                        else:
                            if disToMiddleMin_temp < disToMiddleMin:
                                click_y = _last_y + ((y1 - _last_y) / 2)
                                _last_y = y1
                                _x1 = x1
                                _x2 = x2

                    else:
                        click_y = _last_y + ((y1 - _last_y) / 2)
                        _last_y = y1
                        _x1 = x1
                        _x2 = x2

                    logger.debug("lookForButton: Found button line no. " + str(lineCount) + " - line length: " + str(
                        x2 - x1) + "px Coords - X: " + str(x1) + " " + str(x2) + " Y: " + str(y1) + " " + str(y2))

        if 1 < lineCount <= 6:
            # recalculate click area for real resolution
            click_x = int(((width - _x2) + ((_x2 - _x1) / 2)) /
                          round(factor, 2))
            click_y = int(click_y)
            logger.debug('lookForButton: found Button - click on it')
            communicator.click(click_x, click_y)
            time.sleep(4)
            return True

        elif lineCount > 6:
            logger.debug('lookForButton: found too many buttons - closing the dialog')
            communicator.click(int(width - (width / 7.2)),
                               int(height - (height / 12.19)))
            time.sleep(4)

            return True

        logger.debug('lookForButton: did not find any button')
        return False
Example No. 17
0
    async def __send_and_wait_internal(self,
                                       id,
                                       worker_instance,
                                       message,
                                       timeout,
                                       byte_command: int = None):
        async with self.__users_mutex:
            user_entry = self.__current_users.get(id, None)

        if user_entry is None or user_entry[
                1] != worker_instance and worker_instance != 'madmin':
            raise WebsocketWorkerRemovedException

        message_id = await self.__get_new_message_id()
        message_event = asyncio.Event()
        message_event.clear()

        await self.__set_request(message_id, message_event)

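        # String messages get the message id prefixed; binary payloads are prefixed with message id and byte command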
        if isinstance(message, str):
            to_be_sent: str = u"%s;%s" % (str(message_id), message)
            logger.debug("To be sent to {}: {}", id, to_be_sent.strip())
        elif byte_command is not None:
            to_be_sent: bytes = (int(message_id)).to_bytes(4, byteorder='big')
            to_be_sent += (int(byte_command)).to_bytes(4, byteorder='big')
            to_be_sent += message
            logger.debug("To be sent to {} (message ID: {}): {}", id,
                         message_id, str(to_be_sent[:10]))
        else:
            logger.fatal(
                "Tried to send invalid message (bytes without byte command or no byte/str passed)"
            )
            return None
        await self.__send(id, to_be_sent)

        # now wait for the response!
        result = None
        logger.debug("Timeout: {}", str(timeout))
        event_triggered = None
        try:
            event_triggered = await asyncio.wait_for(message_event.wait(),
                                                     timeout=timeout)
        except asyncio.TimeoutError:
            logger.warning("Timeout, increasing timeout-counter")
            # TODO: why is the user removed here?
            new_count = await self.__increase_fail_counter(id)
            if new_count > 5:
                logger.error(
                    "5 consecutive timeouts to {} or origin is no longer connected, cleaning up",
                    str(id))
                await self.__internal_clean_up_user(id, None)
                await self.__reset_fail_counter(id)
                await self.__remove_request(message_id)
                raise WebsocketWorkerTimeoutException

        if event_triggered:
            logger.debug("Received answer in time, popping response")
            await self.__reset_fail_counter(id)
            await self.__remove_request(message_id)
            result = await self.__pop_response(message_id)
            if isinstance(result, str):
                logger.debug("Response to {}: {}", str(id),
                             str(result.strip()))
            else:
                logger.debug("Received binary data to {}, starting with {}",
                             str(id), str(result[:10]))
        return result
Example No. 18
0
    async def __register(self, websocket_client_connection):
        logger.info("Client {} registering", str(
            websocket_client_connection.request_headers.get_all("Origin")[0]))
        if self.__stop_server.is_set():
            logger.info(
                "MAD is set to shut down, not accepting new connections")
            return False

        try:
            id = str(
                websocket_client_connection.request_headers.get_all("Origin")[0])
        except IndexError:
            # The Origin header is missing here, so it cannot be included in the log message
            logger.warning("Client tried to connect without an Origin header")
            return False

        if self.__auths:
            try:
                authBase64 = str(
                    websocket_client_connection.request_headers.get_all("Authorization")[0])
            except IndexError:
                logger.warning("Client from {} tried to connect without auth header", str(
                    websocket_client_connection.request_headers.get_all("Origin")[0]))
                return False

        self.__current_users_mutex.acquire()
        try:
            logger.debug("Checking if {} is already present", str(id))
            user_present = self.__current_users.get(id)
            if user_present is not None:
                logger.warning("Worker with origin {} is already running, killing the running one and having the client reconnect",
                               str(websocket_client_connection.request_headers.get_all("Origin")[0]))
                user_present[1].stop_worker()
                return False
            elif self.__auths and authBase64 and not check_auth(authBase64, self.args, self.__auths):
                logger.warning("Invalid auth details received from {}", str(
                    websocket_client_connection.request_headers.get_all("Origin")[0]))
                return False

            if self._configmode:
                worker = WorkerConfigmode(self.args, id, self)
                logger.debug("Starting worker for {}", str(id))
                new_worker_thread = Thread(
                    name='worker_%s' % id, target=worker.start_worker)
                self.__current_users[id] = [
                    new_worker_thread, worker, websocket_client_connection, 0]
                return True

            last_known_state = {}
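            # Resolve the device's walker configuration and select the routemanager for its current area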
            client_mapping = self.__device_mappings[id]
            devicesettings = client_mapping["settings"]
            logger.info("Setting up routemanagers for {}", str(id))

            if client_mapping.get("walker", None) is not None:
                if "walker_area_index" not in devicesettings:
                    devicesettings['walker_area_index'] = 0
                    devicesettings['finished'] = False
                    devicesettings['last_action_time'] = None
                    devicesettings['last_cleanup_time'] = None

                walker_index = devicesettings.get('walker_area_index', 0)

                if walker_index > 0:
                    # check status of last area
                    if not devicesettings.get('finished', False):
                        logger.info(
                            'Something went wrong in the last round - returning to the previous area')
                        walker_index -= 1
                        devicesettings['walker_area_index'] = walker_index

                walker_area_array = client_mapping["walker"]
                walker_settings = walker_area_array[walker_index]

                # pre-check the walker settings
                while not pre_check_value(walker_settings) and walker_index - 1 <= len(walker_area_array):
                    walker_area_name = walker_area_array[walker_index]['walkerarea']
                    logger.info(
                        '{} not using area {} - walker value out of range', str(id), str(walker_area_name))
                    if walker_index >= len(walker_area_array) - 1:
                        logger.error(
                            'Could not find any working area - check your config')
                        walker_index = 0
                        devicesettings['walker_area_index'] = walker_index
                        walker_settings = walker_area_array[walker_index]
                        break
                    walker_index += 1
                    devicesettings['walker_area_index'] = walker_index
                    walker_settings = walker_area_array[walker_index]

                if devicesettings['walker_area_index'] >= len(walker_area_array):
                    # check if the array is smaller than expected - e.g. on-the-fly changes in mappings.json
                    devicesettings['walker_area_index'] = 0
                    devicesettings['finished'] = False
                    walker_index = devicesettings.get('walker_area_index', 0)

                walker_area_name = walker_area_array[walker_index]['walkerarea']

                if walker_area_name not in self.__routemanagers:
                    raise WrongAreaInWalker()

                logger.debug('Devicesettings {}: {}', str(id), devicesettings)
                logger.info('{} using walker area {} [{}/{}]', str(id), str(
                    walker_area_name), str(walker_index+1), str(len(walker_area_array)))
                walker_routemanager = \
                    self.__routemanagers[walker_area_name].get(
                        "routemanager", None)
                devicesettings['walker_area_index'] += 1
                devicesettings['finished'] = False
                if walker_index >= len(walker_area_array) - 1:
                    devicesettings['walker_area_index'] = 0

                # set global mon_iv
                client_mapping['mon_ids_iv'] = \
                    self.__routemanagers[walker_area_name].get(
                        "routemanager").settings.get("mon_ids_iv", [])

            else:
                walker_routemanager = None

            if "last_location" not in devicesettings:
                devicesettings['last_location'] = Location(0.0, 0.0)

            logger.debug("Setting up worker for {}", str(id))

            if walker_routemanager is None:
                pass
            elif walker_routemanager.mode in ["raids_mitm", "mon_mitm", "iv_mitm"]:
                worker = WorkerMITM(self.args, id, last_known_state, self, walker_routemanager,
                                    self.__mitm_mapper, devicesettings, db_wrapper=self.__db_wrapper,
                                    pogoWindowManager=self.__pogoWindowManager, walker=walker_settings)
            elif walker_routemanager.mode in ["raids_ocr"]:
                from worker.WorkerOCR import WorkerOCR
                worker = WorkerOCR(self.args, id, last_known_state, self, walker_routemanager,
                                   devicesettings, db_wrapper=self.__db_wrapper,
                                   pogoWindowManager=self.__pogoWindowManager, walker=walker_settings)
            elif walker_routemanager.mode in ["pokestops"]:
                worker = WorkerQuests(self.args, id, last_known_state, self, walker_routemanager,
                                      self.__mitm_mapper, devicesettings, db_wrapper=self.__db_wrapper,
                                      pogoWindowManager=self.__pogoWindowManager, walker=walker_settings)
            elif walker_routemanager.mode in ["idle"]:
                worker = WorkerConfigmode(self.args, id, self)
            else:
                logger.error("Mode not implemented")
                sys.exit(1)

            logger.debug("Starting worker for {}", str(id))
            new_worker_thread = Thread(
                name='worker_%s' % id, target=worker.start_worker)

            new_worker_thread.daemon = False

            self.__current_users[id] = [new_worker_thread,
                                        worker, websocket_client_connection, 0]
            new_worker_thread.start()
        except WrongAreaInWalker:
            logger.error('Unknown Area in Walker settings - check config')
        finally:
            self.__current_users_mutex.release()

        return True
Example No. 19
0
    def copyMons(pogoassets_path, db_wrapper):

        monList = []

        logger.info('Processing Pokemon matching...')
        with open('raidmons.json') as f:
            data = json.load(f)

        monImgPath = os.getcwd() + '/ocr/mon_img/'
        filePath = os.path.dirname(monImgPath)

        if not os.path.exists(filePath):
            logger.info('ocr/mon_img directory created')
            os.makedirs(filePath)

        assetPath = pogoassets_path

        if not os.path.exists(assetPath):
            logger.error('PogoAssets not found')
            exit(0)

        for file in glob.glob(monImgPath + "*mon*.png"):
            os.remove(file)

        for mons in data:
            for mon in mons['DexID']:
                lvl = mons['Level']
                if str(mon).find("_") > -1:
                    mon_split = str(mon).split("_")
                    mon = mon_split[0]
                    frmadd = mon_split[1]
                else:
                    frmadd = "00"

                mon = '{:03d}'.format(int(mon))
                monList.append(mon)

                monFile = monImgPath + '_mon_' + \
                    str(mon) + '_' + str(lvl) + '.png'

                if not os.path.isfile(monFile):

                    monFileAsset = assetPath + '/pokemon_icons/pokemon_icon_' + \
                        str(mon) + '_' + frmadd + '.png'

                    if not os.path.isfile(monFileAsset):
                        logger.error('File ' + str(monFileAsset) +
                                     ' not found')
                        exit(0)

                    copyfile(monFileAsset, monFile)

                    image = Image.open(monFile)
                    image.convert("RGBA")
                    # Empty canvas colour (r,g,b,a)
                    canvas = Image.new('RGBA', image.size,
                                       (255, 255, 255, 255))
                    # Paste the image onto the canvas, using its alpha channel as a mask
                    canvas.paste(image, mask=image)
                    canvas.save(monFile, format="PNG")

                    monAsset = cv2.imread(monFile, 3)
                    height, width, channels = monAsset.shape
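                    # Threshold to near-white pixels, leaving the mon as a dark silhouette on a white background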
                    monAsset = cv2.inRange(monAsset, np.array([240, 240, 240]),
                                           np.array([255, 255, 255]))
                    cv2.imwrite(monFile, monAsset)
                    crop = cv2.imread(monFile, 3)
                    crop = crop[0:int(height), 0:int((width / 10) * 10)]
                    kernel = np.ones((2, 2), np.uint8)
                    crop = cv2.erode(crop, kernel, iterations=1)
                    kernel = np.ones((3, 3), np.uint8)
                    crop = cv2.morphologyEx(crop, cv2.MORPH_CLOSE, kernel)

                    #gray = cv2.cvtColor(crop,cv2.COLOR_BGR2GRAY)
                    #_,thresh = cv2.threshold(gray,1,255,cv2.THRESH_BINARY_INV)
                    #contours = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
                    #cnt = contours[0]
                    #x,y,w,h = cv2.boundingRect(cnt)
                    #crop = crop[y-1:y+h+1,x-1:x+w+1]
                    cv2.imwrite(monFile, crop)

        _monList = '|'.join(map(str, monList))
        db_wrapper.clear_hash_gyms(_monList)
Example No. 20
0
def unsafe(action, *args, **kwargs):
    try:
        action(*args, **kwargs)
    except Exception:
        logger.error(format_exc())
Example No. 21
0

def start_madmin(args, db_wrapper: DbWrapper, ws_server,
                 mapping_manager: MappingManager, data_manager, deviceUpdater,
                 jobstatus):
    from madmin.madmin import madmin_start
    madmin_start(args, db_wrapper, ws_server, mapping_manager, data_manager,
                 deviceUpdater, jobstatus)


if __name__ == "__main__":
    logger.info('Starting MAD config mode')
    filename = os.path.join('configs', 'config.ini')
    if not os.path.exists(filename):
        logger.error(
            'config.ini file not found. Check configs folder and copy example config'
        )
        sys.exit(1)

    create_folder(args.file_path)
    create_folder(args.upload_path)

    db_wrapper, db_pool_manager = DbFactory.get_wrapper(args)

    instance_id = db_wrapper.get_instance_id()
    data_manager = utils.data_manager.DataManager(db_wrapper, instance_id)
    data_manager.clear_on_boot()
    version = MADVersion(args, data_manager)
    version.get_version()

    MappingManagerManager.register('MappingManager', MappingManager)