Example #1
    def run(self) -> None:
        # logger.warning('This is a development server. Do not use it in a production deployment.')
        try:
            for task_name in self.deploy_cluster:
                try:
                    schedule.every(self.crontab['action']).minutes.do(self.push_task, task_name=task_name)
                    if ENABLE_DDT:
                        schedule.every(self.crontab['refresh']).minutes.do(self.rc.refresh,
                                                                           key_name=REDIS_SECRET_KEY.format(task_name))
                        logger.success(f"START DDT -- {task_name}")
                    else:
                        logger.warning(f'Not Authorized -- DDT({task_name})')
                    logger.success(f"START TASK -- {task_name}/crontab:{self.crontab['action']} minutes")

                except schedule.IntervalError:
                    logger.error('interval set error')

                # self.crontab['action'] += 5

            while True:
                schedule.run_pending()
                time.sleep(1)

        except Exception as err:
            logger.exception('Exception occurred ||{}'.format(err))
            noticer.send_email(msg='{}'.format(err), to_='self')
        except KeyboardInterrupt as err:
            logger.stop('Forced stop ||{}'.format(err))
Example #2
    def update_session(self, sessionid, session):

        query = sql.SQL("""
            UPDATE {}
            SET
                slot = %(slot)s,
                ownerid = %(ownerid)s,
                model = %(model)s,
                customerid = %(customerid)s,
                due = %(due)s
            WHERE id = %(sessionid)s
            RETURNING id
        """).format(sql.Identifier(self.table))

        # Without a WHERE clause the statement would update every row;
        # the previously unused sessionid argument scopes it to one session.
        params = dict(session, sessionid=sessionid)

        logger.info("Mogrify: {}".format(db.cursor.mogrify(query, params)))

        try:
            db.cursor.execute(query, params)
            db.conn.commit()
            fetch = db.cursor.fetchone()
            logger.debug("FETCH: {}".format(fetch))
            return fetch['id']
        except psycopg2.IntegrityError as e:
            logger.warning(e)
            db.conn.rollback()
            raise
        except psycopg2.ProgrammingError:
            logger.exception("!ERROR")
            db.conn.rollback()
            raise
        except Exception:
            db.conn.rollback()
            raise
Example #3
async def send_message(user_id: int,
                       text: str,
                       disable_notification: bool = False) -> bool:
    """
    Safe messages sender
    :param user_id:
    :param text:
    :param disable_notification:
    :return:
    """
    try:
        await bot.send_message(user_id,
                               text,
                               parse_mode="html",
                               disable_notification=disable_notification)
    except exceptions.BotBlocked:
        log.error(f"Target [ID:{user_id}]: blocked by user")
    except exceptions.ChatNotFound:
        log.error(f"Target [ID:{user_id}]: invalid user ID")
    except exceptions.RetryAfter as e:
        log.error(
            f"Target [ID:{user_id}]: Flood limit is exceeded. Sleep {e.timeout} seconds."
        )
        await asyncio.sleep(e.timeout)
        return await send_message(user_id, text, disable_notification)  # Recursive call, keeping the notification flag
    except exceptions.UserDeactivated:
        log.error(f"Target [ID:{user_id}]: user is deactivated")
    except exceptions.TelegramAPIError:
        log.exception(f"Target [ID:{user_id}]: failed")
    else:
        log.info(f"Target [ID:{user_id}]: success")
        return True
    return False
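
A natural companion to this safe sender is a small broadcast loop. The sketch below is illustrative only: the user_ids iterable and the 20-messages-per-second pacing are assumptions, not part of the example above.

async def broadcast(user_ids, text: str) -> int:
    """Hypothetical driver for send_message(); returns how many sends succeeded."""
    count = 0
    try:
        for user_id in user_ids:
            if await send_message(user_id, text):
                count += 1
            await asyncio.sleep(.05)  # pace at roughly 20 messages per second
    finally:
        log.info(f"{count} messages successfully sent.")
    return count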
Example #4
def INSERT_UPDATE_TRAN(SQL_STR):
	try:
		db.session.execute(SQL_STR)
		db.session.commit()
	except:
		logger.exception('exception')
		db.session.rollback()
Example #5
 def post_message_in_channel(self, message, channel=config.WINLINE_ALERT_CHANNEL):
     """
     :param message:
     :param channel: channel name
     :return: True if the message was sent successfully, else False
     """
     current_attempt = 1
     logger.info('Try to send message: \"{message}\" in channel \"{channel}\".'
                 .format(message=message, channel=channel))
     while current_attempt <= config.SEND_MESSAGE_ATTEMPT_MAX:
         try:
             self.bot.send_message(chat_id=channel, text='[BOT] %s' % message)
             logger.info('Message send successfully.')
             return True
         except Exception as e:
             logger.exception(
                 'Could not send message in channel \"{channel}\": {exception}. Attempt {current_attempt} in {max_attempt}'
                 .format(channel=channel,
                         message=message,
                         exception=e,
                         current_attempt=current_attempt,
                         max_attempt=config.SEND_MESSAGE_ATTEMPT_MAX))
             current_attempt += 1
             time.sleep(config.SEND_MESSAGE_ATTEMPT_TIMEOUT_SEC)
     else:
         logger.error('Could not send message in {} attempts!'.format(config.SEND_MESSAGE_ATTEMPT_MAX))
         return False
Example #6
def resume_task(task_id):
    """
    Resume a task
    :param task_id:
    :return:
    """
    task = TaskOpt.get_task_by_task_id(None, task_id)
    if not task:
        logger.error(
            'resume_task can not find the task, task id={}. '.format(task_id))
        return Result(res=False, msg='can not find the task')

    if task.status not in ['pausing', 'pending', 'running']:
        logger.error(
            'resume_task task is not running, task id={}'.format(task_id))
        return Result(res=False, msg='task is not running')

    logger.info('resume_task task id={}'.format(task_id))
    try:
        ret = g_bk_scheduler.resume_job(task.aps_id)
    except JobLookupError:
        logger.exception('resume_task, job have been removed.')
        return Result(res=False, msg='job have been removed.')

    # If the task has already expired, ret will be None
    if ret:
        task.status = 'running'
        return Result(res=True, msg='')
    else:
        task.status = 'failed'
        return Result(res=False, msg='task has already expired')
Example #7
        def _snapshot(snapshot):
            x = CumulativeResult()
            build_doc = self._doc(index)
            cfg = self.repcfg.format(build_doc)
            for step in ("pre", "snapshot", "post"):
                state = registrar.dispatch(step)  # _TaskState Class
                state = state(get_src_build(), build_doc.get("_id"))
                logging.info(state)
                state.started()

                job = yield from self.job_manager.defer_to_thread(
                    self.pinfo.get_pinfo(step, snapshot),
                    partial(getattr(self, state.func), cfg, index, snapshot))
                try:
                    dx = yield from job
                    dx = StepResult(dx)

                except Exception as exc:
                    logging.exception(exc)
                    state.failed({}, exc)
                    raise exc
                else:
                    merge(x.data, dx.data)
                    logging.info(dx)
                    logging.info(x)
                    state.succeed({snapshot: x.data}, res=dx.data)
            return x
Example #8
def pause_task(task_id):
    """
    Pause a task
    :param task_id:
    :return:
    """
    task = TaskOpt.get_task_by_task_id(None, task_id)
    if not task:
        logger.error(
            'pause_task can not find the task, task id={}. '.format(task_id))
        return Result(res=False, msg='can not find the task')

    if task.status not in ['pausing', 'pending', 'running']:
        logger.error(
            'pause_task but task is not running, task id={}. '.format(task_id))
        return Result(res=False, msg='task is not running')

    logger.info('pause_task task id={}'.format(task_id))
    try:
        g_bk_scheduler.pause_job(task.aps_id)
    except JobLookupError:
        logger.exception('pause_task, job have been removed.')
        return Result(res=False, msg='pause_task, job have been removed.')

    task.status = 'pausing'
    return Result(res=True, msg='')
Example #9
def process_updated_tasks():
    """
    Handle task status changes
    :return:
    """
    global last_check_time
    logger.info(
        'process_updated_tasks, last check time={}'.format(last_check_time))
    # tasks = TaskOpt.get_all_need_check_task(last_time=last_check_time-timedelta(seconds=1))
    try:
        db_session = ScopedSession()
        last_time = last_check_time - timedelta(seconds=1)
        tasks = db_session.query(Task.id, Task.status, Task.last_update) \
            .filter(and_(Task.status.in_(('pausing', 'running', 'cancelled')), Task.last_update >= last_time)).all()
        last_check_time = datetime.now()
        for task_id, status, last_update in tasks:
            logger.info(
                'process_updated_tasks task id={}, status={}, last_update={}'.
                format(task_id, status, last_update))
            if last_update >= last_check_time:
                if status == 'cancelled':
                    cancel_task(task_id)
                elif status == 'pausing':
                    pause_task(task_id)
                elif status == 'running':
                    resume_task(task_id)

    except Exception as e:
        logger.exception(
            'process_updated_tasks catch exception e={}'.format(e))
        db_session.rollback()
    finally:
        ScopedSession.remove()
Example #10
    def _call(self, func, *args, **kwargs):
        '''
        Wraps decorated function and watches for successes and failures

        Args:
            func(function): decorated function
            *args: args passed to decorated function
            **kwargs: kwargs passed to decorated function
        '''
        with self._lock:
            current_state = self._check_state()
            if current_state == OPEN:
                self._open_circuit_failure_count += 1
                raise ConnectionError("Open circuit")
            try:
                result = func(*args, **kwargs)
            except self._allowed_exceptions as e:
                logger.info("Encountered allowed exception {}".format(
                    e.__class__))
                return  # not a failure, but not a success
            except self._failure_exceptions:
                logger.exception("Caught pre-defined failure exception")
                self._on_failure()
            except Exception as e:
                logger.exception("Caught unhandled exception, incrementing "
                                 "failure count")
                if self._failure_exceptions:
                    logger.info("Encountered non-failure exception {}".format(
                        e.__class__))
                    return  # not a failure, but not a success
                else:
                    self._on_failure()
            else:
                logger.debug("Successfully completed wrapped function")
                self._parse_result(result)
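
The _call wrapper above depends on helpers that are not shown (_check_state, _on_failure, _parse_result, the OPEN state). As a rough illustration of what such helpers typically track, here is a minimal, self-contained failure counter; every name and threshold in it is made up for the sketch.

import time

class SimpleBreakerState:
    """Illustrative state holder: opens after max_failures, half-opens after reset_timeout seconds."""

    def __init__(self, max_failures=5, reset_timeout=30.0):
        self.max_failures = max_failures
        self.reset_timeout = reset_timeout
        self.failure_count = 0
        self.opened_at = None

    def check_state(self):
        if self.opened_at is None:
            return "CLOSED"
        if time.time() - self.opened_at >= self.reset_timeout:
            return "HALF_OPEN"  # let one trial call through
        return "OPEN"

    def on_failure(self):
        self.failure_count += 1
        if self.failure_count >= self.max_failures:
            self.opened_at = time.time()

    def on_success(self):
        self.failure_count = 0
        self.opened_at = None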
Example #11
    def run(self):
        logger.info("DO -- <{}>".format(self.__class__.__name__))

        api = self.set_spider_option()
        api.get(self.register_url)

        try:
            self.sign_up(api)

            self.wait(api, 20, "//div[@class='card-body']")

            # get v2ray link
            if self.hyper_params['v2ray']:
                self.load_any_subscribe(
                    api,
                    "//div[@class='buttons']//a[contains(@class,'v2ray')]",
                    'data-clipboard-text', 'v2ray')

            # get ssr link
            if self.hyper_params['ssr']:
                self.load_any_subscribe(
                    api,
                    """//a[@onclick="importSublink('ssr')"]/..//a[contains(@class,'copy')]""",
                    'data-clipboard-text', 'ssr')
        except WebDriverException as e:
            logger.exception(">>> Exception <{}> -- {}".format(
                self.__class__.__name__, e))
        finally:
            api.quit()
Example #12
    def post(self):
        try:

            data = request.json
            Model_Apple = load_model("Model/Apple_model_inception.h5")
            im_b64 = data["image"]
            im_bytes = base64.b64decode(im_b64)  # im_bytes is a binary image
            im_file = BytesIO(im_bytes)  # convert image to file-like object
            img = Image.open(im_file)  # img is now PIL Image object
            img = img.resize((224, 224))
            # Preprocessing the image
            x = image.img_to_array(img)
            ## Scaling
            x = x / 255
            x = np.expand_dims(x, axis=0)
            preds = Model_Apple.predict(x)
            preds = np.argmax(preds, axis=1)

            if preds == 0:
                pass_preds = "Apple scab"
            elif preds == 1:
                pass_preds = "Apple Black_rot"
            elif preds == 2:
                pass_preds = "Cedar apple rust"
            else:
                pass_preds = "Healthy"

            return {"Predicted result": pass_preds}, 201

        except Exception as e:
            logger.exception(e)
            return {"msg": "Internal Error"}, 500
Example #13
    def browse_user_center(self, limit=3):
        """
        # Browse the user center
        :param limit: number of items to browse
        :return:
        """
        try:
            logger.info("用户中心浏览功能: browsing beginning")
            message_url = "https://www.facebook.com/profile.php"

            page_instance = None
            ready_browse = []
            for i in range(3):
                self.driver.get(message_url)
                user_lines = self.driver.find_elements_by_css_selector(
                    'div[id="fbTimelineHeadline"] li a')[1:4]
                for row in user_lines:
                    if row.text not in ready_browse:
                        page_instance = row
                        ready_browse.append(row.text)
                        break

                self.click(page_instance)
                self.browse_page()
                self.sleep()
            logger.info("用户中心浏览功能: browsing completed")
            return True, 0
        except Exception as e:
            logger.exception("用户中心浏览功能: browsing failed error-->{}".format(e))
            return self.fb_exp.auto_process(3)
Example #14
    def check(self, suggested, data):
        """Return list of tags from 'suggested': first is list of tags that
           passes checks and second is dict of tags whose check(s) did not
           passed (key is tag name and value is why it failed)."""
        passed = set()
        failed = {}
        # Execute all relevant checks and store their results in the 'results' dict
        results = {}
        suggested_set = set(suggested)
        relevant_mods = [mod for mod in self.checks if not set(mod.tags).isdisjoint(suggested_set)]
        for mod in relevant_mods:
            result = None
            try:
                result = mod.main(data)
                results[mod.__name__] = {'out': result, 'tags': mod.tags, 'desc': mod.desc}
            except:
                logger.exception('Something failed')
            logger.debug("Check %s returned %s" % (mod.__name__, result))
        # Now take the results of individual checks and compile lists of passed
        # and failed tags
        for result in results.itervalues():
            if result["out"]:
                for tag in result["tags"]:
                    passed.add(tag)
            else:
                for tag in result["tags"]:
                    failed.setdefault(tag, list()).append(result["desc"])

        return passed, failed
Example #15
    def put_taskmng_in_queue(self, task):
        query = sql.SQL("""
            INSERT INTO taskmng_queue
            (sessionuid, sessiontaskid, taskname, action, scheduled_on)
            VALUES( %s, %s, %s, %s, %s)
            RETURNING sessiontaskid
        """)

        params = (task['sessionuid'], task['session_taskid'],
                  task['session_task_name'], task['action'],
                  datetime.datetime.utcnow())

        try:
            db.cursor.execute(query, params)
            db.conn.commit()
            fetch = db.cursor.fetchone()
            logger.debug("FETCH: {}".format(fetch))
            return fetch['sessiontaskid']
        except psycopg2.IntegrityError as e:
            logger.warning(e)
            db.conn.rollback()
        except psycopg2.ProgrammingError:
            logger.exception("!ERROR")
            db.conn.rollback()
        except Exception:
            db.conn.rollback()
            raise
Example #16
    def set_job_by_track_ids(cls, track_ids, values):
        jobs = db_session.query(Job).filter(Job.track_id.in_(track_ids)).all()
        track_ids_copy = track_ids.copy()
        try:
            for job in jobs:
                track_ids.remove(job.track_id)
                value = values.get(job.track_id, {})
                new_status = value.get('status')
                new_result = value.get('result', '')
                new_traceback = value.get('traceback', '')
                if job.status != new_status:
                    # The first time the status becomes 'running' marks the start time
                    if new_status == 'running':
                        job.start_time = datetime.datetime.now()
                    if new_status in ['succeed', 'failed']:
                        job.end_time = datetime.datetime.now()

                    job.result = new_result
                    job.traceback = new_traceback
                    job.status = new_status
            db_session.commit()
        except:
            logger.exception('set_job_by_track_ids catch exception.')
            db_session.rollback()
            return track_ids_copy
        return track_ids
Example #17
    def screenshots(self, driver, err_code=-1, force=False):
        if self.headless or force or get_system_args()['screenshots_force']:
            try:
                screenshots_dir = get_system_args()['screenshots_dir']
                if not os.path.isdir(screenshots_dir):
                    os.mkdir(screenshots_dir)

                # First delete screenshots from more than 5 days ago, to keep the server disk from filling up
                photos = os.listdir(screenshots_dir)
                time_limit = (
                    datetime.now() -
                    timedelta(days=get_system_args()['screenshots_keep'])
                ).strftime("%Y-%m-%d_%H_%M_%S")
                for ph in photos:
                    if ph[0:19] < time_limit:
                        os.remove("{}//{}".format(screenshots_dir, ph))

                path = "{}//{}_{}_{}_{}.png".format(
                    screenshots_dir,
                    datetime.now().strftime("%Y-%m-%d_%H_%M_%S"), self.account,
                    self.password, err_code)
                logger.info("save screenshots, path={}".format(path))
                driver.get_screenshot_as_file(path)
            except Exception as e:
                logger.exception('screenshots exception={}'.format(e))
Example #18
    def create(self, attrs):

        query_template = """
            INSERT INTO %s
            ({})
            VALUES ({})
            RETURNING id
        """ % self.table

        fields = ', '.join(attrs.keys())
        logger.info("Fields: {}".format(fields))
        values_placeholders = ', '.join(['%s' for v in attrs.values()])

        query = query_template.format(fields, values_placeholders)
        params = tuple(attrs.values())

        logger.info("Mogrify: {}".format(db.cursor.mogrify(query, params)))

        try:
            db.cursor.execute(query, params)
            db.conn.commit()
            fetch = db.cursor.fetchone()
            logger.debug("FETCH: {}".format(fetch))
            return fetch['id']
        except psycopg2.IntegrityError as e:
            logger.info("!!!!ERROR: %s", e)
            db.conn.rollback()
            return self.get_id_by_name(attrs['name'])
        except psycopg2.ProgrammingError:
            logger.exception("!ERROR")
            db.conn.rollback()
        except Exception:
            db.conn.rollback()
            raise
Example #19
def insert_rep(loan_apply_id,tran_no,last_repayment_day,crnt_pr,arfn_pr,crnt_int,arfn_int):
    try:
        f_arfn_pr=float(arfn_pr)    # principal already repaid
        f_arfn_int=float(arfn_int)  # interest already repaid
        f_crnt_pr=float(crnt_pr)    # principal due
        f_crnt_int=float(crnt_int)  # interest due

        if f_arfn_int<f_crnt_int or f_arfn_pr<f_crnt_pr:
            if DAO_overdue.get_is_overdue(loan_apply_id,tran_no):
                status=2
            elif f_arfn_pr==0 and f_arfn_int==0:
                status=0
            else:
                status=1
        else:
            status=3

        total_repayment=f_arfn_pr+f_arfn_int
        logger.info("插入还款编号-"+str(loan_apply_id)+",期数-"+str(tran_no)+"")
        REP_INSERT_STR="INSERT INTO sc_repayment  \
                       (loan_apply_id,repayment_installments,re_principal,re_interest, \
                       clear_date,total_repayment,status)  \
                       VALUES  \
                       (%s,%s,%s,%s,%s,%s,%s)"%(loan_apply_id,tran_no,arfn_pr,arfn_int,last_repayment_day,total_repayment,status)
        INSERT_UPDATE_TRAN(REP_INSERT_STR)
    except:
        logger.exception('exception')

    return None
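
This helper builds its SQL with Python string formatting and hands it to INSERT_UPDATE_TRAN from Example #4. If any of those values ever come from user input, a parameterized variant of the same pattern is safer; a sketch, assuming the same db.session and logger:

from sqlalchemy import text

def insert_update_tran_params(sql_str, params=None):
    """Like INSERT_UPDATE_TRAN, but with bound parameters instead of string formatting."""
    try:
        db.session.execute(text(sql_str), params or {})
        db.session.commit()
    except Exception:
        logger.exception('exception')
        db.session.rollback()

With this variant the VALUES clause of the repayment insert would be written as VALUES (:loan_apply_id, :tran_no, ...) and the actual values passed in the params dict.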
Example #20
    def check(self, suggested, data):
        """Return list of tags from 'suggested': first is list of tags that
           passes checks and second is dict of tags whose check(s) did not
           passed (key is tag name and value is why it failed)."""
        passed = set()
        failed = {}
        # Execute all relevant checks and store their results in the 'results' dict
        results = {}
        suggested_set = set(suggested)
        relevant_mods = [
            mod for mod in self.checks
            if not set(mod.tags).isdisjoint(suggested_set)
        ]
        for mod in relevant_mods:
            result = None
            try:
                result = mod.main(data)
                results[mod.__name__] = {
                    'out': result,
                    'tags': mod.tags,
                    'desc': mod.desc
                }
            except:
                logger.exception('Something failed')
            logger.debug("Check %s returned %s" % (mod.__name__, result))
        # Now take the results of individual checks and compile lists of passed
        # and failed tags
        for result in results.itervalues():
            if result["out"]:
                for tag in result["tags"]:
                    passed.add(tag)
            else:
                for tag in result["tags"]:
                    failed.setdefault(tag, list()).append(result["desc"])

        return passed, failed
Example #21
    def run(self) -> None:
        # logger.warning('This is a development server. Do not use it in a production deployment.')
        try:
            for task_name in self.deploy_cluster:
                try:
                    schedule.every(self.crontab['action']).minutes.do(
                        self.push_task, task_name=task_name)
                    schedule.every(self.crontab['refresh']).minutes.do(
                        self.rc.refresh,
                        key_name=REDIS_SECRET_KEY.format(task_name))
                    logger.info(
                        f"start {task_name}/crontab:{self.crontab['action']} minutes"
                    )

                except schedule.IntervalError:
                    logger.error('interval set error')

                self.crontab['action'] += 5

            while True:
                schedule.run_pending()
                time.sleep(1)

        except Exception as err:
            logger.exception('Exception occurred ||{}'.format(err))
            noticer.send_email(text_body='{}'.format(err), to='self')
        except KeyboardInterrupt as err:
            logger.stop('Forced stop ||{}'.format(err))
Example #22
    def post(self):
        try:

            data = request.json
            Model_Apple = load_model("Model/Corn_model_inception.h5")
            im_b64 = data["image"]
            im_bytes = base64.b64decode(im_b64)  # im_bytes is a binary image
            im_file = BytesIO(im_bytes)  # convert image to file-like object
            img = Image.open(im_file)  # img is now PIL Image object
            img = img.resize((224, 224))
            # Preprocessing the image
            x = image.img_to_array(img)
            ## Scaling
            x = x / 255
            x = np.expand_dims(x, axis=0)
            preds = Model_Apple.predict(x)
            preds = np.argmax(preds, axis=1)

            if preds == 0:
                pass_preds = "Corn_(maize)___Cercospora_leaf_spot Gray_leaf_spot"
            elif preds == 1:
                pass_preds = "Corn_(maize)___Northern_Leaf_Blight"
            elif preds == 2:
                pass_preds = "Corn_(maize)___healthy"
            elif preds == 3:
                pass_preds = "Corn_(maize)___Common_rust_"

            return {"Predicted result": pass_preds}, 201

        except Exception as e:
            logger.exception(e)
            return {"msg": "Internal Error"}, 500
Example #23
 def fetch_by_social_id(self, social_id):
     try:
         return self.fetch_by_field(field_name='social_id',
                                    field_value=social_id)
     except Exception:
         logger.exception('FETCH BY SOCIAL ID')
         return {}
Example #24
    def process_policy_clause_mobile(self):
        """
        Terms and usage policy verification
        :return: True on success, False on failure
        """
        try:
            logger.info("条款和使用政策验证处理中")
            WebDriverWait(self.driver, 3).until(
                EC.presence_of_element_located(
                    (By.CSS_SELECTOR, self.get_key_words(14))))
            check_button = WebDriverWait(self.driver, 3).until(
                EC.presence_of_element_located(
                    (By.CSS_SELECTOR, 'button[value="J’accepte"]')))
            self.click(check_button)
            check_revenir = WebDriverWait(self.driver, 3).until(
                EC.presence_of_element_located(
                    (By.CSS_SELECTOR,
                     'button[value="Revenir au fil d’actualité"]')))
            self.click(check_revenir)
        except Exception as e:
            logger.exception("条款和使用政策验证处理异常, e={}".format(e))
            return False, 14

        logger.info("条款和使用政策验证处理完成")
        return True, 14
Example #25
    def process_phone_sms_verify_mobile(self):
        """
        # SMS verification code check
        :param kwargs:
        :return: True on success, False on failure
        Note: adjusted to return False for easier debugging
        """
        try:
            logger.info("手机短信验证处理中")
            WebDriverWait(self.driver, 6).until(
                EC.presence_of_element_located(
                    (By.CSS_SELECTOR, self.get_key_words(7))))
            # # Operate the dropdown list
            # s1 = Select(self.driver.find_element_by_name('p_pc'))
            # s1.select_by_value('CN')
            # # Enter the phone number
            # WebDriverWait(self.driver, 6).until(
            #     EC.presence_of_element_located((By.CSS_SELECTOR, 'input[type="tel"]'))).send_keys('18000000000')
            # # Click continue
            # WebDriverWait(self.driver, 6).until(
            #     EC.presence_of_element_located((By.CSS_SELECTOR, 'button[id="checkpointSubmitButton-actual-button"]'))).click()
            # email_code = WebDriverWait(self.driver, 6).until(
            #     EC.presence_of_element_located((By.CSS_SELECTOR, 'input[autocorrect="off"]')))
            # if email_code:
            #     logger.info("The mailbox verification code has been sent successfully")
            #     email_code.send_keys('456895')

        except Exception as e:
            logger.exception("处理手机短信验证处理异常, e={}".format(e))
            return False, 7
        logger.info("处理手机短信验证处理完成")
        return False, 7
Example #26
 def wrapper(*args, **kwargs):
     try:
         return func(*args, **kwargs)
     except Exception as e:
         if logger:
             logger.exception(e)
         raise e
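
This wrapper is only the inner half of a logging decorator. A self-contained version of the full pattern, with illustrative names, might look like:

import functools
import logging

logger = logging.getLogger(__name__)

def log_exceptions(func):
    """Decorator that logs any exception raised by the wrapped callable and re-raises it."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            logger.exception("Unhandled exception in %s", func.__name__)
            raise  # a bare raise preserves the original traceback
    return wrapper

A bare raise is preferable to raise e because it keeps the original traceback on Python 2 (on Python 3 the traceback is carried on the exception either way).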
Example #27
    def post(self):
        data = json.loads(request.data)
        # logger.debug(data["hr"])
        # logger.debug(data["bin"])

        meta = io.BytesIO(bytes(data["hr"]))
        video = io.BytesIO(base64.b64decode(data["bin"]))

        try:
            minioClient.make_bucket(config.PATIENT_ID)
        except BucketAlreadyOwnedByYou:
            pass
        except BucketAlreadyExists:
            pass
        except ResponseError as err:
            logger.exception("MinIO Error")

        try:
            res = minioClient.put_object(config.PATIENT_ID, 'video', video,
                                         video.getbuffer().nbytes)
            logger.debug(res)
            res = minioClient.put_object(config.PATIENT_ID, 'meta', meta,
                                         meta.getbuffer().nbytes)
            logger.debug(res)
        except Exception:
            logger.exception("MinIO Error")

        return "success", 200
Example #28
def get_type(filepath=None, data=None):
    try:
        if filepath is not None:
            ans = filetype.guess_mime(filepath)
            logger.debug('filetype guess: {} {}'.format(filepath, ans))
            if ans is None:
                if filepath[-5:] == '.json':
                    with open(filepath, 'r') as f:
                        data = json.load(f)
                    if type(data) == list:
                        return 'Sequence'
                    return "Graph"
                elif filepath[-4:] == '.mat':
                    return 'Graph'
                elif filepath[-4:] == '.csv':
                    return "DataFrame"
                return "Text"
            if ans.find('image') != -1:
                return "Image"
            if ans.find('video') != -1:
                return 'Video'
            if ans.find('audio') != -1:
                return 'Audio'
        raise NotImplementedError
    except Exception as e:
        logger.exception('Guess Type Error : {} {}'.format(filepath, e))
Example #29
def _get_landlord_id(landlord_url: str) -> str:
    try:
        return landlord_url.split('/')[5]
    except IndexError:
        return urlparse(landlord_url)[1].split('.')[0]
    except Exception as e:  # pylint: disable=broad-except
        logger.exception(e)
        return landlord_url
Example #30
 def wrapper(*args, **kwargs):
     try:
         auth = request.authorization
         if not auth or not is_authenticated(auth.username, auth.password):
             return unauthorized()
         return f(*args, **kwargs)
     except Exception as e:
         logger.exception(e)
Example #31
    def __getitem__(self, name):
        if name not in self._db_object.keys():
            try:
                self[name] = self.__dict__[name]
            except Exception as e:
                logger.exception(e)
                raise Exception("Error get {}".format(name))

        return self._db_object[name]
Example #32
 def fetch_by_username(self, username):
     try:
         return dict(
             self.fetch_by_field(field_name='username',
                                 field_value=username))
     except Exception:
         logger.exception('FETCH BY Username')
         db.conn.rollback()
         return {}
Example #33
def load_user(user_id):
    try:
        u = Usermod()
        userd = u.fetch_by_field(field_name='id', field_value=user_id)
        u.__dict__.update(userd)
        return u
    except Exception:
        logger.exception("!! Load user exception")
        u.__dict__.update({})
        return u
Example #34
 def run(self, tags, rules, data):
   """Run rules (run all when "rules" is empty, othervise run only these
      listed there) and return dict with their answers"""
   results = []
   for mod in self.rules:
      # Skip the rule if we are supposed to run only specific rules and this
      # one is not the chosen one
     if len(rules) > 0 and mod.__name__ not in rules:
       logger.debug("Skipping %s because only specific rules are supposed to run" % mod.__name__)
       continue
     # Skip this rule if there is no intersection of tags we should run
     # and tags this rule should be run for
     if len([val for val in tags if val in mod.tags]) == 0:
       logger.debug("Skipping %s because it is not tagged with provided tags" % mod.__name__)
       continue
     # Finally run the rule
     func = getattr(mod, 'main')
     func_text = getattr(mod, 'text')
     name = getattr(mod, 'name')
     result = None
     used = []
     text = ''
     # Reset list of data rule used
     data.reset_access_list()
     # Now run the rule
     try:
       result = func(data)
     except DataNotAvailable:
       logger.error("Data not available for %s" % mod.__name__)
       result = False
     except:
       logger.exception("Something failed badly when executing %s" % mod.__name__)
       result = False
     logger.info("Rule %s returned %s" % (mod.__name__, result))
     # Store list of data rule has used
     used = data.get_access_list()
     # Now if necessary, get description of whats wrong
     if result:
       try:
         text = func_text(result)
       except:
         logger.exception("Something failed badly when getting description for %s" % mod.__name__)
     # Determine what the result was
     if result:
       status = 'FAIL'
     elif result is False:
       status = 'SKIP'
     elif result is None:
       status = 'PASS'
     else:
       logger.error("Failed to understand to result of %s" % result)
       continue
     # Save what was returned
     results.append({'label': mod.__name__, 'status': status, 'result': result, 'name': name, 'text': text, 'used': used})
   return results
Example #35
	def save_data(self):
		"""
		Save the monitor data to file
		"""
		try:
			data_file = open(Config.DATA_FILE, 'wb')
			pickle.dump(self.vm_data, data_file)
			pickle.dump(self.last_migration, data_file)
			data_file.close()
		except Exception:
			logger.exception("ERROR saving data to the file: " + Config.DATA_FILE + ". Changes not stored!!")
Example #36
def SendEmail_(msg):
  msg['From'] = config.kEmailFrom
  msg['To'] = ', '.join(config.kEmailTo)

  try:
    email_server = smtplib.SMTP(config.kEmailServer)
    email_server.starttls()
    email_server.login(config.kEmailFrom, config.kEmailFromPassword)
    email_server.sendmail(config.kEmailFrom, config.kEmailTo, msg.as_string())
    email_server.quit()
  # SMTPConnectError, SMTPHeloError, SMTPException, SMTPAuthenticationError, ...
  except Exception as e:
    logger.exception(e)
Example #37
	def load_data(self):
		"""
		Load the monitor data from file
		"""
		if os.path.isfile(Config.DATA_FILE):
			try:
				data_file = open(Config.DATA_FILE, 'rb')  # binary mode to match the 'wb' used in save_data
				self.vm_data = pickle.load(data_file)
				self.last_migration = pickle.load(data_file)
				data_file.close()
			except Exception:
				logger.exception("ERROR loading data file: " + Config.DATA_FILE + ". Data not loaded.")
		else:
			logger.debug("No data file: " + Config.DATA_FILE)
Example #38
	def get_monitored_vms(vm_list, user = None):
		"""
		Get the list of VMs that have the monitored metrics available, filtered by user
		"""
		res = []
		try:
			for vm in vm_list:
				if vm.free_memory:
					# Check the user filter
					if not user or vm.user_id == user:  
						res.append(vm)
		except:
			logger.exception("Error monitoring VMs!")

		return res
Example #39
    def _check_timeout(self):
        while True:
            try:
                to_wakeup = []
                now = time.time()
                for session in self.sessions:
                    _, ts = self.sessions[session]
                    if now - ts > REQUEST_TIMEOUT:
                        to_wakeup.append(session)

                for session in to_wakeup:
                    logger.error("session %d timeout, wakeup it", session)
                    self._wakeup(session, None)
            except:
                 logger.exception("check timeout failed")
            gevent.sleep(3)
Example #40
	def clean_old_data(self, current_vms):
		"""
		Clean old data from the Monitor
		Delete the values of VMs that do not appear in the
		monitoring system.
		To avoid an uncontrolled increase of memory usage.
		"""
		current_vmids = [vm.id for vm in current_vms]

		try:
			for vmid in self.vm_data.keys():
				if vmid not in current_vmids:
					logger.debug("Removing data for old VM ID: %s" % str(vmid))
					del self.vm_data[vmid]
		except:
			logger.exception("ERROR cleaning old data.")
Example #41
    def resolve_address_google(self, address, **kwargs):
        try:
            encoded_address = encode(address)
            address = urllib.quote(encoded_address)

            url = "https://maps.googleapis.com/maps/api/geocode/json?address=%s&sensor=false&region=za&key=%s" % (address, configuration["environment"]["google_key"])
            response = urllib2.urlopen(url)
            js = response.read()
            try:
                js = json.loads(js)
            except ValueError:
                logger.exception("Error trying to resolve %s" % address)
                return None

            results = []
            if "status" in js and js["status"] != "OK": 
                logger.warn("Error trying to resolve %s - %s" % (address, js.get("error_message", "Generic Error")))
                return None

            if "results" in js and len(js["results"]) > 0:
                for result in js["results"]:

                    res = self.reject_partial_match(result)
                    if res: continue

                    if "reject_resolution_to_main_place" in kwargs:
                        try:
                            res = self.reject_resolution_to_main_place(result["formatted_address"], int(kwargs["reject_resolution_to_main_place"][0]))
                        except (ValueError, TypeError):
                            res = self.resolution_to_main_place(result["formatted_address"])
                        if res: continue

                    geom = result["geometry"]["location"]
                    results.append({
                        "lat" : geom["lat"],
                        "lng" : geom["lng"],   
                        "formatted_address" : result["formatted_address"],
                        "source" : "Google Geocoding API",
                    })

                if len(results) == 0: return None
                return results
        except Exception:
            logger.exception("Error trying to resolve %s" % address)
        return None
Example #42
    def resolve_address_google(self, address, **kwargs):
        encoded_address = encode(address)
        address = urllib.quote(encoded_address)

        url = "https://maps.googleapis.com/maps/api/geocode/json?address=%s&sensor=false&region=za&key=%s" % (address, GOOGLE_API_KEY)
        response = urllib2.urlopen(url)
        js = response.read()
        try:
            js = json.loads(js)
        except ValueError as e:
            logger.exception("Error trying to resolve %s" % address)
            raise StandardError("Couldn't resolve %s: %s" % (address, e.message))

        results = []
        if "status" in js and js["status"] not in ("OK", "ZERO_RESULTS"):
            logger.error("Error trying to resolve %s - %s (%s)" % (address, js.get("error_message", "Generic Error"), js))
            raise StandardError("Couldn't resolve %s: %s" % (address, js.get("error_message")))

        if "results" in js and len(js["results"]) > 0:
            for result in js["results"]:

                res = self.reject_partial_match(result)
                if res: continue

                if "reject_resolution_to_main_place" in kwargs:
                    try:
                        res = self.reject_resolution_to_main_place(result["formatted_address"], int(kwargs["reject_resolution_to_main_place"][0]))
                    except (ValueError, TypeError):
                        res = self.resolution_to_main_place(result["formatted_address"])
                    if res: continue

                geom = result["geometry"]["location"]
                results.append({
                    "lat" : geom["lat"],
                    "lng" : geom["lng"],   
                    "formatted_address" : result["formatted_address"],
                    "source" : "Google Geocoding API",
                })

            if len(results) == 0: return None
            return results
Example #43
    def run(self, tags, rules, data):
        """Run rules (run all when "rules" is empty, othervise run only these
           listed there) and return dict with their answers"""
        results = []
        if len(rules) == 0:
            for rule in self.list_rules(tags):
                rules.append(rule["label"])

        for rulename in rules:
            rule = self.rules[rulename]
            try:
                result = rule.main(data)
            except DataNotAvailable:
                logger.error("Data not available for %s" % rule.__name__)
                result = False
            except:
                logger.exception("Something failed badly when executing %s" % rule.__name__)
                result = False

            logger.info("Rule %s returned %s" % (rule.__name__, result))
            # Store list of data rule has used
            used = data.get_access_list()
            # Now if necessary, get description of whats wrong
            if result:
                try:
                    text = rule.text(result)
                    status = "FAIL"
                except:
                    logger.exception("Something failed badly when getting description for %s" % rule["name"])
            else:
                status = Rules.result_types[result]
                text = ""

            results.append({'label': rule.__name__,
                            'status': status,
                            'result': result,
                            'name': rule.name,
                            'text': text,
                            'used': used})
        return results
Example #44
    def _on_disconnected(self):
        logger.error("sock disconnected")

        # release all sessions and write buf
        try:
            self.write_buf = []
            self._wakeup_all()
        except:
            logger.exception("release all sessions failed")

        while True:
            self.sock = None
            try:
                logger.info("reconnect to (%s:%s)" % self.addr)
                self.connect()
            except:
                gevent.sleep(3)
                exc_type, exc_value, exc_traceback = sys.exc_info()
                logger.error("exception: %s", traceback.format_exception(exc_type, exc_value, exc_traceback))
                continue
            logger.info("reconnect to (%s:%s) succeed" % self.addr)
            break
Example #45
 def check(self, suggested, data):
   """Return list of tags from 'suggested': first is list of tags that
      passes checks and second is dict of tags whose check(s) did not
      passed (key is tag name and value is why it failed)."""
   passed = []
   failed = {}
   # Execute all relevant checks and store their results in the 'results' dict
   results = {}
   for mod in self.checks:
      # Skip this check if it is not relevant to any of the requested tags
     # (has no intersection)
     if len([val for val in suggested if val in mod.tags]) == 0:
       continue
     # Finally run the rule
     func = getattr(mod, 'main')
     result = None
     try:
        result = func(data)
        results[mod.__name__] = {'out': result, 'tags': mod.tags, 'desc': mod.desc}
     except:
       logger.exception('Something failed')
     logger.debug("Check %s returned %s" % (mod.__name__, result))
   # Now take the results of individual checks and compile lists of passed
   # and failed tags
   for tag in suggested:
     status = True
     for k, v in results.iteritems():
       if tag in v['tags']:
         if not v['out']:
           status = False
           break
     if status:
       passed.append(tag)
     else:
       if tag in failed:
         failed[tag].append(v['desc'])
       else:
         failed[tag] = [v['desc']]
   return passed, failed
Example #46
def send_email(msg, recipients):
    flag = True
    while flag:
        if config.fail_conf().report == True:
            if recipients == "admins":
                if config.email_conf().admin_recipients == ['']:
                    try:
                        raise LookupError("No administrator emails configured, sending mail failed.")
                    except:
                        log.exception("No administrator emails configured, sending mail failed.")
                        flag = False
                        break
                message = MIMEText(msg.greeter_admin + msg.body)
                message['To'] = ', '.join(config.email_conf().admin_recipients)
            elif recipients == "users":
                if config.email_conf().user_recipients == ['']:
                    try:
                        raise LookupError("No user emails configured, sending mail failed.")
                    except:
                        log.exception("No user emails configured, sending mail failed.")
                        flag = False
                        break
                message = MIMEText(msg.greeter_user + msg.body)
                message['To'] = ', '.join(config.email_conf().user_recipients)
            message['From'] = config.email_conf().sender
            message['Subject'] = msg.subject
            if config.email_conf().smtp_conn in ('ssl', 'tls'):
                if config.email_conf().smtp_user == None or config.email_conf().smtp_pass == None:
                    log.warning("SMTP Connection type is configured as %s, but a username and password hasn't been configured. This method require login credentials. Trying plain text connection instead." % config.email_conf().smtp_conn.upper())
                    config.email_conf().smtp_conn = None
            try:
                if config.email_conf().smtp_conn == 'ssl':
                    send = smtplib.SMTP_SSL(config.email_conf().smtp_server, config.email_conf().smtp_port)
                else:
                    send = smtplib.SMTP(config.email_conf().smtp_server, config.email_conf().smtp_port)
                if config.email_conf().smtp_conn == 'tls':
                    send.ehlo()
                    send.starttls()
                    send.ehlo()
                    try:
                        send.login(config.email_conf().smtp_user, config.email_conf().smtp_pass)
                    except:
                        pass
                send.sendmail(config.email_conf().sender, message['To'], message.as_string())
                send.close()
            except:
                log.exception("SMTPLIB failed to send email, please check your connection and configuration")
            flag = False
        else:
            # Reporting is disabled; exit the loop instead of spinning forever
            flag = False
Example #47
    s.starttls()
    s.login(username, password)
    s.sendmail(msg['From'], to_address, msg.as_string())
    s.close()

try:
    url = sys.argv[1]
    to_address = sys.argv[2]
    logger.info('Checking ' + url)
    # Get the page. Leaving verify=True caused SSL errors on some sites 
    page = requests.get(url, verify=False)
    last_modified = datetime.strptime(page.headers['last-modified'], "%a, %d %b %Y %H:%M:%S %Z")
    conn = sqlite3.connect(db_name)
    cur = conn.cursor()
    cur.execute("SELECT last_modified FROM page WHERE url = ?", (url,))
    prev_last_modified = cur.fetchone()
    if not prev_last_modified:
        cur.execute("INSERT INTO page (url, last_modified) VALUES (?, ?)", (url, last_modified))
        send_notification(url, page, last_modified, to_address)
        logger.info('New page encountered - notification sent to ' + to_address)
    elif last_modified > datetime.strptime(prev_last_modified[0], "%Y-%m-%d %H:%M:%S"):
        send_notification(url, page, page.headers['last-modified'], to_address)
        cur.execute("UPDATE page SET last_modified = ? WHERE url = ?", (last_modified, url))
        logger.info('Page changed - notification sent to ' + to_address)
    else:
        logger.info('Page has not changed')
    conn.commit()

except:
    logger.exception("Error encountered - Could be bad args")
Example #48
	def monitor_vm(self, vm, all_vms):
		"""
		Main function of the monitor
		""" 
		try:
			vm_pct_free_memory = float(vm.free_memory)/float(vm.total_memory) * 100.0

			if vm.id not in self.vm_data:
				self.vm_data[vm.id] = VMMonitorData(vm.id)
			
			if self.vm_data[vm.id].mem_diff is None:
				self.vm_data[vm.id].mem_diff = vm.real_memory - vm.total_memory

			vmid_msg = "VMID " + str(vm.id) + ": "
			vm.host = self.get_host_info(vm.host.id)

			logger.info(vmid_msg + "Real Memory: " + str(vm.real_memory))
			logger.info(vmid_msg + "Total Memory: " + str(vm.total_memory))
			logger.info(vmid_msg + "Free Memory: %d (%.2f%%)" % (vm.free_memory, vm_pct_free_memory))

			mem_over_ratio = Config.MEM_OVER
			if vm.mem_over_ratio:
				mem_over_ratio = vm.mem_over_ratio

			if vm_pct_free_memory < (mem_over_ratio - Config.MEM_MARGIN) or vm_pct_free_memory > (mem_over_ratio + Config.MEM_MARGIN):
				now = time.time()
		
				logger.debug(vmid_msg + "VM %s has %.2f%% of free memory, change the memory size" % (vm.id, vm_pct_free_memory))
				if self.vm_data[vm.id].last_set_mem is not None:
					logger.debug(vmid_msg + "Last memory change was %s secs ago." % (now - self.vm_data[vm.id].last_set_mem))
				else:
					self.vm_data[vm.id].original_mem = vm.allocated_memory
					logger.debug(vmid_msg + "The memory of this VM has been never modified. Store the initial memory  : " + str(self.vm_data[vm.id].original_mem))
					self.vm_data[vm.id].last_set_mem = now

				if (now - self.vm_data[vm.id].last_set_mem) < Config.COOLDOWN:
					logger.debug(vmid_msg + "It is in cooldown period. No changing the memory.")
				else:
					used_mem = vm.total_memory - vm.free_memory
					min_free_memory = Config.MIN_FREE_MEMORY
					# check if the VM has defined a specific MIN_FREE_MEMORY value
					if vm.min_free_mem:
						min_free_memory = vm.min_free_mem
					# if there is no free memory, use the exponential backoff idea
					if vm.free_memory <= min_free_memory:
						logger.debug(vmid_msg + "No free memory in the VM!")
						if self.vm_data[vm.id].no_free_memory_count > 1:
							# if this is the third time with no free memory use the original size
							logger.debug(vmid_msg + "Increase the mem to the original size.")
							new_mem =  self.vm_data[vm.id].original_mem
							self.vm_data[vm.id].no_free_memory_count = 0
						else:
							logger.debug(vmid_msg + "Increase the mem with 50% of the original.")
							new_mem =  int(used_mem + (self.vm_data[vm.id].original_mem - used_mem) * 0.5)
							self.vm_data[vm.id].no_free_memory_count += 1
					else:
						divider = 1.0 - (mem_over_ratio/100.0)
						logger.debug(vmid_msg + "The used memory %d is divided by %.2f" % (int(used_mem), divider))
						new_mem =  int(used_mem / divider)

					# Check for minimum memory
					if new_mem < Config.MEM_MIN:
						new_mem = Config.MEM_MIN

					# add diff to new_mem value and to total_memory to make it real_memory (vm.real_memory has delays between updates)
					new_mem += self.vm_data[vm.id].mem_diff
					vm.total_memory += self.vm_data[vm.id].mem_diff
					
					# We never set more memory than the initial amount
					if new_mem > self.vm_data[vm.id].original_mem:
						new_mem = self.vm_data[vm.id].original_mem
	
					if abs(int(vm.total_memory)-new_mem) < Config.MEM_DIFF_TO_CHANGE:
						logger.debug(vmid_msg + "Not changing the memory. Too small difference.")
					else:
						logger.debug(vmid_msg + "Changing the memory from %d to %d" % (vm.total_memory, new_mem))
						if new_mem > vm.total_memory:
							# If we increase the memory we must check if the host has enough free space
							if not self.host_has_memory_free(vm.host,new_mem-vm.total_memory):
								# The host has not enough free memory. Let's try to migrate a VM.
								logger.debug(vmid_msg + "The host " + vm.host.name + " has not enough free memory!")
								if Config.MIGRATION:
									logger.debug(vmid_msg + "Let's try to migrate a VM.")
									if vm.host.id in self.last_migration and (now - self.last_migration[vm.host.id]) < Config.MIGRATION_COOLDOWN:
										logger.debug("The host %s is in migration cooldown period, let's wait.." % vm.host.name)
									else:
										if self.migrate_vm(vm.id, vm.host, all_vms):
											logger.debug("A VM has been migrated from host %d. Store the timestamp." % vm.host.id)
											self.last_migration[vm.host.id] = now
								else:
									logger.debug(vmid_msg + "Migration is disabled.")
									if Config.FORCE_INCREASE_MEMORY:
										logger.debug(vmid_msg + "But Force increase memory is activated. Changing memory.")
										self.change_memory(vm.id, vm.host, new_mem)
										self.vm_data[vm.id].last_set_mem = now
									else:
										logger.debug(vmid_msg + "Not increase memory.")
							else:
								logger.debug(vmid_msg + "The host " + vm.host.name + " has enough free memory.")
								self.change_memory(vm.id, vm.host, new_mem)
								self.vm_data[vm.id].last_set_mem = now
						else:
							self.change_memory(vm.id, vm.host, new_mem)
							self.vm_data[vm.id].last_set_mem = now
		except:
			logger.exception("Error in monitor loop!")
Example #49
 def wrapper(*args, **kwargs):
   try:
     func(*args, **kwargs)
   except Exception as e:
     logger.exception(e)
Example #50
    png = Image.new('RGB', (WIDTH*LEVEL, HEIGHT*LEVEL))
    yield png
    logger.info("Saving image")
    os.makedirs(os.path.split(OUTPUT_FILE)[0], exist_ok=True)
    png.save(OUTPUT_FILE, "PNG")


def build_png(png, time_as_url):
    with tqdm(desc="Tiles downloaded", total=LEVEL**2, leave=True,
              unit="tile", unit_scale="true") as pbar:
        for x in range(LEVEL):
            for y in range(LEVEL):
                tile_url = IMAGE_URL.format(LEVEL, WIDTH, time_as_url, x, y)
                tiledata = requests.get(tile_url, timeout=TIMEOUT).content
                tile = Image.open(BytesIO(tiledata))
                png.paste(tile, (WIDTH*x, HEIGHT*y, WIDTH*(x+1), HEIGHT*(y+1)))
                pbar.update()


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        logger.error("Interrupted by user")
    except requests.exceptions.ConnectionError:
        logger.exception("Connection error! Are you online?")
        sys.exit(1)
    except requests.exceptions.Timeout:
        logger.exception("Timeout error!")
        sys.exit(1)