Example #1
File: cron_utils.py Project: onlyfu/T3
    def analyze(self, timestamp, cron_str):
        """
        Parse a cron expression.
        :param timestamp: reference timestamp
        :param cron_str: five-field cron expression
        :return: number of seconds until the next matching time
        """
        x = time.localtime(timestamp)
        second = x.tm_sec
        minute = x.tm_min
        hour = x.tm_hour
        day = x.tm_mday
        month = x.tm_mon
        year = x.tm_year

        cron_list = cron_str.split(' ')

        second_str = cron_list[0]
        minute_str = cron_list[1]
        hour_str = cron_list[2]
        day_str = cron_list[3]
        month_str = cron_list[4]

        right_second, second_index, next_second_index, second_list, second_carry = self.process_base(second_str, 0, 59, second, 0)
        right_minute, minute_index, next_minute_index, minute_list, minute_carry = self.process_base(minute_str, 0, 59, minute, second_carry)
        right_hour, hour_index, next_hour_index, hour_list, hour_carry = self.process_base(hour_str, 0, 23, hour, minute_carry)
        right_day, day_index, next_day_index, day_list, day_carry = self.process_base(day_str, 1, 31, day, hour_carry)
        right_month, month_index, next_month_index, month_list, month_carry = self.process_base(month_str, 1, 12, month, day_carry)

        # left_days = (datetime.date(year, right_month, right_day) - datetime.date(year, month, day)).days
        # left_hour = right_hour - hour
        # left_minute = right_minute - minute
        # left_second = right_second - second
        # # Check from hours down to seconds, one by one
        # if left_hour < 0:
        #     # 处理小时
        #     left_hour += 24
        # elif left_hour == 0 and left_minute < 0:
        #     left_minute += 60000
        # elif left_hour == 0 and left_minute == 0 and left_second < 0:
        #     if minute_str == '*':
        #         left_second += 60
        #     elif hour_str == '*':
        #         left_minute += 60
        #     elif day_str == '*':
        #         left_hour += 24
        #
        # left_timestamp = (((left_days * 24 + left_hour) * 60) + left_minute) * 60 + left_second
        if month_carry:
            right_month += month_carry
        date = ('{}-%s-%s %s:%s:%s' % tuple([('00'+str(i))[-2:] for
                                             i in (month, day, hour, minute, second)])).format(year)
        right_date = ('{}-%s-%s %s:%s:%s' % tuple([('00'+str(i))[-2:] for
                                             i in (right_month, right_day, right_hour, right_minute, right_second)])).\
            format(year)

        left_timestamp = DateUtils.str_to_time(right_date) - DateUtils.str_to_time(date)
        return left_timestamp
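A minimal usage sketch for analyze(); it assumes the project's CronUtils class and DateUtils helper are importable as in the other examples, and the cron string is illustrative:

import time
# from tools.cron_utils import CronUtils   # assumed project layout
# from tools.date_utils import DateUtils

cron_utils = CronUtils()
now = int(time.time())
# five fields: second minute hour day month -> every day at 09:30:00
wait_seconds = cron_utils.analyze(now, '0 30 9 * *')
print(DateUtils.format_time(now + wait_seconds))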
Example #2
async def async_add_job(service_path='',
                        method='',
                        params={},
                        start_time='',
                        cron='',
                        job_id='',
                        group_name='default',
                        repeat_count=0):
    """
    Add a scheduled job.
    :param service_path:   path of the service to execute
    :param method:         method to call
    :param params:         parameters to pass to the method
    :param start_time: (2018-06-20 16:30:00)
    :param cron: a five-field expression; from left to right the fields are
                 second, minute, hour, day and month.
                 Each field accepts a concrete value or a range:
                 20      means [20]
                 1-3     means [1, 2, 3]
                 1,4,6,7 means [1, 4, 6, 7]
                 *       means every value
                 1/5     means start at 1 and run every 5 steps
    :param job_id:       job ID; each job's ID must be unique
    :param group_name: 
    :param repeat_count: pass -1 for unlimited repetitions; running the same job
                         a finite number of times is not supported yet - if the
                         business needs that, use several jobs instead
    :return: 
    """
    if cron:
        current_time = int(DateUtils.timestamps_now())
        left_time = cron_utils.analyze(current_time + 1, cron)
        start_time = DateUtils.format_time(current_time + left_time)
    # Round start_time up to the nearest RUN_TIME-second slot
    current_date = start_time[:16] + ':00'
    current_count = 1
    while current_date < start_time:
        # avoid an infinite loop
        if current_count >= 1000:
            break
        current_count += 1
        current_date = DateUtils.add_second(current_date, seconds=RUN_TIME)
    start_time = current_date

    job_params = {
        'job_id': job_id,
        'group_name': group_name,
        'start_time': start_time,
        'limit_time': 0,
        'repeat_count': repeat_count,
        'cron': cron,
        'path': service_path,
        'method': method,
        'params': params
    }
    result = await save_job(job_params)
    return result
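A sketch of how the cron form of this call might look; the service path, method and job_id are illustrative, not part of the project:

import asyncio

async def schedule_demo():
    # run service.demo_service.rebuild_cache every day at 02:30:00, forever
    await async_add_job(service_path='service.demo_service',
                        method='rebuild_cache',
                        params={'scope': 'all'},
                        cron='0 30 2 * *',        # second minute hour day month
                        job_id='demo_rebuild_cache',
                        repeat_count=-1)

# asyncio.get_event_loop().run_until_complete(schedule_demo())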
Example #3
File: __init__.py Project: yuiitsu/DSSP
async def save_to_db(data):
    # async def save_to_db(task_unique_id, service_path, method, params_json):
    """
    Save tasks to the DB.
    :param data: list
        data[][task_unique_id]
        data[][service_path]
        data[][method]
        data[][params_json]
        data[][key1]
        data[][key2]
        data[][key3]
    :return:
    """
    if isinstance(data, dict):
        data = [data]

    model = AsyncModelBase()
    sql_list = []
    for item in data:
        key = 'task_unique_id, service_path, method, key1, key2, key3, params, create_time'
        val = '%s, %s, %s, %s, %s, %s, %s, %s'
        duplicate = ['params = %s', 'create_time = %s']
        value = (item['task_unique_id'], item['service_path'], item['method'],
                 item.get('key1', ''), item.get('key2', ''),
                 item.get('key3',
                          ''), item['params_json'], DateUtils.time_now(),
                 item['params_json'], DateUtils.time_now())
        sql_list.append({
            model.sql_constants.SQL_TYPE: model.sql_constants.INSERT,
            model.sql_constants.TABLE_NAME: 'tbl_cfg_task',
            model.sql_constants.DICT_DATA: {
                model.sql_constants.KEY: key,
                model.sql_constants.VAL: val,
                model.sql_constants.DUPLICATE_KEY_UPDATE: duplicate
            },
            model.sql_constants.VALUE_TUPLE: value
        })
    #
    result = await model.do_sqls(sql_list)
    if result is None:
        for item in data:
            logger.info(
                'Task Add [%s] to DB failed, path [%s], method [%s], params: %s',
                item['task_unique_id'], item['service_path'], item['method'],
                item['params_json'])
    else:
        for item in data:
            logger.info(
                'Task Add [%s] to DB success, path [%s], method [%s], params: %s',
                item['task_unique_id'], item['service_path'], item['method'],
                item['params_json'])

    return result
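A sketch of the list shape this function expects, following the docstring above; every value shown is illustrative:

# inside an async context:
# tasks = [
#     {'task_unique_id': '1531468800001', 'service_path': 'service.demo',
#      'method': 'run', 'params_json': '{"x": 1}', 'key1': 'batch-A'},
#     {'task_unique_id': '1531468800002', 'service_path': 'service.demo',
#      'method': 'run', 'params_json': '{"x": 2}'},
# ]
# result = await save_to_db(tasks)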
Example #4
def run():
    """
    Start running.
    :return:
    """
    if warning_to_email is None:
        print 'warning mail error, check setting.conf [monitor]'
        return

    last_len = 0
    loop_num = 0
    conn_failed_num = 0
    while True:
        try:
            conn = redis.get_conn()
            list_len = conn.llen(list_cache_key)
        except Exception, e:
            conn_failed_num += 1
            if conn_failed_num >= 6:
                response = send_warning_info(
                    'monitor',
                    json.dumps({
                        'exception message': e.message,
                        'time': DateUtils.time_now()
                    }))
                if response is True:
                    conn_failed_num = 0

            list_len = 0
            logger.error('monitor redis get_conn failed, e: %s', e.message)

        if list_len > 0 and list_len >= last_len:
            loop_num += 1
            # the queue length stayed the same or grew
            if loop_num >= 6:
                response = send_warning_info(
                    'task',
                    json.dumps({
                        'list_len': list_len,
                        'time': DateUtils.time_now()
                    }))
                if response is True:
                    loop_num = 0
            else:
                logger.info('task len add')
        else:
            last_len = list_len
            loop_num = 0
            # print 'task status is normal'

        time.sleep(10)
Example #5
async def update_to_db(task_unique_id, status):
    """
    Update the task status.
    :param task_unique_id:
    :param status:
    :return:
    """
    model = AsyncModelBase()
    fields = [
        'status = %s',
        'execute_time = %s'
    ]
    condition = 'task_unique_id = %s'
    value = (status, DateUtils.time_now(), task_unique_id)
    result = await model.update('tbl_cfg_task', {
        model.sql_constants.FIELDS: fields,
        model.sql_constants.CONDITION: condition
    }, value)
    if result is None:
        logger.info('Task Update [%s] failed, status: %s',
                    task_unique_id, status)
    else:
        logger.info('Task Update [%s] to DB success, status: %s',
                    task_unique_id, status)

    return result
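A sketch of a typical call; the task ID is illustrative, and the status values mirror how the task runner example below reports outcomes:

# inside an async context:
# await update_to_db('1531468800001', 'success')   # or 'failed'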
Example #6
File: jwt.py Project: yuiitsu/DSSP
    def verify(access_token):
        """
        Verify the JWT signature.
        :param access_token:
        :return:
        """
        if access_token:
            try:
                header, p, signature = access_token.split('.')
                p = p + '=' * (-len(p) % 4)
                p = base64.decodebytes(p.encode('utf-8')).decode('utf-8')
                p_dict = json.loads(p)
            except Exception as e:
                logger.exception(e)
                return False

            create_time = p_dict['iat']
            expires_in = p_dict['exp']
            time_now = int(dateUtils.timestamps_now())
            if create_time + expires_in < time_now:
                return False

            encoded = JWT.encode(p_dict)
            if encoded != access_token:
                return False

            return p_dict

        return False
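The expression p + '=' * (-len(p) % 4) restores the padding that base64url-style tokens strip. A small self-contained check of that trick (not tied to this project's JWT class):

import base64

raw = b'{"iat": 1531468800, "exp": 7200}'
unpadded = base64.b64encode(raw).decode().rstrip('=')
restored = unpadded + '=' * (-len(unpadded) % 4)
assert base64.b64decode(restored) == raw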
Example #7
async def save_to_db(task_unique_id, service_path, method, params_json):
    """
    Save a task to the DB.
    :param task_unique_id:
    :param service_path:
    :param method:
    :param params_json:
    :return:
    """
    model = AsyncModelBase()
    key = 'task_unique_id, service_path, method, params, create_time'
    val = '%s, %s, %s, %s, %s'
    duplicate = [
        'create_time = %s'
    ]
    value = (task_unique_id, service_path, method, params_json, DateUtils.time_now(), DateUtils.time_now())
    result = await model.insert('tbl_cfg_task', {
        model.sql_constants.KEY: key,
        model.sql_constants.VAL: val,
        model.sql_constants.DUPLICATE_KEY_UPDATE: duplicate
    }, value)
    if not result:
        logger.info('Task Add [%s] to DB failed, path [%s], method [%s], params: %s',
                    task_unique_id, service_path, method, params_json)
    else:
        logger.info('Task Add [%s] to DB success, path [%s], method [%s], params: %s',
                    task_unique_id, service_path, method, params_json)

    return result
Example #8
async def do_schedule():
    """
    Scan for scheduled jobs that can run at the next time slot.
    :return: 
    """
    current_datetime = datetime.datetime.now()
    current_second = current_datetime.second

    quotient = int(current_second / run_time)
    remainder = int(current_second % run_time)
    if remainder > 0:
        quotient = quotient + 1
    next_second = quotient * run_time
    if next_second == 60:
        next_datetime = current_datetime + datetime.timedelta(minutes=1)
        next_second = 0
    else:
        next_datetime = current_datetime
    next_timestamp = time.mktime(next_datetime.timetuple())
    next_time = DateUtils.format_time(next_timestamp,
                                      time_format=key_YYMMDDHHMM)
    # When the current time lands on a 0- or 30-second slot, read the job queue and run it
    cache_key = ServiceBase.schedule.JOB_KEY + next_time + format_second(
        next_second)
    job_list = await redis.smembers(cache_key)
    if job_list:
        # run the jobs in a coroutine
        await do_job_list(job_list)
    await redis.delete(cache_key)
    await redis.hdel(ServiceBase.schedule.SCHEDULE_KEY, cache_key)
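The second-rounding above snaps the current second up to the next run_time boundary and rolls into the next minute when it hits 60. A self-contained sketch of that arithmetic, assuming run_time = 30:

def next_slot(second, run_time=30):
    quotient, remainder = divmod(second, run_time)
    if remainder > 0:
        quotient += 1
    return quotient * run_time  # 60 means "second 0 of the next minute"

assert next_slot(17) == 30 and next_slot(30) == 30 and next_slot(45) == 60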
Example #9
async def add_job(job_list):
    """
    Add jobs to the job queue.
    :param job_list: 
    :return: 
    """
    logger.info('Adding jobs to the queue')
    current_time = DateUtils.time_now()
    if job_list:
        for job in job_list:
            if isinstance(job['params'], str):
                try:
                    job['params'] = json.loads(job['params'])
                except Exception as e:
                    logger.exception('JSON ERROR: %s', e)
            del job['create_time']
            try:
                cache_key = ServiceBase.schedule.JOB_KEY + job[
                    'start_time'].replace(' ', '').replace('-', '').replace(
                        ':', '')
                if current_time >= job['start_time']:
                    # the current time is past the job's start time: run it now, then delete the key
                    await do_job(job, is_normal=False)
                    await redis.delete(cache_key)
                else:
                    # add the job to its queue group; a set is used, so duplicate members are ignored
                    await redis.sadd(cache_key,
                                     json.dumps(job, cls=CJsonEncoder))
                    length = await redis.scard(cache_key)
                    await redis.hset(ServiceBase.schedule.SCHEDULE_KEY,
                                     cache_key, length)
            except Exception as e:
                logger.exception('ADD JOB ERROR: %s', e)
                await task.save_task_error(job, e)
Example #10
File: monitor.py Project: onlyfu/T3
def run():
    """
    Start running.
    :return:
    """
    if warning_to_email is None:
        print 'warning mail error, check setting.conf [monitor]'
        return

    last_len = 0
    loop_num = 0
    conn_failed_num = 0
    while True:
        try:
            conn = redis.get_conn()
            list_len = conn.llen(list_cache_key)
        except Exception, e:
            conn_failed_num += 1
            if conn_failed_num >= 6:
                response = send_warning_info('monitor', json.dumps({
                    'exception message': e.message,
                    'time': DateUtils.time_now()
                }))
                if response is True:
                    conn_failed_num = 0

            list_len = 0
            logger.error('monitor redis get_conn failed, e: %s', e.message)

        if list_len > 0 and list_len >= last_len:
            loop_num += 1
            # the queue length stayed the same or grew
            if loop_num >= 6:
                response = send_warning_info('task', json.dumps({
                    'list_len': list_len,
                    'time': DateUtils.time_now()
                }))
                if response is True:
                    loop_num = 0
            else:
                logger.info('task len add')
        else:
            last_len = list_len
            loop_num = 0
            # print 'task status is normal'

        time.sleep(10)
Example #11
def get_start_end_date():
    now_time = long(now.strftime('%H%M%S'))
    if (80000 < now_time < 190000) or (0 < now_time < 30000):
        last_trading_day = DateUtils().get_last_trading_day('%Y-%m-%d', now_date_str)
        start_date = last_trading_day + ' 21:00:00'
    else:
        start_date = now_date_str + ' 21:00:00'
    end_date = now.strftime('%Y-%m-%d %H:%M:%S')
    return start_date, end_date
Example #12
File: task_runner.py Project: yuiitsu/DSSP
 async def loop(self, task_id):
     """
     1. Fetch one item from the queue
     2. Parse it
     3. Call the target method
     :return:
     """
     logger.info('task[%s] started.', task_id)
     await task.register_coroutine(self.server_name, task_id, int(DateUtils.timestamps_now()))
     await self.process(task_id)
Example #13
 def start(self):
     """
     Read data from the database and initialize the scheduled jobs.
     :return: 
     :update: wsy 2017/8/11
     """
     if self.job_stores:
         conn = self.pool.connection()
         cursor = conn.cursor(cursorclass=MySQLdb.cursors.DictCursor)
         # look up all jobs currently stored in the database
         sql = 'SELECT * FROM tbl_cfg_schedule_job ORDER BY start_time DESC'  # where start_time > now() or (repeat_count = -1)
         cursor.execute(sql)
         job_list = cursor.fetchall()
         for job in job_list:
             try:
                 job_params = pickle.loads(job['job_data'])
                 if job_params['cron']:
                     current_timestamp = int(time.time())
                     job_params['left_time'] = self.cron_utils.analyze(
                         current_timestamp, job_params['cron'])
                     job_params['start_time'] = DateUtils.format_time(
                         current_timestamp + job_params['left_time'])
                 if job_params['type'] == 'one':
                     """
                     One-off job
                     """
                     start_timestamp = int(
                         time.mktime(
                             time.strptime(job['start_time'],
                                           '%Y-%m-%d %H:%M:%S')))
                     current_timestamp = int(time.time())
                     if current_timestamp > start_timestamp:
                         left_time = 0
                          print('Start time is earlier than now; running immediately')
                     else:
                         left_time = start_timestamp - current_timestamp
                     job_params['left_time'] = left_time
                 elif job_params['type'] == 'many':
                     pass
                 elif job_params['type'] == 'circle':
                     pass
                 thread = threading.Thread(target=self.func,
                                           args=[job_params])
                 thread.start()
                 self.job_id_list[job_params['job_id']] = 1
                 print 'Scheduled job started'
                 print job_params
             except Exception, e:
                 print Exception, ':', e
Example #14
async def add(path='', method='', arguments=None, is_priority=False, sub_task=None, task_unique_id=None):
    """
    Add a task.
    :param path: path of the module to call
    :param method:  method to call
    :param arguments: request parameters
    :param is_priority: whether to process with priority (True or False)
    :param sub_task: sub-task definition, if any
            sub_task['queue_key'] target queue key
            sub_task['task_num'] number of tasks
    :param task_unique_id
    :return:
    """
    #
    arguments_json = json.dumps(arguments, cls=CJsonEncoder)
    if not task_unique_id:
        task_unique_id = str(int(DateUtils.timestamps_now())) + str(random.randrange(10000, 100000))

    logger.info('Task Add [%s], path [%s], method [%s], params: %s',
                task_unique_id, path, method, arguments_json)
    #
    await save_to_db(task_unique_id, path, method, arguments_json)
    if (path and method and arguments) or sub_task:
        params = {
            'task_unique_id': task_unique_id,
            'path': path,
            'method': method,
            'arguments': arguments,
            'sub_task': sub_task
        }

        try:
            params = json.dumps(params, cls=CJsonEncoder)
            if is_priority:
                result = await redis.rpush(task_queue, params)
            else:
                result = await redis.lpush(task_queue, params)
            #
            if result:
                logger.info('Task Add [%s] to QUEUE success, path [%s], method [%s], params: %s',
                            task_unique_id, path, method, arguments_json)
            else:
                logger.info('Task Add [%s] to QUEUE failed, path [%s], method [%s], params: %s',
                            task_unique_id, path, method, arguments_json)
        except Exception as e:
            await Report.report('Exception while adding task', e)
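A sketch of typical calls; the service path, method and arguments are illustrative only:

# inside an async context:
# await add(path='service.demo_service', method='send_mail',
#           arguments={'to': 'ops@example.com'})
# await add(path='service.demo_service', method='send_mail',
#           arguments={'to': 'ops@example.com'}, is_priority=True)  # jumps the queue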
Example #15
    async def check_queue(cls):
        """
        Check the task queue.
            Runs every 10 seconds; if the queue is longer than at the previous check
            and the previous length was not 0, send an alert mail.
        :return:
        """
        try:
            task_queue_len = await redis.llen(cls.task_queue)
        except Exception:
            raise

        if task_queue_len > 0:
            if (cls.task_queue_length > 0) and (task_queue_len >=
                                                cls.task_queue_length):
                if cls.task_queue_report_num >= 3:
                    report_html = """
                        <tr>
                            <td style="font-size:30px;">任务队列未能即时消费,当前任务数量:{},前一次检查任务数量:{},请注意.</td>
                        </tr>
                        <tr>
                            <td>可能原因:</td>
                        </tr>
                        <tr>
                            <td>1.任务消费能力不足。 如果是,程序将在3次检查之后自动增加消费者进行处理。之后如果仍然不能停止报警,见2</td>
                        </tr>
                        <tr>
                            <td>2.任务进程死亡。检查任务死亡原因,并重启服务</td>
                        </tr>
                        <tr>
                            <td>Task Monitor report at: {}</td>
                        </tr>
                    """.format(task_queue_len, cls.task_queue_length,
                               DateUtils.time_now())
                    await Report.report("", cls.mail_template(report_html))

                    if cls.task_queue_report_num >= 6:
                        await task.add("", "", None, False, {
                            'loop_num': 1,
                        })
                else:
                    cls.task_queue_report_num = cls.task_queue_report_num + 1
            else:
                cls.task_queue_report_num = 0

            cls.task_queue_length = task_queue_len
Example #16
async def cal_next_start_time(job, is_normal=True):
    """
    Compute the next time the scheduled job should run.
    :param job: 
    :param is_normal
    :return: 
    """
    # A job that repeats forever must carry a cron expression, otherwise this fails outright.
    # Parse the cron expression and work out the next time the job has to run.
    if not is_normal:
        current_time = int(DateUtils.timestamps_now())
    else:
        current_time = DateUtils.str_to_time(job['start_time'])
    left_time = cron_utils.analyze(current_time + 1, job['cron'])
    start_time = DateUtils.format_time(current_time + left_time)
    # Round start_time up to the nearest run_time-second slot
    current_date = start_time[:16] + ':00'
    current_count = 1
    while current_date < start_time:
        # avoid an infinite loop
        if current_count >= 1000:
            break
        current_count += 1
        current_date = DateUtils.add_second(current_date, seconds=run_time)
    start_time = current_date
    job['start_time'] = start_time
    cache_key = ServiceBase.schedule.JOB_KEY + job['start_time'].replace(
        ' ', '').replace('-', '').replace(':', '')
    now_date = DateUtils.time_now()
    # If the next run time is earlier than now, skip ahead to the next execution slot
    if job['start_time'] < now_date:
        logger.info('Next run time of the job is earlier than the current time')

        current_date = now_date[:16] + ':00'

        while current_date < now_date:

            current_date = DateUtils.add_second(current_date,
                                                seconds=2 * run_time)

        job['start_time'] = current_date

        cache_key = ServiceBase.schedule.JOB_KEY + job['start_time'].replace(
            ' ', '').replace('-', '').replace(':', '')

    model = importlib.import_module('task.schedule.model')
    model = model.Model()
    await model.update_job(job)
    await redis.sadd(cache_key, json.dumps(job, cls=CJsonEncoder))
    length = await redis.scard(cache_key)
    await redis.hset(ServiceBase.schedule.SCHEDULE_KEY, cache_key, length)
Example #17
File: my_scheduler.py Project: onlyfu/T3
 def start(self):
     """
     Read data from the database and initialize the scheduled jobs.
     :return: 
     :update: wsy 2017/8/11
     """
     if self.job_stores:
         conn = self.pool.connection()
         cursor = conn.cursor(cursorclass=MySQLdb.cursors.DictCursor)
         # look up all jobs currently stored in the database
         sql = 'SELECT * FROM tbl_cfg_schedule_job ORDER BY start_time DESC'  # where start_time > now() or (repeat_count = -1)
         cursor.execute(sql)
         job_list = cursor.fetchall()
         for job in job_list:
             try:
                 job_params = pickle.loads(job['job_data'])
                 if job_params['cron']:
                     current_timestamp = int(time.time())
                     job_params['left_time'] = self.cron_utils.analyze(current_timestamp, job_params['cron'])
                     job_params['start_time'] = DateUtils.format_time(current_timestamp + job_params['left_time'])
                 if job_params['type'] == 'one':
                     """
                     One-off job
                     """
                     start_timestamp = int(time.mktime(time.strptime(job['start_time'], '%Y-%m-%d %H:%M:%S')))
                     current_timestamp = int(time.time())
                     if current_timestamp > start_timestamp:
                         left_time = 0
                         print ('Start time is earlier than now; running immediately')
                     else:
                         left_time = start_timestamp - current_timestamp
                     job_params['left_time'] = left_time
                 elif job_params['type'] == 'many':
                     pass
                 elif job_params['type'] == 'circle':
                     pass
                 thread = threading.Thread(target=self.func, args=[job_params])
                 thread.start()
                 self.job_id_list[job_params['job_id']] = 1
                 print 'Scheduled job started'
                 print job_params
             except Exception, e:
                 print Exception, ':', e
Example #18
    def run(self):
        # read the command-line arguments
        loop_num = 0
        config_file = ''
        arguments = sys.argv
        for k, v in enumerate(arguments):
            if v == '-t':
                loop_num = arguments[k + 1]

            if v == '-c':
                config_file = arguments[k + 1]

            if v == '-s':
                self.server_name = arguments[k + 1]

        if config_file:
            self.parse_config(config_file)

        if loop_num:
            task.loop_num = loop_num

        #
        if not self.server_name:
            self.server_name = 'S' + str(random.randrange(10000, 100000))

        event_loop = asyncio.get_event_loop()
        # register server
        asyncio.ensure_future(
            task.register_server(server_name=self.server_name,
                                 create_time=int(DateUtils.timestamps_now())))

        # main loop
        for i in range(task.loop_num):
            asyncio.ensure_future(self.loop(str(i)))

        # monitor loop
        # asyncio.ensure_future(Monitor.start())

        event_loop.run_forever()
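A sketch of the command line this argument loop expects; the script name and values are illustrative:

# python task_runner.py -t 4 -c ./conf/task.conf -s worker-01
#   -t  number of consumer loops (task.loop_num)
#   -c  path of the config file handed to parse_config()
#   -s  server name; a random 'S<number>' is generated when omitted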
Example #19
File: my_scheduler.py Project: onlyfu/T3
 def add_job(self, service_path, method, params={}, job_id='',
             group='default', start_time='', limit_time=0, repeat_count=0, cron=''):
     """
     Add a scheduled job.
     :param job: 
     :return: 
     """
     if job_id in self.job_id_list:
         print Exception('job_id {%s} already exists' % job_id)
         return
     if self.job_stores and not job_id:
         print Exception('job_id must not be empty if the job is persisted to the data store')
         return
     try:
         left_time = 0
         if cron:
             current_timestamp = int(time.time())
             # if a cron expression is given, it takes precedence
             left_time = self.cron_utils.analyze(current_timestamp, cron)
             start_time = DateUtils.format_time(current_timestamp + left_time)
         if start_time and not limit_time and not repeat_count:
             """
             One-off job
             """
             start_timestamp = int(time.mktime(time.strptime(start_time, '%Y-%m-%d %H:%M:%S')))
             current_timestamp = int(time.time())
             if current_timestamp > start_timestamp:
                 print 'Start time is earlier than now; starting immediately'
                 left_time = 0
             else:
                 left_time = start_timestamp - current_timestamp
             type = 'one'
         elif limit_time >= 0 and repeat_count > 0:
             """
             Repeated job, but not an infinite loop
             """
             type = 'many'
         elif repeat_count < 0:
             """
             Infinite loop
             """
             type = 'circle'
         job_params = {
             'params': params,
             'service_path': service_path,
             'method': method,
             'left_time': left_time,
             'limit_time': limit_time,
             'repeat_count': repeat_count,
             'type': type,
             'job_id': job_id,
             'group_name': group,
             'start_time': start_time,
             'cron': cron
         }
         thread = threading.Thread(target=self.func, args=[job_params])
         thread.start()
         self.job_id_list[job_id] = 1
         # persist the job to the database
         if self.job_stores:
             self.save_job(job_params)
         print 'Scheduled job started'
         print job_params
         return thread
     except Exception, e:
         print Exception("启动定时任务失败")
         return
Example #20
    def analyze(self, timestamp, cron_str):
        """
        Parse a cron expression.
        :param timestamp: reference timestamp
        :param cron_str: five-field cron expression
        :return: number of seconds until the next matching time
        """
        x = time.localtime(timestamp)
        second = x.tm_sec
        minute = x.tm_min
        hour = x.tm_hour
        day = x.tm_mday
        month = x.tm_mon
        year = x.tm_year

        month_max_days = calendar.monthrange(year, month)[1]

        cron_list = cron_str.split(' ')

        second_str = cron_list[0]
        minute_str = cron_list[1]
        hour_str = cron_list[2]
        day_str = cron_list[3]
        month_str = cron_list[4]

        right_second, second_index, next_second_index, second_list, second_carry = self.process_base(
            second_str, 0, 59, second, 0)
        right_minute, minute_index, next_minute_index, minute_list, minute_carry = self.process_base(
            minute_str, 0, 59, minute, second_carry)
        right_hour, hour_index, next_hour_index, hour_list, hour_carry = self.process_base(
            hour_str, 0, 23, hour, minute_carry)
        right_day, day_index, next_day_index, day_list, day_carry = self.process_base(
            day_str, 1, month_max_days, day, hour_carry)
        right_month, month_index, next_month_index, month_list, month_carry = self.process_base(
            month_str, 1, 12, month, day_carry)

        # left_days = (datetime.date(year, right_month, right_day) - datetime.date(year, month, day)).days
        # left_hour = right_hour - hour
        # left_minute = right_minute - minute
        # left_second = right_second - second
        # # Check from hours down to seconds, one by one
        # if left_hour < 0:
        #     # 处理小时
        #     left_hour += 24
        # elif left_hour == 0 and left_minute < 0:
        #     left_minute += 60000
        # elif left_hour == 0 and left_minute == 0 and left_second < 0:
        #     if minute_str == '*':
        #         left_second += 60
        #     elif hour_str == '*':
        #         left_minute += 60
        #     elif day_str == '*':
        #         left_hour += 24
        #
        # left_timestamp = (((left_days * 24 + left_hour) * 60) + left_minute) * 60 + left_second
        if month_carry:
            right_month += month_carry
        date = (
            '{}-%s-%s %s:%s:%s' %
            tuple([('00' + str(i))[-2:]
                   for i in (month, day, hour, minute, second)])).format(year)
        right_date = ('{}-%s-%s %s:%s:%s' % tuple([('00'+str(i))[-2:] for
                                             i in (right_month, right_day, right_hour, right_minute, right_second)])).\
            format(year)

        left_timestamp = DateUtils.str_to_time(
            right_date) - DateUtils.str_to_time(date)
        return left_timestamp
Example #21
 def add_job(self,
             service_path,
             method,
             params={},
             job_id='',
             group='default',
             start_time='',
             limit_time=0,
             repeat_count=0,
             cron=''):
     """
     Add a scheduled job.
     :param job: 
     :return: 
     """
     if job_id in self.job_id_list:
         print Exception('job_id {%s} already exists' % job_id)
         return
     if self.job_stores and not job_id:
         print Exception('job_id must not be empty if the job is persisted to the data store')
         return
     try:
         left_time = 0
         if cron:
             current_timestamp = int(time.time())
             # if a cron expression is given, it takes precedence
             left_time = self.cron_utils.analyze(current_timestamp, cron)
             start_time = DateUtils.format_time(current_timestamp +
                                                left_time)
         if start_time and not limit_time and not repeat_count:
             """
             One-off job
             """
             start_timestamp = int(
                 time.mktime(time.strptime(start_time,
                                           '%Y-%m-%d %H:%M:%S')))
             current_timestamp = int(time.time())
             if current_timestamp > start_timestamp:
                 print 'Start time is earlier than now; starting immediately'
                 left_time = 0
             else:
                 left_time = start_timestamp - current_timestamp
             type = 'one'
         elif limit_time >= 0 and repeat_count > 0:
             """
             Repeated job, but not an infinite loop
             """
             type = 'many'
         elif repeat_count < 0:
             """
             Infinite loop
             """
             type = 'circle'
         job_params = {
             'params': params,
             'service_path': service_path,
             'method': method,
             'left_time': left_time,
             'limit_time': limit_time,
             'repeat_count': repeat_count,
             'type': type,
             'job_id': job_id,
             'group_name': group,
             'start_time': start_time,
             'cron': cron
         }
         thread = threading.Thread(target=self.func, args=[job_params])
         thread.start()
         self.job_id_list[job_id] = 1
         # persist the job to the database
         if self.job_stores:
             self.save_job(job_params)
         print 'Scheduled job started'
         print job_params
         return thread
     except Exception, e:
         print Exception("启动定时任务失败")
         return
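A sketch of the three job shapes the branches above distinguish; the scheduler instance name, service path and methods are illustrative:

scheduler = MyScheduler()   # hypothetical instance of the class this method belongs to
# one-off job: start_time only -> type 'one'
scheduler.add_job('service.demo', 'run_once', job_id='j1',
                  start_time='2018-06-20 16:30:00')
# repeated but finite: limit_time plus a positive repeat_count -> type 'many'
scheduler.add_job('service.demo', 'run_few', job_id='j2',
                  limit_time=60, repeat_count=5)
# infinite loop driven by a cron expression -> type 'circle'
scheduler.add_job('service.demo', 'run_forever', job_id='j3',
                  cron='0 0 10,17 * *', repeat_count=-1)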
Example #22
# -*- coding: utf-8 -*-
# MD5 checksum
import os
from model.server_constans import ServerConstant
from tools.date_utils import DateUtils
import hashlib

date_utils = DateUtils()
today_filter_str = date_utils.get_today_str('%Y-%m-%d')
server_constant = ServerConstant()
interval_time = 60


def __read_chunks(fh):
    fh.seek(0)
    chunk = fh.read(8096)
    while chunk:
        yield chunk
        chunk = fh.read(8096)
    else:
        fh.seek(0)  # finally, move the cursor back to the start of the file


# Compute a file's MD5 digest
def __get_file_md5(file_path):
    m = hashlib.md5()
    if os.path.exists(file_path):
        with open(file_path, "rb") as fh:
            for chunk in __read_chunks(fh):
                m.update(chunk)
    else:
Example #23
# -*- coding: utf-8 -*-
"""
@author: Yuiitsu
@file: report
@time: 2018/7/13 16:32
"""
from email.message import EmailMessage
from tornado_smtp.client import TornadoSMTP

from source.properties import Properties
from tools.date_utils import DateUtils
from tools.logs import Logs

properties = Properties('task')
date_utils = DateUtils()
logger = Logs().logger


class Report:

    send_time = 0
    smtp_server = properties.get('smtp', 'server')
    smtp_account = properties.get('smtp', 'account')
    smtp_pass = properties.get('smtp', 'pass')
    report_from = properties.get('report', 'from')
    report_to = properties.get('report', 'to')
    report_server = properties.get('report', 'server')

    @classmethod
    async def report(cls, content, error_track):
        """
Example #24
# -*- coding: utf-8 -*-
# After 3 o'clock each day, update the pf_position table: recreate tomorrow's rows and copy values long -> long_avail -> yd_positong_long -> yd_long_remain
import sys
import MySQLdb
import datetime

from tools.date_utils import DateUtils
from tools.getConfig import getConfig

d1 = datetime.datetime.now()
todayStr = d1.strftime('%Y-%m-%d')
next_day_str = DateUtils().get_next_trading_day('%Y-%m-%d')

if __name__ == '__main__':
    print 'Enter pf_position_rebuild.py'
    cfg_dict = getConfig()
    try:
        conn = MySQLdb.connect( \
            host=cfg_dict['host'], user=cfg_dict['user'], passwd=cfg_dict['password'], \
            port=int(cfg_dict['db_port']), db='common', charset='utf8')
    except Exception, e:
        print e
        sys.exit(-1)

    cursor = conn.cursor()
    if next_day_str == '':
        sys.exit()

    del_sql = 'delete from portfolio.pf_position where date = %s'
    del_param = (next_day_str, )
    execute = cursor.execute(del_sql, del_param)
Example #25
File: task_runner.py Project: yuiitsu/DSSP
    async def process(self, task_id, task_queue_key=None, is_break=False):
        """
        Process tasks off the queue.
        :param task_id: task ID
        :param task_queue_key: cache key of a specific queue
        :param is_break: whether to stop the loop
        :return:
        """
        while True:
            #
            status = await task.get_server_status(self.server_name)
            if not status or status == '0':
                logger.info('Server [%s] shutdown, task [%s] shutdown', self.server_name, task_id)
                self.is_running = False
                await asyncio.sleep(10)
                continue

            if not self.is_running:
                self.is_running = True
                logger.info('Server [%s] start, task [%s] start', self.server_name, task_id)
            #
            is_process_success = 'success'
            task_unique_id = ''
            item = await task.get_one(task_queue_key)
            if item is False:
                logger.info('Server [%s], task [%s], get seed failed.', self.server_name, task_id)
                await Report.report('Exception while fetching a task', '')

            if item:
                try:
                    target = json.loads(item)
                    task_unique_id = target.get('task_unique_id', '')
                    path = target['path']
                    method = target['method']
                    params = target['arguments']
                    sub_task = target['sub_task']
                    #
                    logger.info('Task [%s] Pop [%s] from queue, path [%s], method [%s], params: %s',
                                task_id, task_unique_id, path, method, json.dumps(params))
                    #
                    if sub_task:
                        self.create_sub_task(sub_task)
                    else:
                        result = await ServiceBase().cs(path, method, params)
                        logger.info('task[%s], path: %s, method: %s result: %s', task_id, path, method, result)
                        if not result or 'code' not in result or (result['code'] != 0 and result['code'] != 1004):
                            # failed
                            raise ValueError(result)
                except Exception as e:
                    logger.exception(e)
                    is_process_success = 'failed'
                    await task.save_task_error(item, e)
                #
                if task_unique_id:
                    await task.update_to_db(task_unique_id, is_process_success)
            else:
                # logger.info('task[%s] wait seed.', task_id)
                if is_break:
                    logger.info('task[%s] break.', task_id)
                    break

            #
            await task.refresh_coroutine(self.server_name, task_id, int(DateUtils.timestamps_now()))
            await asyncio.sleep(1)
Example #26
# -*- coding: utf-8 -*-
# Position data update for the Xinhu trading system - XunTou
from datetime import datetime
from model.order import Order
from model.trade import Trade
from model.position import Position
from model.BaseModel import *
from model.server_constans import ServerConstant
from tools.getConfig import getConfig
from tools.file_utils import FileUtils
from tools.date_utils import DateUtils

now = datetime.now()
now_date_str = now.strftime('%Y-%m-%d')
now_datetime_str = now.strftime('%Y-%m-%d %H:%M:%S')
last_trading_day = DateUtils().get_last_trading_day('%Y-%m-%d', now_date_str)
file_path = getConfig()['datafetcher_file_path']

HEDGE_FLAG_MAP = {
    '49': '0',
}
Direction_Map = {'48': '2', '49': '3'}


def read_position_file_xt(xt_file_path, add_flag):
    print 'Start reading file: ' + xt_file_path
    fr = open(xt_file_path)
    order_array = []
    trade_array = []
    trading_account_array = []
    investor_position_array = []
Example #27
File: cron_utils.py Project: onlyfu/T3
                    return compare_time, index, index + 1, str_list, 1
                else:
                    return compare_time, index, 0, str_list, 1
            else:
                for str_item in str_list:
                    if int(str_item) > compare_time:
                        index = str_list.index(str_item)
                        if len(str_list) > index + 1:
                            return int(str_item), index, index + 1, str_list, 1
                        else:
                            return int(str_item), index, 0, str_list, 1
                return int(str_list[0]), 0, -1, str_list, 1


if __name__ == '__main__':
    cron_utils = CronUtils()
    current_time = int(time.time())
    # cron_str = '0 0 1 * *'
    cron_str = '0 0 10,17 * *'
    for i in range(20):
        left_time = cron_utils.analyze(current_time, cron_str)
        if left_time == 0:
            left_time += 1
            print DateUtils.format_time(current_time), left_time
            current_time += left_time
        else:
            current_time += left_time
            print DateUtils.format_time(current_time), left_time
            current_time += 1

Example #28
File: __init__.py Project: yuiitsu/DSSP
async def add(path='',
              method='',
              arguments=None,
              is_priority=False,
              sub_task=None,
              task_unique_id=None,
              batch_data=None):
    """
    Add a task.
    :param path: path of the module to call
    :param method:  method to call
    :param arguments: request parameters
    :param is_priority: whether to process with priority (True or False)
    :param sub_task: sub-task definition, if any
            sub_task['queue_key'] target queue key
            sub_task['task_num'] number of tasks
    :param task_unique_id
    :param batch_data: list
        batch_data[][service_path]
        batch_data[][method]
        batch_data[][arguments]
        batch_data[][key1]
        batch_data[][key2]
        batch_data[][key3]
    :return:
    """
    #
    arguments_json = json.dumps(arguments, cls=CJsonEncoder)
    if not task_unique_id:
        task_unique_id = str(int(DateUtils.timestamps_now())) + str(
            random.randrange(10000, 100000))

    logger.info('Task Add [%s], path [%s], method [%s], params: %s',
                task_unique_id, path, method, arguments_json)
    #
    if batch_data:
        for item in batch_data:
            if not item.get('task_unique_id'):
                item['task_unique_id'] = str(int(
                    DateUtils.timestamps_now())) + str(
                        random.randrange(10000, 100000))
            #
            item['params_json'] = json.dumps(item['arguments'],
                                             cls=CJsonEncoder)
            # del item['arguments']
    else:
        batch_data = [{
            'task_unique_id': task_unique_id,
            'service_path': path,
            'method': method,
            'arguments': arguments,
            'params_json': arguments_json
        }]
    #
    await save_to_db(batch_data)
    for item in batch_data:
        params = {
            'task_unique_id': item['task_unique_id'],
            'path': item['service_path'],
            'method': item['method'],
            'key1': item.get('key1', ''),
            'key2': item.get('key2', ''),
            'key3': item.get('key3', ''),
            'arguments': item['arguments'],
            'sub_task': sub_task
        }

        try:
            params = json.dumps(params, cls=CJsonEncoder)
            if is_priority:
                result = await redis.rpush(task_queue, params)
            else:
                result = await redis.lpush(task_queue, params)
            #
            if result:
                logger.info(
                    'Task Add [%s] to QUEUE success, path [%s], method [%s], '
                    'key1 [%s], key2 [%s], key3 [%s], params: %s',
                    item['task_unique_id'], item['service_path'],
                    item['method'], item.get('key1', ''), item.get('key2', ''),
                    item.get('key3', ''), item['params_json'])
            else:
                logger.info(
                    'Task Add [%s] to QUEUE failed, path [%s], method [%s], '
                    'key1 [%s], key2 [%s], key3 [%s], params: %s',
                    item['task_unique_id'], item['service_path'],
                    item['method'], item.get('key1', ''), item.get('key2', ''),
                    item.get('key3', ''), item['params_json'])
        except Exception as e:
            await Report.report('Exception while adding task', e)
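A sketch of the batch form of this call, following the docstring above; service paths and arguments are illustrative:

# inside an async context:
# batch = [
#     {'service_path': 'service.demo', 'method': 'step_one',
#      'arguments': {'x': 1}, 'key1': 'batch-A'},
#     {'service_path': 'service.demo', 'method': 'step_two',
#      'arguments': {'x': 2}, 'key1': 'batch-A'},
# ]
# await add(batch_data=batch)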