def redis_set(key, value, timeout=60):
    s = Serializer(current_app.config["SECRET_KEY"], timeout)
    try:
        data = s.dumps(value)
        redis.set(key, data)
        return True
    except Exception:
        return False
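
A read-side counterpart is implied but not shown; the sketch below is an assumption rather than the original code, reusing the same itsdangerous-style Serializer so tampered or expired payloads simply come back as None:

def redis_get(key):
    # hypothetical counterpart to redis_set, not part of the original
    s = Serializer(current_app.config["SECRET_KEY"])
    data = redis.get(key)
    if data is None:
        return None
    try:
        return s.loads(data)  # raises if the signature is bad or expired
    except Exception:
        return None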
Example #2
def set_state(state):
    gp.setwarnings(False)
    gp.setmode(gp.BOARD)
    gp.setup(PIN, gp.OUT)

    gp.output(PIN, state)
    print "setting to state:", state
    redis.set(app.config.get('PIN_STATE'), state)
Example #3
def group():
    if request.method == 'POST':
        roles.create(**request.form.to_dict())
        # every time a role is added, refresh the cached role map in redis
        all_roles = roles.all()
        all_role_ids = map(lambda x: x.id, all_roles)
        all_role_names = map(lambda x: x.name, all_roles)
        role_map = dict(zip(all_role_names, all_role_ids))
        redis.set('roles', json.dumps(role_map))
        return jsonify(code=200, msg='添加成功')
    _roles = roles.all()
    return render_template('auth/groups.html', roles=_roles)
Example #4
def get_post(post_id):

    rpost = redis.get(str(post_id))
    if rpost is None:
        post = Post.query.filter_by(id = post_id).first()
        if post is None:
            abort(404)
        redis.set(str(post_id), pickle.dumps(post)) 
    else:
        post = pickle.loads(rpost)

    return get_view(post)
Example #5
def make_token(privileges=DEFAULT_PRIVILEGES, length=8):
    """ Generate and record a password to redis. """
    password = ''.join(
            [random.choice(PASSWORD_CHARS) for _ in range(length)]
    )
    redis.set(
            _PASSWORD_PREFIX + password,
            json.dumps(
                dict(
                    password=password,
                    privileges=list(privileges),
                ),
            ),
    )
    return password
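
The recorded token is only useful with a lookup on the other side; a minimal sketch, assuming the same _PASSWORD_PREFIX and JSON layout used above (check_token is hypothetical, not part of the original):

def check_token(password):
    raw = redis.get(_PASSWORD_PREFIX + password)
    if raw is None:
        return None
    return json.loads(raw)["privileges"]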
Example #6
        def wrapper(*args, **kwargs):
            key = "{0}: {1}".format(request.remote_addr, request.path)

            try:
                remaining = limit - int(redis.get(key))
            except (ValueError, TypeError):
                remaining = limit
                redis.set(key, 0)

            expires_in = redis.ttl(key)
            if expires_in < 0:  # redis.ttl returns -1 (no TTL set) or -2 (missing key)
                redis.expire(key, window)
                expires_in = window

            g.rate_limits = (limit, remaining-1, time()+expires_in)

            if remaining > 0:
                redis.incr(key, 1)
                return func(*args, **kwargs)
            return TOO_MANY_REQUESTS
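
This wrapper begins mid-decorator; a sketch of the enclosing factory it implies, where limit and window come from the closure above (the factory itself, including its default values, is an assumption rather than the original code):

from functools import wraps

def rate_limit(limit=60, window=60):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            ...  # body as shown above
        return wrapper
    return decorator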
Example #7
def load_regions():
    csv = read_csv_from_s3('redshift/attrs/attrs_regioes.csv')
    df = pd.read_csv(csv,
                     sep=';',
                     header=0,
                     names=['id', 'name_en', 'abbr_en', 'name_pt', 'abbr_pt'])

    regions = {}

    for _, row in df.iterrows():
        region = {
            'id': row['id'],
            'name_en': row["name_en"],
            'abbr_en': row['abbr_en'],
            'name_pt': row["name_pt"],
            'abbr_pt': row['abbr_pt'],
        }

        regions[row['id']] = region
        redis.set('region/' + str(row['id']), pickle.dumps(region))

    redis.set('region', pickle.dumps(regions))

    print "Regions loaded."
Example #8
def get(openid):
    """获取最新的学院新闻"""
    # 优先读取缓存
    redis_key = 'wechat:school_news'
    news_cache = redis.get(redis_key)
    if news_cache:
        content = ast.literal_eval(news_cache.decode())
        wechat_custom.send_news(openid, content)
    else:
        url = current_app.config['SCHOOL_NEWS_URL']
        try:
            res = requests.get(url, timeout=6)
        except Exception as e:
            app.logger.warning(u'学院官网连接超时出错:%s' % e)
            content = u'学院官网连接超时\n请稍后重试'
            wechat_custom.send_text(openid, content)
        else:
            soup = BeautifulSoup(res.text, "html.parser")
            rows = soup.find("tr", {
                "class": "newsMoreLine"
            }).find_all('a')[:5]  # push five news items
            content = []
            for row in rows:
                title = row.text
                link = urljoin(url, row['href'])
                data = {
                    "title": title,
                    "picurl":
                    'http://d.hiphotos.baidu.com/baike/w%3D268%3Bg%3D0/sign=22959be8277f9e2f70351a0e270b8e19/35a85edf8db1cb13ca4a151ddd54564e92584b98.jpg',
                    "description": "",
                    "url": link
                }
                content.append(data)
            #  cache the result for 12 hours; store the repr so the
            #  ast.literal_eval read above can parse it back
            redis.set(redis_key, str(content), 3600 * 12)
            wechat_custom.send_news(openid, content)
Example #9
def create_short_link():
    """
    post:
        create short link
    get:
        show short link if previous request was post short link 
    """
    full_link, short_link = None, None

    if request.method == 'POST':
        if not re.match(regex, request.form['full_link']):
            abort(400, 'Link is broken')
        full_link = request.form['full_link']
        counter = redis.incr('linked_id')  # incr returns the new value, avoiding a get race
        short_link = comp.compress_url(counter)
        redis.set(short_link, full_link, nx=True)
        short_link = request.url + short_link
        app.logger.info(f'create short link: {short_link}')
    return render_template('main.html',
                           context={
                               'full_link': full_link,
                               'short_link': short_link
                           })
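
The matching resolver view is not shown; a minimal sketch, assuming short links are stored as plain keys the way the redis.set above implies (resolve_short_link is hypothetical; redirect and abort are the usual Flask helpers):

def resolve_short_link(short_link):
    full_link = redis.get(short_link)
    if full_link is None:
        abort(404)
    return redirect(full_link.decode())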
Example #10
def create_room():
    if 'manager_id' not in session:
        return jsonify(api_format(status.HTTP_406_NOT_ACCEPTABLE, "you are not logged in"))
    json_content = request.get_json()
    room_name = json_content['room_name']
    description = json_content['description']
    start_time = int(json_content['start_time'])
    end_time = int(json_content['end_time'])
    manager_id = session['manager_id']

    while True:
        room_id = generate_random_num_str(6)
        if Room.query.get(room_id) is None:
            break
    db.session.add(
        Room(
            room_id=room_id,
            room_name=room_name,
            start_time=start_time,
            end_time=end_time,
            description=description,
            manager_id=manager_id
        )
    )
    db.session.commit()
    redis.set(room_id + ':message_num', 0)
    return jsonify(api_format(
        status.HTTP_200_OK,
        "ok",
        {
            'room_id': room_id,
            'room_name': room_name,
            'start_time': start_time,
            'end_time': end_time,
            'description': description
        }))
Example #11
def update_config():
    """
    更新配置
    :return:
    """
    post = request.values
    form = ConfigForm(post)
    if not form.validate():
        return Reply.error(form.errors)
    operator_code = "op_{operator_code}".format(
        operator_code=form.operator_code.data)
    data = redis.get(operator_code)
    if data is None:
        return Reply.error("此运营商code不存在")
    return Reply.success("ok") if redis.set(
        operator_code, form.config.data) else Reply.error("failed")
Example #12
def update_wechat_token():
    """ 刷新access_token 和 jsapi_ticket """
    wechat = init_wechat_sdk()
    wechat_client = wechat['client']
    access_token_dic = wechat_client.fetch_access_token()
    access_token = access_token_dic['access_token']
    token_expires_at = access_token_dic['expires_in']

    redis.set("wechat:access_token", access_token, 7000)
    redis.set("wechat:access_token_expires_at", token_expires_at, 7000)

    jsapi_ticket_dic = wechat_client.jsapi.get_ticket()
    jsapi_ticket = jsapi_ticket_dic['ticket']
    ticket_expires_at = jsapi_ticket_dic['expires_in']

    redis.set("wechat:jsapi_ticket", jsapi_ticket, 7000)
    redis.set("wechat:jsapi_ticket_expires_at", ticket_expires_at, 7000)

    current_app.logger.warning("运行update_wechat_token结束")
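
A consumer of this cache would read the token back and refresh on a miss; a sketch under that assumption (get_access_token is hypothetical, not part of the original):

def get_access_token():
    token = redis.get("wechat:access_token")
    if token is None:
        # cache expired; repopulate it
        update_wechat_token()
        token = redis.get("wechat:access_token")
    return token.decode() if token else None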
Example #13
def load_hedu_course():
    csv = read_csv_from_s3('redshift/attrs/attrs_hedu_course.csv')
    df = pd.read_csv(csv,
                     sep=';',
                     header=0,
                     names=['id', 'name_en', 'name_pt'],
                     converters={"id": str})

    hedu_courses = {}
    hedu_courses_field = {}

    for _, row in df.iterrows():
        if len(row['id']) == 2:
            hedu_course_field = {
                'id': row['id'],
                'name_pt': row["name_pt"],
                'name_en': row["name_en"],
            }

            redis.set('hedu_course_field/' + str(row['id']),
                      pickle.dumps(hedu_course_field))
            hedu_courses_field[row['id']] = hedu_course_field

    for _, row in df.iterrows():
        if len(row['id']) == 6:
            hedu_course = {
                'id': row['id'],
                'name_pt': row["name_pt"],
                'name_en': row["name_en"],
                'hedu_course_field': hedu_courses_field[row['id'][:2]]
            }

            redis.set('hedu_course/' + str(row['id']),
                      pickle.dumps(hedu_course))
            hedu_courses[row['id']] = hedu_course

    redis.set('hedu_course', pickle.dumps(hedu_courses))
    redis.set('hedu_course_field', pickle.dumps(hedu_courses_field))

    print "HEDU Courses loaded."
Example #14
def load_occupations():
    csv = read_csv_from_s3('redshift/attrs/attrs_cbo.csv')
    df = pd.read_csv(csv,
                     sep=';',
                     header=0,
                     names=['id', 'name_en', 'name_pt'],
                     converters={"id": str})

    occupations_family = {}
    occupations_group = {}

    for _, row in df.iterrows():
        if len(row['id']) == 1:
            occupation_group = {
                'id': row['id'],
                'name_pt': row["name_pt"],
                'name_en': row["name_en"]
            }

            redis.set('occupation_group/' + str(row['id']),
                      pickle.dumps(occupation_group))
            occupations_group[row['id']] = occupation_group

    for _, row in df.iterrows():
        if len(row['id']) == 4:
            occupation_family = {
                'id': row['id'],
                'name_pt': row["name_pt"],
                'name_en': row["name_en"],
                'occupation_group': occupations_group[row['id'][0]],
            }

            redis.set('occupation_family/' + str(row['id']),
                      pickle.dumps(occupation_family))
            occupations_family[row['id']] = occupation_family

    redis.set('occupation_family', pickle.dumps(occupations_family))
    redis.set('occupation_group', pickle.dumps(occupations_group))

    print "Occupations loaded."
Example #15
def BJYCreate():
    env_flag = request.args.get("env_flag")
    env_num = request.args.get("env_num")
    phone = request.args.get("phone")
    developer = request.args.get("developer")
    WebRTC = request.args.get("WebRTC")
    redis.set("editBJYRoom", "{'phone':'%s'}" % phone, 600)
    redis.set("editBJYRoomFlag", "{'env_flag':'%s'}" % env_flag, 600)
    redis.set("editBJYRoomNum", "{'env_num':'%s'}" % env_num, 600)
    # url = "http://127.0.0.1:5000/scheduleSuite?domain=ClassIn&env_flag=%s&env_num=%s&developer=%s&WebRTC=%s"%(env_flag,env_num,developer,WebRTC)
    url = "http://uwsgi.sys.bandubanxie.com/scheduleSuite?domain=BJY&env_flag=%s&env_num=%s&developer=%s&WebRTC=%s" % (
        env_flag, env_num, developer, WebRTC)
    wctv = {}
    resp = requests.get(url=url)
    time.sleep(10)
    wctv["schedule"] = resp.text
    return make_response(jsonify(wctv))
Example #16
def ClassInCreate():
    env_flag = request.args.get("env_flag")
    env_num = request.args.get("env_num")
    phone = request.args.get("phone")
    developer = request.args.get("developer")
    redis.set("ClassInCreate", "{'phone':'%s'}" % phone, 600)
    redis.set("ClassInCreateFlag", "{'env_flag':'%s'}" % env_flag, 600)
    redis.set("ClassInCreateNum", "{'env_num':'%s'}" % env_num, 600)
    url = "http://uwsgi.sys.bandubanxie.com/scheduleSuite?domain=ClassIn&env_flag=%s&env_num=%s&developer=%s" % (
        env_flag, env_num, developer)
    wctv = {}
    resp = requests.get(url=url)
    time.sleep(10)
    value = redis.get("v1/admin/product_chapter/create_live_room.json")
    wctv["schedule"] = resp.text
    wctv["RoomMsg"] = value
    return make_response(jsonify(wctv))
Example #17
def add_to_redis(user, mode):
    token = random.randint(10000, 99999)
    name = f'{user.id}_{mode.lower()}'
    redis.set(name=name, value=token, ex=14400)
    return token
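
The verification side would read the same key back; a minimal sketch assuming tokens are compared as strings (check_redis_token is hypothetical, not part of the original):

def check_redis_token(user, mode, token):
    stored = redis.get(f'{user.id}_{mode.lower()}')
    return stored is not None and stored.decode() == str(token)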
Example #18
 def update(self):
     return (redis.sismember("messages", self.sha) and
             redis.set("message:{}".format(self.sha), self.to_dict()))
Example #19
 def tearDown(self):
     if self.counter_value is not None:
         redis.set('counter', self.counter_value)
Example #20
""" worker.py
"""
from time import sleep
import json

from app import redis
from parser import build_events_list

while True:
    redis.set('events_list', json.dumps(build_events_list()))
    sleep(60*15)
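
The web process would read this key back; a one-line sketch of the consuming side (an assumption, not shown in the original):

events = json.loads(redis.get('events_list') or '[]')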
Example #21
def _db_set(key, value):
    redis.set(key, json.dumps(value))
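
The natural inverse, assuming the same JSON encoding (_db_get is hypothetical, not part of the original):

def _db_get(key):
    raw = redis.get(key)
    return None if raw is None else json.loads(raw)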
Example #22
def set_cache(key, values):
    redis.set(key, pickle.dumps(values))
Example #23
def scrape():
    STATISTICS_URL = 'https://covid19.yale.edu/yale-covid-19-statistics'

    r = requests.get(STATISTICS_URL)
    soup = BeautifulSoup(r.text, 'html.parser')
    body = soup.find('div', {'class': 'field-item even'})

    # TODO: find a better way to target the first div
    # Unfortunately it doesn't have identifying information other than a class .alert-[color]
    alert_level = body.find('div')['class'][0].split('-')[1]
    redis.set('alert_level', alert_level)

    TABLES_URL = 'https://covid19.yale.edu/yale-statistics/yale-covid-19-statistics-data-tables'

    r = requests.get(TABLES_URL)
    soup = BeautifulSoup(r.text, 'html.parser')
    body = soup.find('div', {'class': 'field-item even'})

    tables = []
    for table in body.find_all('table'):
        caption = table.find('caption').text
        output_rows = []
        for table_row in table.find_all('tr'):
            columns = table_row.find_all(['th', 'td'])
            output_row = []
            for column in columns:
                output_row.append(column.text)
            output_rows.append(output_row)
        tables.append(output_rows)

    # Split up "peak" columns into number and date
    tables = [split_peak(table) for table in tables]

    # Merge Yale tables into one
    yale_table = merge_tables(tables[:2])
    yale_table[0] = [
        'population',
        'total_cases',
        'weekly_cases',
        'new_case_peak',
        'new_case_peak_date',
        # Why do they include useless statistics like this??
        'most_recent_date_below_5_percent_positivity',
        # TODO I wish
        #'total_positivity_rate',
        'weekly_positivity_rate',
        'peak_positivity_rate',
        'peak_positivity_rate_date',
    ]

    yale_data = to_dicts(yale_table)

    yale_data[0]['populations'] = {
        population['population'].lower(): population
        for population in yale_data[1:]
    }
    yale_data = yale_data[0]
    del yale_data['population']

    redis.set('yale', json.dumps(yale_data))

    # Merge Connecticut tables into one
    """
    connecticut_table = merge_tables(tables[2:])
    connecticut_table[0][0] = 'County'

    connecticut_data = to_dicts(connecticut_table)
    redis.set('connecticut', json.dumps(connecticut_data))
    """

    redis.set('last_updated', int(time.time()))
    print('Updated data.')
Example #24
def load_municipalities():
    csv = read_csv_from_s3('redshift/attrs/attrs_municipios.csv')
    df = pd.read_csv(csv,
                     sep=';',
                     header=0,
                     names=[
                         'uf_id', 'uf_name', 'mesorregiao_id',
                         'mesorregiao_name', 'microrregiao_id',
                         'microrregiao_name', 'municipio_id', 'municipio_name',
                         'municipio_id_mdic'
                     ],
                     converters={
                         "uf_id": str,
                         "mesorregiao_id": str,
                         "microrregiao_id": str,
                         "municipio_id": str
                     })

    municipalities = {}
    microregions = {}
    mesoregions = {}

    for _, row in df.iterrows():
        municipality = {
            'id': row['municipio_id'],
            'name_pt': row["municipio_name"],
            'name_en': row["municipio_name"],
            'mesoregion': {
                'id': row["mesorregiao_id"],
                'name_pt': row["mesorregiao_name"],
                'name_en': row["mesorregiao_name"],
            },
            'microregion': {
                'id': row["microrregiao_id"],
                'name_pt': row["microrregiao_name"],
                'name_en': row["microrregiao_name"],
            },
            'state':
            pickle.loads(redis.get('state/' + row['municipio_id'][:2])),
            'region':
            pickle.loads(redis.get('region/' + row['municipio_id'][0])),
        }

        municipalities[row['municipio_id']] = municipality
        microregions[row['microrregiao_id']] = municipality['microregion']
        mesoregions[row['mesorregiao_id']] = municipality['mesoregion']

        redis.set('municipality/' + str(row['municipio_id']),
                  pickle.dumps(municipality))
        redis.set('microregion/' + str(row['microrregiao_id']),
                  pickle.dumps(municipality['microregion']))
        redis.set('mesoregion/' + str(row['mesorregiao_id']),
                  pickle.dumps(municipality['mesoregion']))

    redis.set('municipality', pickle.dumps(municipalities))
    redis.set('microregion', pickle.dumps(microregions))
    redis.set('mesoregion', pickle.dumps(mesoregions))

    print "Municipalities, microregions and mesoregions loaded."
Example #25
 def save(self):
     return (redis.sadd("tokens", self.sha) and
             redis.set("token:{}".format(self.sha), self.to_dict()))
Example #26
def load_products():
    csv = read_csv_from_s3('redshift/attrs/attrs_hs.csv')
    df = pd.read_csv(
        csv,
        sep=';',
        header=0,
        names=['id', 'name_pt', 'name_en', 'profundidade_id', 'profundidade'],
        converters={"id": str})

    products = {}
    product_sections = {}
    product_chapters = {}

    for _, row in df.iterrows():
        if row['profundidade'] == 'Seção':
            product_section_id = row['id']

            product_section = {
                'id': product_section_id,
                'name_pt': row["name_pt"],
                'name_en': row["name_en"],
            }

            redis.set('product_section/' + str(product_section_id),
                      pickle.dumps(product_section))
            product_sections[product_section_id] = product_section

        elif row['profundidade'] == 'Capítulo':
            product_chapter_id = row['id'][2:]

            product_chapter = {
                'id': product_chapter_id,
                'name_pt': row["name_pt"],
                'name_en': row["name_en"],
            }

            redis.set('product_chapter/' + str(product_chapter_id),
                      pickle.dumps(product_chapter))
            product_chapters[product_chapter_id] = product_chapter

    for _, row in df.iterrows():
        if row['profundidade'] == 'Posição':
            product_id = row['id'][2:]
            product_section_id = row["id"][:2]
            product_chapter_id = row["id"][2:4]

            product = {
                'name_pt': row["name_pt"],
                'name_en': row["name_en"],
                'product_section': product_sections[product_section_id],
                'product_chapter': product_chapters[product_chapter_id],
            }

            products[product_id] = product
            redis.set('product/' + str(product_id), pickle.dumps(product))

    redis.set('product', pickle.dumps(products))
    redis.set('product_section', pickle.dumps(product_sections))
    redis.set('product_chapter', pickle.dumps(product_chapters))

    print "Products loaded."
Example #27
def cache(key, url):
    txt = get_content_text(url)
    # print(txt)
    redis.set(key, txt, ex=86400)
Example #28
 def save_cache(self, ttl=cache_time):
     # cache the data
     if self.result['status_code'] == 200:
         redis.set(self.redis_key, pickle.dumps(self.result['data']), ttl)
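
A read-side counterpart would undo the pickling; a sketch under the same assumptions (load_cache is hypothetical, not part of the original):

 def load_cache(self):
     raw = redis.get(self.redis_key)
     return None if raw is None else pickle.loads(raw)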
Example #29
def run_exec(socket, uuid):
    workflow_info = (
        db.table("zbn_workflow")
        .select("uuid", "name", "start_app", "end_app", "flow_json", "flow_data")
        .where("uuid", uuid)
        .first()
    )

    if workflow_info:
        start_app = workflow_info["start_app"]
        end_app = workflow_info["end_app"]

        flow_json = json.loads(workflow_info["flow_json"])
        flow_data = json.loads(workflow_info["flow_data"])

        # for r in flow_json["edges"]:
        #     print(r["label"], r["source"], r["target"])

        global_data = {}

        target_app = find_start_app(edges=flow_json["edges"], start_app=start_app)

        add_execute_logs(
            socket=socket, uuid=uuid, app_uuid=start_app, app_name="开始", result="剧本开始执行"
        )

        is_while = True

        while is_while:
            try:
                # fetch the current app's data
                if_else, source_app, next_app = find_next_app(
                    edges=flow_json["edges"], next_app=target_app
                )
            except Exception:
                add_execute_logs(
                    socket=socket,
                    uuid=uuid,
                    app_uuid="",
                    app_name="",
                    result="当前剧本不具有可执行条件",
                )
                is_while = False
                break

            key = target_app + "_sum"
            if redis.exists(key) == 1:
                sum = redis.get(key)
                redis.set(key, int(sum) + 1, ex=3)
            else:
                redis.set(key, 1, ex=3)

            # execution data for the current app
            source_info = flow_data[source_app]
            # print(source_app)
            s, ifelse_result = get_app_data(
                socket=socket,
                uuid=uuid,
                app_uuid=source_app,
                app_info=source_info,
                global_data=global_data,
            )

            if not s:
                add_execute_logs(
                    socket=socket,
                    uuid=uuid,
                    app_uuid=end_app,
                    app_name=flow_data.get(source_app).get("name"),
                    result="执行错误:{}".format(ifelse_result),
                )
                add_execute_logs(
                    socket=socket,
                    uuid=uuid,
                    app_uuid=end_app,
                    app_name="结束",
                    result="剧本执行结束",
                )
                is_while = False

            if if_else != "":
                if if_else == ifelse_result:
                    target_app = next_app
            else:
                target_app = next_app

            if next_app == end_app:
                add_execute_logs(
                    socket=socket,
                    uuid=uuid,
                    app_uuid=end_app,
                    app_name="结束",
                    result="剧本执行结束",
                )
                is_while = False
Example #30
 def wrap(*args, **kwargs):
     ret = dfunc(*args, **kwargs)
     redis.set(config.REFRESH_KEY, data)
     return ret
Example #31
def load_industries():
    csv = read_csv_from_s3('redshift/attrs/attrs_cnae.csv')
    df = pd.read_csv(csv,
                     sep=',',
                     header=0,
                     names=['id', 'name_en', 'name_pt'],
                     converters={"id": str})

    industry_sections = {}
    industry_divisions = {}
    industry_classes = {}

    industry_classes['-1'] = {
        'name_pt': 'Não definido',
        'name_en': 'Undefined'
    }

    industry_sections['0'] = {
        'name_pt': 'Não definido',
        'name_en': 'Undefined'
    }

    for _, row in df.iterrows():
        if len(row['id']) == 1:
            industry_section = {
                'id': row['id'],
                'name_pt': row["name_pt"],
                'name_en': row["name_en"]
            }

            redis.set('industry_section/' + str(row['id']),
                      pickle.dumps(industry_section))
            industry_sections[row['id']] = industry_section

    for _, row in df.iterrows():
        if len(row['id']) == 3:
            division_id = row['id'][1:3]

            industry_division = {
                'id': division_id,
                'name_pt': row["name_pt"],
                'name_en': row["name_en"],
                'industry_section': row["id"][0]
            }

            redis.set('industry_division/' + str(division_id),
                      pickle.dumps(industry_division))
            industry_divisions[division_id] = industry_division

    for _, row in df.iterrows():
        if len(row['id']) == 6:
            class_id = row["id"][1:]

            industry_class = {
                'id': class_id,
                'name_pt': row["name_pt"],
                'name_en': row["name_en"],
                'industry_section': industry_sections[row["id"][0]],
                'industry_division': industry_divisions[row["id"][1:3]]
            }

            redis.set('industry_class/' + str(class_id),
                      pickle.dumps(industry_class))
            industry_classes[class_id] = industry_class

    redis.set('industry_class', pickle.dumps(industry_classes))
    redis.set('industry_division', pickle.dumps(industry_divisions))
    redis.set('industry_section', pickle.dumps(industry_sections))

    print "Industries loaded."
Example #32
def chessboard(game_id):
    current_app.logger.error("chessboard, game_ida: " + str(game_id))
    pc_id = request.args.get('pc_id')
    if not pc_id:
        return "No pc"
    # Existing game
    if redis.exists(game_id):
        current_app.logger.error("   Existing game")
        chess_game = load_game(game_id)
        current_app.logger.error(str(chess_game))
        # if new player
        if not get_player_color(chess_game, pc_id):
            current_app.logger.error("       New player")
            # if empty seat
            if chess_game.headers["Black"] == "?":
                chess_game.headers["Black"] = str(pc_id)
            # if game full
            else:
                return "Game full"
        color = get_player_color(chess_game, pc_id)
        current_app.logger.error(
            str("   Player {} is color {}").format(str(pc_id), str(color)))
    # New game
    else:
        chess_game = chess.pgn.Game()
        current_app.logger.error("   New game")
        chess_game.headers["White"] = str(pc_id)
        chess_game.headers["Event"] = str(game_id)
        color = "White"
        current_app.logger.error(
            str("   Player {} is color {}").format(str(pc_id), str(color)))
    redis.set(game_id, str(chess_game))
    board = chess_game.board()
    for move in chess_game.mainline_moves():
        board.push(move)
    current_app.logger.error(str(board.fen()))
    turn = "White" if board.turn else "Black"
    if board.is_checkmate() or board.is_stalemate():
        if board.result() == "1-0":
            winner = chess_game.headers["White"]
        if board.result() == "0-1":
            winner = chess_game.headers["Black"]
        else:
            winner = ""
        return render_template('chessboard.html',
                               color=color,
                               white=chess_game.headers["White"],
                               black=chess_game.headers["Black"],
                               game_id=game_id,
                               pgn=str(chess_game),
                               fen=str(board.fen()),
                               turn=turn,
                               game_over=True,
                               winner=winner)
    else:
        return render_template('chessboard.html',
                               color=color,
                               white=chess_game.headers["White"],
                               black=chess_game.headers["Black"],
                               game_id=game_id,
                               pgn=str(chess_game),
                               fen=str(board.fen()),
                               turn=turn,
                               game_over=False,
                               winner=None)
Example #33
def teams_helper(sport=None):
    """
    Generic helper function to scrape scoring data from STATS's
    JavaScript file.
    """

    flat_list = query_string_arg_to_bool(PARAM_FLAT_LIST)

    rv = fetch_cached_data(args=PARAM_FLAT_LIST if flat_list else None)

    if rv is not None:
        return rv

    # STATs does not order NFL teams
    nfl_teams = [
        "Atlanta Falcons", "Buffalo Bills", "Chicago Bears",
        "Cincinnati Bengals", "Cleveland Browns", "Dallas Cowboys",
        "Denver Broncos", "Detroit Lions", "Green Bay Packers",
        "Tennessee Titans", "Indianapolis Colts", "Kansas City Chiefs",
        "Oakland Raiders", "St. Louis Rams", "Miami Dolphins",
        "Minnesota Vikings", "New England Patriots", "New Orleans Saints",
        "New York Giants", "New York Jets", "Philadelphia Eagles",
        "Arizona Cardinals", "Pittsburgh Steelers", "San Diego Chargers",
        "San Francisco 49ers", "Seattle Seahawks", "Tampa Bay Buccaneers",
        "Washington Redskins", "Carolina Panthers", "Jacksonville Jaguars", '',
        '', "Baltimore Ravens", "Houston Texans"
    ]

    soup = help_fetch_soup(url=TEAMS_URL.replace(URL_TOKEN, sport))

    stack = []
    redis_stack = []
    league_stack = []
    division_stack = []
    league = None
    division = None

    # Iterate over each conference
    for table in soup("table"):

        for row in table("tr"):
            if row.get("class") is None:
                continue

            cells = row("td")

            # Conference Row
            if "shsTableTtlRow" in row.get("class"):
                if flat_list:
                    continue

                if division_stack and division:
                    league_stack.append({division: division_stack})

                    division_stack = []

                if league_stack and league:
                    stack.append({league: league_stack})

                    league_stack = []

                league = format_division(row)

            # Division Row
            elif "shsTableSubttlRow" in row.get("class"):
                if flat_list:
                    continue

                if division_stack and division:
                    league_stack.append({division: division_stack})

                    division_stack = []

                division = format_division(row)

            # Team Row
            else:
                team = cells[0].extract().text.strip()  # Python 3: text is already str, no encode needed

                # Save the team as a flat list for persistent storage
                redis_stack.append(team)

                if flat_list:
                    stack.append(team)
                else:
                    division_stack.append(team)
        else:
            if division_stack and division:
                league_stack.append({division: division_stack})

                division_stack = []

            if league_stack and league:
                stack.append({league: league_stack})

                league_stack = []

    out = prepare_json_output(stack)
    del soup, division_stack, league_stack, stack

    redis_key = app.config["REDIS_KEY_TEAMS"].replace(
        app.config["REDIS_KEY_TOKEN_SPORT"], "nfl" if "fb" == sport else sport)

    if not redis.exists(redis_key):
        if "fb" == sport:
            redis_stack = nfl_teams

        # Convert the object to a JSON string
        redis.set(name=redis_key,
                  value=dumps(prepare_json_output(redis_stack)))

    del redis_key, redis_stack

    cache_data(
        data=out,
        args=PARAM_FLAT_LIST if flat_list else None,
        timeout=60 * 60 * 24 * 300  # Cache for 300 days
    )

    return out
Example #34
def get_fund():
    article_url = request.form.get('article_link')

    if redis.exists(article_url):
        # Get from the redis cache
        fund_match = pickle.loads(redis.get(article_url))
        return jsonify(fund=fund_match)

    else:
        print('checking article')
        # Check the article
        article = Article.query.filter_by(article_link=article_url).first()

        # article exists
        if article and article.fund_name is not None:
            print('article exists')
            widget_status = article.widget_status
            print(widget_status)
            if widget_status:
                fund = Fund.query.filter_by(
                    fund_name=article.fund_name).first()
                # Save to redis cache
                redis.set(article_url, pickle.dumps(fund.serialize()))
                return jsonify(fund=fund.serialize())

        else:
            article_title = request.form.get('article_title')
            article_text = request.form.get('article_text')

            if 'emorywheel' in article_url:
                publisher_id = 'The Emory Wheel'

            else:
                publisher_id = 'Indiana Daily Student'

            if article_title is None:

                if publisher_id == 'The Emory Wheel':
                    article_info = emory_scraper(article_url)

                else:
                    article_info = indiana_scraper(article_url)

                article_title = article_info['title']
                article_text = article_info['content']
                article_text = article_text[:len(article_text) // 2]

            else:
                article_title = article_title.lower()
                article_text = article_text.lower()[:len(article_text) // 2]

                # store article data in a json file and upload to a s3 bucket
                # save_article_data(s3_client=s3, article_link=article_url, article_title=article_title,
                #                   article_date_time=datetime.now(), article_text=article_text)

            fund = get_best_fund(article_text)

            # fund name is null
            if article:
                article.fund_name = fund.fund_name

            else:
                # Add to database
                article = Article(
                    article_link=article_url,
                    article_title=article_title,
                    publisher_id=publisher_id,
                    widget_status=True,
                    date_published=datetime.now(),
                    fund_name=fund.fund_name,
                    project_id1=None,
                    project_id2=None,
                    project_id3=None,
                    project_id4=None,
                    project_id5=None,
                    project_id6=None,
                    edited_by_publisher=False,
                    edited_by_newspark=False,
                )
                db.session.add(article)

            db.session.commit()

            redis.set(article_url, pickle.dumps(fund.serialize()))
            return jsonify(fund=fund.serialize())
Example #35
def remove_project_from_article():
    """
    Remove a project from this article.
    :param: JWT token for an owner, article link, project id
    :return: Success if the project was removed from this article
    """

    username = get_jwt_identity()
    exists = Owner.query.filter_by(username=username).first()

    if exists:
        article_link = request.form.get('article_link')
        project_id = int(request.form.get('project_id'))

        article = Article.query.filter_by(article_link=article_link).first()

        if article:
            ids = article.get_project_ids()

            for i in range(len(ids)):
                if ids[i] == project_id:
                    ids[i] = None

            ids = sorted(ids, key=lambda x: x is None)  # move None's to back

            article.project_id1 = ids[0]
            article.project_id2 = ids[1]
            article.project_id3 = ids[2]
            article.project_id4 = ids[3]
            article.project_id5 = ids[4]
            article.project_id6 = ids[5]

            if username == 'admin':
                article.edited_by_newspark = True

            else:
                article.edited_by_publisher = True

            db.session.commit()

            sql_query = '''select project_id
                            from projects, organizations
                            where projects.organization_id=organizations.email
                            and projects.removed=FALSE
                            and organizations.verified=TRUE;'''
            conn = db.engine.connect().connection
            df = pd.read_sql(sql_query, conn)
            other_ids = list(filter(lambda x: not (x in ids), list(df['project_id'])))

            # store recommendations in a json file and upload to a s3 bucket
            update_recommendations(s3_client=s3, article_link=article_link, ids=ids, other_ids=other_ids)

            # Re-run the commands to get the right data for the articles in the cache
            # TODO: set up a celery worker to do all of this
            if redis.exists(article_link):
                redis.delete(article_link)

            project_info_list = get_projects_from_article(article_url=article_link,
                                                          num_ids=application.config['NUM_CHOICES'])
            # Save to redis cache
            redis.set(article_link, pickle.dumps(project_info_list))

            return jsonify("Success")

        else:
            return jsonify("Article does not exist.")

    else:
        return jsonify("Publisher does not exist.")
Example #36
def push():
    """ 推送图文消息"""
    newsform = NewsForm()
    form = TextForm()
    wechat = init_wechat_sdk()
    client = wechat['client']
    pushnews = Pushnews(author_id=current_user.id)
    if request.method == 'POST':
        #  save the push to the database and send it via WeChat
        title = newsform.title.data
        body = newsform.body.data
        to_group = Group.query.get(newsform.group.data)
        to_user = []
        to_user_id = []
        for u in to_group.wechatusers:
            to_user.append(u.openid)
            to_user_id.append(u.id)

        try:
            #  add to the rich-media material library
            pushnews.title = title
            pushnews.body = body
            pushnews.to_group = to_group
            pushnews.to_confirmed = json.dumps(to_user_id)
            pushnews.save()
            body_html = pushnews.body_html
            #  refresh the cache of the latest push notification
            articles = [{
                'title': title,
                'show_cover_pic': 0,
                'content': body_html,  # check whether the html needs processing
                'author': current_user.name,
                'thumb_media_id': current_app.config['NEWS_THUMB_MEDIA_ID'],
                'digest': current_app.config['NEWS_DIGEST'],
                'content_source_url': current_app.config['NEWS_CONTENT_SOURCE_URL'],
                'need_open_comment': '1',
                'only_fans_can_comment': '1'
            }]
            res = client.material.add_articles(articles)
            #  XXX: why did the send succeed even though nothing was saved to the database?
            media_id = res['media_id']
            current_app.logger.warning('添加的图文的media_id: %s' % media_id)
            #  save to the database; TODO: add a few more fields
            pushnews.media_id = media_id
            pushnews.update()
            redis.set("wechat:last_push_time",
                      time.strftime('%Y-%m-%d %H:%M:%S'))
            redis.hmset(
                "wechat:last_push", {
                    "create_time": time.strftime('%Y-%m-%d %H:%M:%S'),
                    "to_confirmed": to_user_id,
                    "media_id": media_id,
                    "pushtype": 'news'
                })
            redis_pushtext_prefix = "wechat:pushnews:" + media_id
            redis.hmset(
                redis_pushtext_prefix, {
                    "media_id": media_id,
                    "to_user": to_user,
                    "create_time": time.strftime('%Y-%m-%d %H:%M:%S'),
                    "to_confirmed": to_user_id
                })

        except Exception as e:
            print(e)
            current_app.logger.warning('添加图文到数据库和缓存失败')

        try:
            #  send the mass news message
            send_result = client.message.send_mass_article(to_user, media_id)
        except Exception as e:
            print(e)
        return redirect(url_for('wechat.user'))
    body_value = None
    return render_template('wechat/messagepush.html',
                           title=pushnews.title,
                           newsform=newsform,
                           form=form,
                           pushpost=pushnews,
                           body_value=body_value)
Example #37
def format_ansible_response_device_data(device_data):
    ansible_hardware_data = device_data["status"]
    ansible_hardware_data = json.loads(ansible_hardware_data)
    ansible_hardware_data_success = ansible_hardware_data.get("success", "")
    new_update_data_dict = {}
    if ansible_hardware_data_success:
        for host in ansible_hardware_data_success:
            hardware_data = ansible_hardware_data_success[host][
                "ansible_facts"]
            device_size_total = 0
            ansible_devices = hardware_data['ansible_devices']
            # total disk size in GB
            for device in ansible_devices:
                if device.startswith('sd'):
                    device_size = 0  # guard against sizes reported in other units
                    if "GB" in ansible_devices[device]["size"]:
                        device_size = float(
                            ansible_devices[device]["size"].replace("GB", ""))
                    elif "TB" in ansible_devices[device]["size"]:
                        device_size = 1024 * float(
                            ansible_devices[device]["size"].replace("TB", ""))
                    device_size_total += device_size
            device_size_total = format(device_size_total, ".1f")
            # total memory in GB
            total_memory = int(hardware_data['ansible_memtotal_mb']) / 1024

            # number of CPUs
            cpu_count = int(hardware_data["ansible_processor_count"])

            # OS name, e.g. centos 6.5-x86_64
            os_name = hardware_data["ansible_distribution"]
            os_version = hardware_data["ansible_distribution_version"]
            os_bit = hardware_data["ansible_architecture"]
            os_version_bit = os_version + "-" + os_bit
            # whether this is a virtual machine: VMware or kvm means VM; ansible
            # cannot tell blade servers from rack servers, so the device type is
            # only updated for virtual machines
            virtualization_type = hardware_data["ansible_virtualization_type"]
            # device vendor
            ansible_system_vendor = hardware_data["ansible_system_vendor"]
            # device model
            ansible_product_name = hardware_data["ansible_product_name"]
            # device serial number
            ansible_product_serial = hardware_data["ansible_product_serial"]
            # hostname
            ansible_hostname = hardware_data["ansible_nodename"]
            # all ipv4 addresses
            ansible_all_ipv4_addresses = hardware_data[
                "ansible_all_ipv4_addresses"]
            # all ipv6 addresses
            ansible_all_ipv6_addresses = hardware_data[
                "ansible_all_ipv6_addresses"]
            # look up the os_id
            if os_name and os_version_bit:
                update_cmdb_meta = UpdateCmdbMeta()
                os_id = update_cmdb_meta.get_os_id(os_name, os_version_bit)
            # look up the ip address id

            new_update_data_dict["ip"] = host
            new_update_data_dict["disk_size"] = int(
                float(device_size_total) + 0.5)
            new_update_data_dict["memory_size"] = int(
                float(total_memory) + 0.5)
            new_update_data_dict["cpu_number"] = cpu_count
            new_update_data_dict["os_name"] = os_name
            new_update_data_dict["os_version"] = os_version_bit
            new_update_data_dict["os_id"] = os_id
            new_update_data_dict["virtualization_type"] = virtualization_type
            new_update_data_dict[
                "ansible_system_vendor"] = ansible_system_vendor
            new_update_data_dict["ansible_product_name"] = ansible_product_name
            new_update_data_dict[
                "ansible_product_serial"] = ansible_product_serial
            new_update_data_dict["ansible_hostname"] = ansible_hostname
            new_update_data_dict[
                "ansible_all_ipv4_addresses"] = ansible_all_ipv4_addresses
            new_update_data_dict[
                "ansible_all_ipv6_addresses"] = ansible_all_ipv6_addresses
            device_data_from_es = list(
                get_all_device_data_from_cmdb_es(
                    serial_number=ansible_product_serial, es_index="devices"))

            # store the hardware data in redis
            if device_data_from_es:
                device_data_from_es_item = device_data_from_es[0]["_source"]
                if device_data_from_es_item:
                    new_update_data_dict[
                        "device_id"] = device_data_from_es_item["id"]
                    redis.set(
                        "zeus_cmdb_device_info:%s" %
                        (device_data_from_es_item["id"]),
                        json.dumps(new_update_data_dict))
                else:
                    # the serial number is not in es, so the cmdb has no record
                    # of this device; the device admin should be notified
                    pass
    else:
        new_update_data_dict = ansible_hardware_data
    return new_update_data_dict
Example #38
def product(id):
    product = Product.query.get_or_404(id)
    product_key = "product-%s" % product.id
    redis.set(product_key, product.name)
    redis.expire(product_key, 180)
    return render_template("catalog/product.html", product=product)
Example #39
 def save(self):
     if redis.sadd("messages", self.sha) and redis.set("message:{}".format(self.sha), self.to_dict()):
         redis.zincrby("new", self.sha, dt2ts(self.date))
         redis.zremrangebyrank("new", 0, -100)
         return True
     return False
Example #40
def runDatasApiTest_yunwei():
	"""运维发布版本后使用接口
	:param project:  测试项目
	:param env_num:  测试环境编号
	:param env_flag:  测试环境
	:return:  msg: 执行状态
	"""
	chose_run = {}
	project = request.args.get("project")
	env_num = request.args.get("env_num")
	env_flag = request.args.get("env_flag")
	developer = request.args.get("developer")
	developer_project = request.args.get("developer_project")
	branch = request.args.get("branch")
	try:
		if env_flag != "beta":
			raise Exception("当前环境非测试环境!")
		if project == "":
			raise Exception("项目不能为空!")
		if env_num == "":
			raise Exception("环境编号不能为空!")
		if env_flag == "":
			raise Exception("使用环境不能为空!")
		project_en = db.session.query(runSuiteProject.project_en, runSuiteProject.description).filter_by(project_en=project,use_status=1).first()  # look up the project
		if project_en:  # the project exists
			suite_project_en = project_en[0]
			exists_suite = os.path.exists("app/base/pythonProject/suite/%s" % (suite_project_en))  # check whether the test-suite folder exists
			if not exists_suite:
				raise Exception("测试集文件夹未存在,请创建后再次运行")
			redis_env_flag_shell = "{project_en}_env_flag".format(project_en=project_en[0])
			redis_env_num_shell = "{project_en}_env_num".format(project_en=project_en[0])
			redis.set(redis_env_flag_shell,env_flag)  # set the test environment
			redis.set(redis_env_num_shell,env_num)  # set the environment number
			s.add_set("ENV", env_num=env_num, env_flag=env_flag)  # the homepage & admin read the config.ini file
			redis_host = s.get_env("beta").split(":") if env_flag == "beta" else s.get_env("prod_stage").split(":")
			r = red.Redis(host=redis_host[0], port=int(redis_host[1]), password="******")
			r.set("021ZaJtG17hM310SblvG1NZutG1ZaJtQ",'o38sIv_7FQInsBKJEUExn7wYxoHc&21_bk4dQIEFnYz5w8zJwDqan84UFmV_XVKEO5MJf7fv1pGR8tRH2MAtxpk0Pc1SqDwe5S90CE6TQo1wd346qEA5FQ')  #wacc-order 设置 openId
			isMakeCount = db.session.query(is_Make_User.project_en).filter_by(isMake=1,project_en=project_en[0]).count()  #是否创建用户配置表查询
			#if "admin".upper() not in project_en[0].upper() and "crm".upper() not in project_en[0].upper() and "wacc-tortoise".upper() not in project_en[0].upper():  # 判断项目不等于admin&&crm,新增测试用户
			if isMakeCount:
				try:
					if env_flag in ["stage", "prod"]:
						new_env_flag = ",".join(["stage", "prod"])
					else:
						new_env_flag = "beta"
					new_phone = str(int(db.session.query(func.max(Test_User_Reg.phone)).filter_by(env=new_env_flag).first()[0]) + 1)  # max phone number + 1
					redis.set("make_user_env_flag", env_flag)
					redis.set("make_user_env_num", env_num)
					redis.set("make_user_phones", new_phone)
					redis.set("make_user_employeetypes", "0")
					result = run.run_yunwei_case("make_user", env_num, env_flag,
											 "Admin 创建用户:{phones}".format(phones=new_phone), "创建测试用户",
												 new_phone=new_phone,developer=developer,developer_project=developer_project,branch=branch)
					datas = Test_User_Reg(phone=new_phone, type="0", env=new_env_flag,description=project)  # store the new phone number in the database
					db.session.add(datas)
					db.session.commit()
					chose_run["new_phone"] = new_phone
				except Exception as e:
					db.session.rollback()
					raise Exception("ErrorMsg: 用户手机号创建失败{phone}".format(phone=new_phone))
				else:
					if chose_run.has_key("new_phone"):  #字典内存在新用户号码,传入新手机号
						# process = Thread(target=run.run_yunwei_case,name="",
						# 				  args=(project_en[0],env_num,env_flag,project_en[1],project,chose_run["new_phone"],
						# 						developer,developer_project,branch))
						# process.start()

						task = run_api_case.apply_async(args=[project_en[0],env_num,env_flag,project_en[1],
											project,chose_run["new_phone"],
								developer,developer_project,branch])
						msg = {"code": 200, "Msg": "执行成功,请查收测试报告", "url": r"http://uwsgi.sys.bandubanxie.com/Report"}
			else:
				chose_run["new_phone"] = None
				# process = Thread(target=run.run_yunwei_case,args=(project_en[0],env_num,env_flag,project_en[1],project,
				# 												  chose_run["new_phone"],developer,developer_project,branch))
				# process.start()
				# project_en,env_num,env_flag,description,project_cn,new_phone=None,developer=None,developer_project=None,branch=None
				task = run_api_case.apply_async(args=[project_en[0],env_num,env_flag,
									project_en[1],project,chose_run["new_phone"],
									developer,developer_project,branch])
				msg = {"code": 200, "Msg": "执行成功,请查收测试报告","url": r"http://uwsgi.sys.bandubanxie.com/Report"}
		else:
			raise Exception("MySql内未配置,{project}不存在".format(project=project))
	except Exception as e:
		msg = {"code":400,"Msg":"执行失败","ErrorMsg":str(e)}
	return make_response(jsonify(msg))
Example #41
 def reset_new_message(user_id, message):
     cache_key = message + str(user_id)
     redis.set(cache_key, 0)  # an expiry of -1 is invalid in redis; omit it so the key persists