Example #1
def cards():
    if request.method == "GET":
        query_string = request.args.get("q", None)
        hid_number = request.args.get("hid_number", None)

        q_object = Q()

        if query_string:
            q_object = (q_object
                        | Q(card_number__icontains=query_string)
                        | Q(card_category__icontains=query_string)
                        | Q(name__icontains=query_string)
                        | Q(job_number__icontains=query_string)
                        | Q(department__icontains=query_string))

        if hid_number:
            q_object = q_object | Q(hid_card_number__icontains=hid_number)

        offset = request.args.get("offset", 0)
        limit = request.args.get("limit", 50)

        try:
            cards = (
                Card.objects.filter(q_object).order_by("-created_time").skip(
                    int(offset)).limit(int(limit)))

        except Exception:
            current_app.logger.exception("get cards failed")
            abort(500)
        else:
            return make_response(cards.to_json())

    elif request.method == "POST":
        cards_list = request.json
        return_list = []
        try:
            for index, card in enumerate(cards_list):
                if index == 0:
                    continue
                if len(card) == 1:
                    continue

                c1 = Card(
                    card_number=normalize_card_number(card[0]),
                    card_category=card[1].strip(),
                    name=card[2].strip(),
                    job_number=card[3].strip(),
                    department=card[4].strip(),
                    gender=card[5].strip(),
                    note=card[6].strip() if card[6] else "default",
                )

                c1.save()

                return_list.append(c1)
        except Exception:
            current_app.logger.exception("post cards failed")
            abort(500)

        else:
            update_all_cards_to_mc_task.delay()
            return make_response(jsonify({"result": len(return_list)}))

    elif request.method == "DELETE":
        cards_to_delete = json.loads(request.args["delete_array"])
        cards_to_delete2 = []
        try:
            for card in cards_to_delete:
                card_obj = Card.objects.get(pk=card)
                card_2 = json.loads(card_obj.to_json())
                cards_to_delete2.append(card_2)
                card_obj.delete()

        except Exception:
            current_app.logger.exception("delete cards failed")
            abort(500)

        else:
            for card_2 in cards_to_delete2:
                delete_a_card_from_mc_task.delay(card_2)
            return make_response(jsonify({"result": len(cards_to_delete)}))
Example #2
    def check_user(self, username, password):
        if not Users.objects(
                Q(user_name__exact=username) & Q(password__exact=password)):
            return None
        user = Users.objects.get(user_name=username)
        return user
    def get(self, request, format=None):
        # userid=int(request.GET['userId'])
        json_out={}
        data=[]
        tokens = decode_base64(request.META.get('HTTP_AUTHORIZATION'))

        # tokens = base64.b64decode(request.META.get('HTTP_AUTHORIZATION'))
        time_stamp = tokens[-13:-3]

        if abs(int(time_stamp)-int(time.time())) < 60:

            tokens = re.sub(r'#.*#.*','',tokens)
            # tokens = json_data['userid']
            pld = Auth.decode_auth_token(tokens)

            user_id = pld['data']['id']
            login_time = pld['data']['login_time']
            try:
                # res=Hot_Topic.objects(Q(user_id=userid)&Q(_id__lte=137))
                res=Hot_Topic.objects(Q(_id__lte=140))
                for topic in res:
                    temp={}
                    temp['topicId']=topic._id
                    temp['topicName']=topic.topic_name
                    temp['topicKeywords']=topic.topic_kws
                    temp['imgs']= []
                    if topic._id == 130:
                        temp['imgs'] = [
                        "http://www.moe.edu.cn/jyb_xwfb/xw_fbh/moe_2069/xwfbh_2017n/xwfb_070621/201706/W020170621386205134556.jpg",
                            "http://www.moe.gov.cn/jyb_xwfb/s6052/moe_838/201805/W020180503693998846906.jpg"
                                            ]
                    elif topic._id == 131 :
                        temp['imgs'] = [
                                    "https://gss0.baidu.com/-vo3dSag_xI4khGko9WTAnF6hhy/zhidao/wh%3D600%2C800/sign=62b13c2e0d46f21fc9615655c6144758/cefc1e178a82b901ffebb1e6748da9773812efa5.jpg",
                                        "http://www.81.cn/hkht/attachement/jpg/site351/20150921/18037331c3e11769f42e0e.jpg"
                                            ]
                    elif topic._id == 132:
                        temp['imgs'] = [
                        "http://img.caixin.com/2017-05-28/1495931830739657_480_320.jpg",
                            "http://img.caixin.com/2017-05-28/1495931818880517.jpg"
                                            ]   
                    elif topic._id == 133 :
                        temp['imgs'] = [
                        "https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1525780379793&di=a3a5919e75d989bd440cbaf220285986&imgtype=0&src=http%3A%2F%2Fn.sinaimg.cn%2Fnews%2F20170111%2Fd009-fxzkssy1982912.jpg",
                        "https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1525780367057&di=4fa93c0e58282e8b922ab9164582251d&imgtype=0&src=http%3A%2F%2Fi2.sinaimg.cn%2Fdy%2Fzl%2Fzatan%2F2015-12-17%2FU12776P1T940D5128F24202DT20151217094145.jpg"

                                            ]
                    elif topic._id == 134 :
                        temp['imgs'] = [
                        "http://img1.cache.netease.com/catchpic/8/86/866900747709FBDAD41BC788DC166E85.jpg",
                            "http://www.cs090.com/uploads/userup/333977/2015/1451104091-62H352.jpg"
                                            ]
                    elif topic._id == 135 :
                        temp['imgs'] = [
                        "https://goss3.vcg.com/creative/vcg/800/version23/VCG21a809942a0.jpg",
                            "http://www.gsstc.gov.cn/pic/2014_04/%7B194C4BEF-CA53-41CB-17F9-247BE28F05BE%7D.JPG"
                                            ]
                    elif topic._id == 137 :
                        temp['imgs'] = [
                        "http://www.17ok.com/files_root/upload_file/media/images/201310/20131014112307322.jpg",
                            "http://jjckb.xinhuanet.com/images/2014-12/20/xin_111211201146295262568.jpg"
                        ]
                    elif topic._id == 138 :
                        temp['imgs'] = [
                                "http://image.sinajs.cn/newchart/hk_stock/min_660/hsi.gif",
                                    "http://www.cash.com.hk/en/img/banner.jpg"
                        ]
                    elif topic._id == 139 :
                        temp['imgs'] = [
                                "http://campusrd.zhaopin.com/CompanyLogo/20141018/492BFE08208B4E1E9590F1CCE0784F8A.jpg",
                                "https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1525780345536&di=d559538bb39e2c9db59177e851842025&imgtype=0&src=http%3A%2F%2Fcdns.soufunimg.com%2Fimgs%2Fhouse%2F2015_08%2F10%2Fhefei%2F1439169810477_000.jpg"

                        ]
                    elif topic._id == 136 :
                        temp['imgs'] = [
                                "http://finance.ce.cn/rolling/201305/13/W020130615723679354246.jpg",
                                    "http://www.0938f.com/userfiles/image/20150924/24100931069729c1de2991.jpg"
                        ]
                    else :
                        pass
                    # res =  Post_News.objects(hot_topic_id=topic._id)
                    # for post_res in res :
                    #     if len(post_res.img_url) > 5:
                    #         temp['imgs'].append(post_res.img_url)
                    #     else :
                    #         pass
                    # try:        
                    #     temp['imgs'] = random.sample(temp['imgs'],5)
                    # except:
                    #     temp['imgs'] = temp['imgs']
                    temp['summary']=topic.summary
                    data.append(temp)
                json_out['code']=0
                json_out['success']=True
                json_out['data']=data
            except Exception:
                traceback.print_exc()
                json_out['code']=1
                json_out['success']=False
                json_out['data']={}

            return HttpResponse(json.dumps(json_out, cls=MyEncoder),content_type="application/json")
        else:
            json_out['code'] = 1
            json_out['data'] = '认证失败'  # "authentication failed"
            json_out['success'] = False
            return HttpResponse(json.dumps(json_out, cls=MyEncoder),content_type="application/json")
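
Both handlers in this example slice a ten-digit Unix timestamp out of positions [-13:-3] of the decoded token and reject requests older than 60 seconds. A standalone sketch of that freshness check; the token layout (three trailing characters after the timestamp) is an assumption inferred from the slice:

import time

# Hypothetical token layout: payload, a 10-digit Unix timestamp, then 3 trailing chars.
token = 'payload#signature#' + str(int(time.time())) + 'abc'

time_stamp = token[-13:-3]  # the 10-digit timestamp
is_fresh = abs(int(time_stamp) - int(time.time())) < 60
print(is_fresh)  # True while the token is under a minute old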
Example #4
def cardtests():
    q_object = Q()

    datetime_from = request.args.get("datetime_from", None)
    datetime_to = request.args.get("datetime_to",
                                   None)  # '2018-07-20T07:15:00.000Z'
    job_number = request.args.get("job_number", None)
    card_number = request.args.get("card_number", None)
    department = request.args.get("department", None)
    is_downloading_excel = request.args.get("is_downloading_excel", None)
    name = request.args.get("name", None)
    mc_id = request.args.get("mc_id", None)
    card_cat = request.args.get("card_cat", None)
    hid_number = request.args.get("hid_number", None)

    if datetime_from:
        datetime_from = datetime.datetime.strptime(
            datetime_from,
            "%Y-%m-%dT%H:%M:%S.%fZ").replace(tzinfo=datetime.timezone.utc)
        q_object = q_object & Q(test_datetime__gte=datetime_from)

    if datetime_to:
        datetime_to = datetime.datetime.strptime(
            datetime_to,
            "%Y-%m-%dT%H:%M:%S.%fZ").replace(tzinfo=datetime.timezone.utc)
        q_object = q_object & Q(test_datetime__lte=datetime_to)

    if hid_number:
        card_number = hid_to_normal(hid_number)
        q_object = q_object & Q(card_number__icontains=card_number)

    elif card_number:
        card_number = normalize_card_number(card_number)
        q_object = q_object & Q(card_number__icontains=card_number)

    if job_number:
        cards = Card.objects.filter(job_number__icontains=job_number)
        q_object = q_object & Q(
            card_number__in=[card.card_number for card in cards])

    if department:
        cards = Card.objects.filter(department__icontains=department)
        q_object = q_object & Q(
            card_number__in=[card.card_number for card in cards])

    if name:
        cards = Card.objects.filter(name__icontains=name)
        q_object = q_object & Q(
            card_number__in=[card.card_number for card in cards])
    if mc_id:
        q_object = q_object & Q(mc_id__icontains=mc_id)

    if card_cat:
        q_object = q_object & Q(card_category=card_cat)

    offset = request.args.get("offset", 0)
    limit = request.args.get("limit", 50)

    try:
        if is_downloading_excel:
            results = [[  # Excel header row (column labels are in Chinese)
                "log_id",
                "卡片编号",
                "卡片号码",
                "卡片分类",
                "进出标志",
                "mc id",
                "测试时间",
                "测试结果",
                "是否测试",
                "手测试值(KΩ)",
                "左脚测试值(KΩ)",
                "右脚测试值(KΩ)",
                "erg后数值",
                "rsg",
                "姓名",
                "工号",
                "HID 卡号",
            ]]
            logs = CardTest.objects.filter(q_object).order_by("-test_datetime")

            for log in logs:
                card_category = ""
                if log["card_category"] == "0":
                    card_category = "VIP"
                if log["card_category"] == "1":
                    card_category = "只测手"
                if log["card_category"] == "2":
                    card_category = "只测脚"
                if log["card_category"] == "3":
                    card_category = "手脚都测"

                in_out_symbol = ""
                if log["in_out_symbol"] == "0":
                    in_out_symbol = "出"
                if log["in_out_symbol"] == "1":
                    in_out_symbol = "进"

                # Get the system's configured time zone
                system_config_timezone = int(
                    SystemConfig.objects.get().timezone)
                local_tz = datetime.timezone(
                    datetime.timedelta(hours=system_config_timezone))
                test_datetime = (log["test_datetime"].replace(
                    tzinfo=datetime.timezone.utc).astimezone(
                        local_tz).isoformat())

                test_result = ""
                if log["test_result"] == "0":
                    test_result = "不通过"
                if log["test_result"] == "1":
                    test_result = "通过"

                is_tested = ""
                if log["is_tested"] == "0":
                    is_tested = "不测试"
                if log["is_tested"] == "1":
                    is_tested = "测试"

                name = ""
                job_number = ""
                try:
                    card = Card.objects.filter(
                        card_number=log["card_number"]).first()
                    name = card["name"]
                    job_number = card["job_number"]

                except Exception:
                    pass

                results.append([
                    log["log_id"],
                    log["card_counter"],
                    log["card_number"],
                    card_category,
                    in_out_symbol,
                    log["mc_id"],
                    test_datetime,
                    test_result,
                    is_tested,
                    log["hand"],
                    log["left_foot"],
                    log["right_foot"],
                    log["after_erg"],
                    log["rsg"],
                    name,
                    job_number,
                    hid_number,
                ])
            return excel.make_response_from_array(results, "xlsx")

        cards = (
            CardTest.objects.filter(q_object).order_by("-test_datetime").skip(
                int(offset)).limit(int(limit)))
        return cards.to_json(), {"Content-Type": "application/json"}
    except Exception:
        current_app.logger.exception("get cardtests failed")
        abort(500)
Example #5
def package_list_for_sharing_table(request):
    try:
        if request.method != 'GET':
            return HttpResponseNotAllowed(['GET'])
        # Authenticate
        user = authentication(request)
        if user is None:
            return error(
                Exception('You have no permission for this operation.'))
        # Get the ajax parameters
        # Number of rows to display
        iDisplayLength = int(request.GET['iDisplayLength'])
        # Start index for the display window
        iDisplayStart = int(request.GET['iDisplayStart'])
        # Search string
        sSearch = request.GET['sSearch']
        # Column to sort by
        sort_col = int(request.GET['iSortCol'])
        # Sort order ('desc' means descending)
        sort_dir = request.GET['sSortDir']

        order_query = None
        SORT_INDEX_PACKAGE_NAME = 3

        # package_name
        if sort_col == SORT_INDEX_PACKAGE_NAME:
            order_query = 'package_name'

        # Ascending / descending
        if order_query is not None:
            # 'desc' means descending
            if sort_dir == 'desc':
                order_query = '-' + order_query

        # Find the list of communities covered by the search
        community_objects = Communities.objects.filter(name__icontains=sSearch)
        # Search
        objects = StixFiles.objects.filter(
            Q(package_name__icontains=sSearch)
            | Q(input_community__in=community_objects)) \
            .order_by(order_query)
        objects = objects.filter(Q(is_post_sns__ne=False))

        # Extract the visible page of data from the search results
        data = []
        for d in objects[iDisplayStart:(iDisplayStart + iDisplayLength)]:
            r = {}
            r['comment'] = d.comment
            r['package_name'] = d.package_name
            r['package_id'] = d.package_id
            try:
                r['input_community'] = d.input_community.name
            except BaseException:
                r['input_community'] = ''
            data.append(r)

        # Build the response data
        r_data = {}
        r_data['iTotalRecords'] = StixFiles.objects.count()
        r_data['iTotalDisplayRecords'] = objects.count()
        r_data['data'] = data
        resp = get_normal_response_json()
        resp['data'] = r_data
        return JsonResponse(resp)
    except Exception as e:
        traceback.print_exc()
        return error(e)
    def get(self, request, format=None):
        # userid=int(request.GET['userId'])
        topicid=int(request.GET['topicId'])
        json_out={}
        data={}
        tokens = decode_base64(request.META.get('HTTP_AUTHORIZATION'))

        # tokens = base64.b64decode(request.META.get('HTTP_AUTHORIZATION'))
        time_stamp = tokens[-13:-3]

        if abs(int(time_stamp)-int(time.time())) < 60:

            tokens = re.sub(r'#.*#.*','',tokens)
            # tokens = json_data['userid']
            pld = Auth.decode_auth_token(tokens)

            user_id = pld['data']['id']
            login_time = pld['data']['login_time']
            try:
                year_now=date.today().year
                month_now=date.today().month
                day_now=date.today().day
                datatype_list = Datatype_name.objects
                month_find=0
                year_find=0
                date_find_min=None
                date_find_max=None
                if month_now<3:
                    month_find=month_now-2+12
                    year_find=year_now-1
                else:
                    month_find=month_now-2
                    year_find=year_now
                date_find_min=datetime.datetime.combine(date(year_find, month_find, 1), datetime.time.min)
                date_find_max=datetime.datetime.combine(date(year_now, month_now, day_now), datetime.time.max)
                topics=Hot_Topic.objects(_id=topicid)
                topics_name = []
                topic_relat_list = []
                topic_relations = Topic_Relation.objects(Q(_id=topicid))
                # print len(topic_relations)
                for topic_relat in topic_relations:
                    index = topic_relat._id - 130
                    for ind,val in enumerate(topic_relat.topic_relat) :
                        # print '222'
                        if index == ind :
                            continue
                        triple_group = {}
                        triple_group['source'] = index
                        triple_group['target'] = ind
                        triple_group['weight'] = val
                        # triple_group = [index,ind,val]
                        topic_relat_list.append(triple_group)


                

                all_topics = Hot_Topic.objects(Q(_id__gte=130)&Q(_id__lte=139))
                print(len(all_topics))
                for topic in all_topics:
                    topic_obj = {}
                    topic_obj['id'] = topic._id
                    topic_obj['topic_name'] = topic.topic_name
                    topic_obj['topic_kws'] = topic.topic_kws
                    topic_obj['user_id'] = topic.user_id
                    topic_obj['user_name'] = topic.user_name
                    topic_obj['summary'] = topic.summary
                    topics_name.append(topic_obj)
                for topic in topics:
                    data['topicId']=topic._id
                    data['topic_index']=topic._id-130

                    res_evos=Topic_evolution.objects(Q(topic_id=str(topicid)))
                    print('11')
                    # count = 0
                    data_list=[]
                    for item in res_evos:
                        # count += 1
                        # if count == 500 :
                        #   break
                        # else :
                        #   pass
                        temp={}
                        # temp['id']=str(item._id)
                        temp['time']=item.time
                        temp['number']=item.number
                        temp['topic_id']=item.topic_id
                        temp['word']=item.word
                        # temp['topic']=item.topic[0]
                        data_list.append(temp)
                    data['topic_info']=data_list

                    img_list = []

                    data['topicName']=topic.topic_name
                    data['topic_kws']={}
                    data['all_topic']=topics_name
                    data['topic_relation']=topic_relat_list
                    for kw in topic.topic_kws:
                        data['topic_kws'][kw] = random.randint(1,100)
                    # data['topic_kws']=topic.topic_kws
                    # 
                    click_num=0
                    postdata=[]
                    for data_type in datatype_list:
                        print('11111111')
                        res1=Post_News.objects(Q(hot_topic_id=topicid)&Q(data_type=data_type.data_type)).only('pt_time','site_id','data_type','comm_num','img_url','title','content')
                        # res1=Post_News.objects(Q(hot_topic_id=topicid)).only('pt_time','site_id','data_type','comm_num')
                        # click_num=res1.sum('comm_num')
                        print(len(res1))
                        # click_num=click_num+res1.sum('comm_num')
                        for post in res1:
                            temp={}
                            temp['postTime']=post.pt_time
                            temp['site_id']=post.site_id
                            temp['title']=post.title
                            if len(post.img_url) > 5:
                                img_list.append(post.img_url)
                            else :
                                pass
                            temp['content']=post.content[:100]
                            temp['dataType']=post.data_type
                            temp['site_name']=sitedict[int(post.site_id)]
                            temp['dataTypeName']=datatypedict[int(post.data_type)]
                            postdata.append(temp)

                    # summa = 0

                    
                    # for post in res1[:200]:

                    #   # if summa > 100 :
                    #   #   continue
                    #   # else:
                    #   #   pass
                    #   # summa +=1

                    #     temp={}
                    #     temp['postTime']=post.pt_time
                    #     temp['site_id']=post.site_id
                    #     temp['title']=post.title
                    #     if len(post.img_url) > 5:
                    #         img_list.append(post.img_url)
                    #     else :
                    #         pass
                    #     temp['content']=post.content
                    #     temp['dataType']=post.data_type
                    #     temp['site_name']=sitedict[int(post.site_id)]
                    #     temp['dataTypeName']=datatypedict[int(post.data_type)]
                    #     postdata.append(temp)
                    try:
                        data['img_url'] = random.sample(img_list,5)
                    except ValueError:  # fewer than 5 images available
                        data['img_url'] = img_list

                    data['clickNums']=click_num
                    data['postData']=postdata
                json_out['code']=0
                json_out['success']=True
                json_out['data']=data
            except Exception:
                traceback.print_exc()
                json_out['code']=1
                json_out['success']=False
                json_out['data']={}

            return HttpResponse(json.dumps(json_out, cls=MyEncoder),content_type="application/json")
        else:
            json_out['code'] = 1
            json_out['data'] = '认证失败'  # "authentication failed"
            json_out['success'] = False
            return HttpResponse(json.dumps(json_out, cls=MyEncoder),content_type="application/json")
Example #7
def _get_observables_cashes_query(pattern, package_id):
    q = Q()
    # Extract each type's values from the pattern string with regexes and join the clauses with OR
    for v in url_pattern.findall(pattern):
        q |= (Q(package_id__ne=package_id) & Q(type__exact='uri')
              & Q(value__exact=v))
    for v in ipv4_pattern.findall(pattern):
        q |= (Q(package_id__ne=package_id) & Q(type__exact='ipv4')
              & Q(value__exact=v))
    for v in domain_name_pattern.findall(pattern):
        q |= (Q(package_id__ne=package_id) & Q(type__exact='domain_name')
              & Q(value__exact=v))
    for v in md5_pattern.findall(pattern):
        q |= (Q(package_id__ne=package_id) & Q(type__exact='md5')
              & Q(value__exact=v))
    for v in sha1_pattern.findall(pattern):
        q |= (Q(package_id__ne=package_id) & Q(type__exact='sha1')
              & Q(value__exact=v))
    for v in sha256_pattern.findall(pattern):
        q |= (Q(package_id__ne=package_id) & Q(type__exact='sha256')
              & Q(value__exact=v))
    for v in sha512_pattern.findall(pattern):
        q |= (Q(package_id__ne=package_id) & Q(type__exact='sha512')
              & Q(value__exact=v))
    return q
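
The helper returns a single OR-combined Q object instead of running a query, leaving the caller to choose the collection. A self-contained sketch of one loop of that construction; the regex here is illustrative, not the project's actual url/hash patterns:

import re
from mongoengine import Q

ipv4_pattern = re.compile(r'(?:\d{1,3}\.){3}\d{1,3}')  # illustrative pattern

def build_type_query(pattern_text, package_id, type_name, regex):
    # OR together exact-value hits of one type, excluding the given package.
    q = Q()
    for v in regex.findall(pattern_text):
        q |= (Q(package_id__ne=package_id) & Q(type__exact=type_name)
              & Q(value__exact=v))
    return q

q = build_type_query("traffic to 203.0.113.7 and 198.51.100.2",
                     'pkg-001', 'ipv4', ipv4_pattern)
# ObservableCaches.objects.filter(q) would then return the matching caches.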
Example #8
    def post(self, request, format=None):
        recommendFilter = request.data

        # Run the query with the filters, ordered by rating (descending)
        recipesQuery = Recipes.objects(
            Q(calories__lt=recommendFilter["calories"])
            & Q(protein__lt=recommendFilter["protein"])
            & Q(sodium__lt=recommendFilter["sodium"])
            & Q(ingredients__nin=recommendFilter["ingredients"])).order_by(
                "-rating")[:5000]  #Mirar como limitar la consulta

        # recipesQuery = Recipes.objects(
        #     Q(calories__lt=recommendFilter["calories"]) & Q(protein__lt=recommendFilter["protein"]) &
        #     Q(sodium__lt=recommendFilter["sodium"])
        # ).order_by("-rating")

        serializer = RecipeSerializer(recipesQuery, many=True)

        # recipes = recipesQuery.data
        #
        # if recipes.__len__() == 0:  # doesn't it support __len__()?
        #     return Response(status=status.HTTP_404_NOT_FOUND)
        # elif recipes.__len__() > 1:
        #     serializer = RecipeSerializer(recipes, many=True)
        # else:
        #     serializer = RecipeSerializer(recipes, many=False)
        # Build the json
        # response = {"recipes": serializer.data}

        # Build a DataFrame from the serialized data
        dataframe = pd.DataFrame.from_dict(serializer.data)

        # Select the columns we want to train on
        train = dataframe[["calories", "protein", "sodium"]]
        data_train, test_train = train_test_split(train,
                                                  test_size=0.2,
                                                  random_state=42)

        # Train on the data
        k = 10  # Number of samples the model will return
        neig = neighbors.NearestNeighbors(
            n_neighbors=k, metric='cosine')  # k-nearest-neighbours model
        neig.fit(data_train)  # Fit the training data
        top_k_distances, top_k_items = neig.kneighbors(
            data_train,
            return_distance=True)  # Get the nearest neighbours

        top = []
        for item in top_k_items:
            top.extend(item)
        # a = top_k_items[0]  # pick the first option
        recipes = dataframe.iloc[top]  # Look up the rows in the dataframe
        response = {"recipes": recipes.T}  # Build a JSON payload

        # Pick the recommended recipes and build diets from the user-supplied limits
        diets = []

        diet = Diet()
        day = Day()

        # diet = Diet()
        # diet.days = []

        countCalories = 0.0
        countSodium = 0.0
        countProteins = 0.0
        morning = 0
        count = 0

        for i in recipes[[
                "id", "title", "calories", "protein", "sodium", "rating",
                "ingredients", "directions", "categories", "description"
        ]].values:  # Iterate over the remaining values
            cal = float(i[2])
            prot = float(i[3])
            sod = float(i[4])
            rate = float(i[5])

            if len(diets) == 10:
                break

            if cal < recommendFilter["calories"] and prot < recommendFilter["protein"]\
                    and sod < recommendFilter["sodium"]\
                    and (countCalories + cal) <= recommendFilter["calories"]\
                    and (countProteins + prot) <= recommendFilter["protein"]\
                    and (countSodium + sod) <= recommendFilter["sodium"]\
                    and len(diet.days) <= 7:
                recipe = Rec(i[1], cal, prot, sod, rate, i[6], i[7], i[8],
                             i[9])
                countCalories += cal
                countProteins += prot
                countSodium += sod

                if morning == 0:
                    day.recipes.append(recipe.__dict__)
                    morning = 1
                elif morning == 1:
                    day.name = "Día " + str(count + 1)
                    day.recipes.append(recipe.__dict__)
                    morning = 0
                    count = count + 1

                    diet.days.append(day.__dict__)
                    day = Day()
            if len(diet.days) >= 7:
                diets.append(diet.__dict__)
                diet = Diet()

                countCalories = 0.0
                countSodium = 0.0
                countProteins = 0.0
                morning = 0
                count = 0

        response = {"diet": diets}

        # for i in diets:
        #     print(i.__dict__)
        #     print(i)
        #     print(i.__str__())
        #
        # response = json.dumps([di.__dict__ for di in diets])
        # print(response)

        # return JsonResponse()
        return Response(response, status=status.HTTP_200_OK)
Example #9
        ret = {}
        try:
            sceneInfoResult = sceneInfo.objects(
                Q(userId=id) & Q(triggerIntent__contains=intentName)).first()
            if sceneInfoResult is not None:
                ret['isTrigger'] = True
                ret['sceneName'] = sceneInfoResult['sceneName']
            else:
                ret['isTrigger'] = False
                ret['sceneName'] = ""
        except Exception:
            traceback.print_exc()
            ret['isTrigger'] = False
            ret['sceneName'] = ""
        return ret


if __name__ == "__main__":
    userId = "user111"
    sceneName = "SCENE_ADJUST_QUOTA"
    sceneResult = sceneInfo.objects(Q(userId=userId)
                                    & Q(sceneName=sceneName)).first()
    if (sceneResult is not None):
        sceneResultDict = sceneResult.to_mongo().to_dict()
        triggerIntent = sceneResultDict['triggerIntent']
        flowConfig = sceneResultDict['flowConfig']
        intentInfo = {}
        for key in triggerIntent:
            intentInfo[key] = LoadData().loadIntentData(intentName=key,
                                                        userId=userId)
Example #10
def _get_exact_matched_info(package_id):
    ret_observable_cashes = []

    # Cache lists targeted by the match search
    cache_collections = [
        ObservableCaches,  # Observables from STIX 1.x / 2.x
        ExploitTargetCaches,
    ]

    for cache_collection in cache_collections:
        caches = cache_collection.objects.filter(package_id=package_id)
        for cache in caches:
            # Find caches whose type and value match but whose package_id differs (observable-to-observable search)
            # Skip if the observable has no value
            if cache.value is None:
                continue
            exacts = cache_collection.objects.filter(
                Q(type__exact=cache.type)
                & Q(value__exact=cache.value)
                & Q(package_id__ne=package_id))
            ret_observable_cashes.extend(exacts)

            # For ObservableCaches, additionally check whether the observable matches a STIX v2 indicator pattern string
            if isinstance(cache, ObservableCaches):
                # Between the observable and STIX 2.x indicators
                indicator_v2_caches = IndicatorV2Caches.objects.filter(
                    Q(package_id__ne=package_id))
                for indicator_v2_cache in indicator_v2_caches:
                    observed_data = _get_observed_data(cache)
                    matches = matcher.match(indicator_v2_cache.pattern,
                                            [observed_data])
                    if len(matches) != 0:
                        indicator_v2_cache.type = cache.type
                        indicator_v2_cache.value = cache.value
                        # The start_node is looked up in ObservableCaches or ExploitTargetCaches
                        indicator_v2_cache.start_collection = cache_collection
                        # The stored end_info is the IndicatorV2Cache that corresponds to the given package_id's ObservableCache
                        ret_observable_cashes.append(indicator_v2_cache)

    # Matching against the STIX2 indicator caches
    indicator_v2_caches = IndicatorV2Caches.objects.filter(
        package_id=package_id)
    for indicator_v2_cache in indicator_v2_caches:
        # Find ObservableCaches (with a different package_id) that match this IndicatorV2Cache
        query_set = _get_observables_cashes_query(indicator_v2_cache.pattern,
                                                  package_id)
        caches = ObservableCaches.objects.filter(query_set)
        for cache in caches:
            observed_data = _get_observed_data(cache)
            matches = matcher.match(indicator_v2_cache.pattern,
                                    [observed_data])
            if len(matches) != 0:
                # The start_info is looked up in IndicatorV2Caches
                cache.start_collection = IndicatorV2Caches
                cache.pattern = indicator_v2_cache.pattern
                # The stored end_info is the ObservableCache that matches the given package_id's Indicator v2
                ret_observable_cashes.append(cache)

        # Search for this STIX2 indicator pattern under package IDs other than our own
        exacts = IndicatorV2Caches.objects.filter(
            Q(pattern__exact=indicator_v2_cache.pattern)
            & Q(package_id__ne=package_id))
        for exact in exacts:
            exact.cache_list = IndicatorV2Caches
            # The start_node is looked up in IndicatorV2Caches
            exact.start_collection = IndicatorV2Caches
            # Store the IndicatorV2Caches (under package_ids other than the given one) that match the given package_id's Indicator v2
            ret_observable_cashes.append(exact)
    return ret_observable_cashes
Example #11
def advanced_search(query):
    # lexer
    toks = []
    while len(query) > 0:
        incr = 0
        if re.search(r'^ ', query):
            incr = 1
        elif re.search(r'^AND', query):
            incr = 3
            toks.append('AND')
        elif re.search(r'^NOT', query):
            incr = 3
            toks.append('NOT')
        else:
            next_term = re.search(r' +(AND|NOT) +', query)
            if next_term:  # gets next term based on everything before next AND or NOT
                toks.append(query[:next_term.span()[0]])
                incr = next_term.span()[0]
            else:  # if next term is at end of query
                term = re.search(r'^\S.*', query)
                if term:
                    toks.append(term.group())
                    incr = len(term.group())

        query = query[incr:]

    # parser
    and_count = 0
    ands = None
    nots = None
    # validates and preprocesses query
    if toks[0] == 'AND':
        return []
    if toks[0] != 'NOT':
        ands = Q(tag=re.compile("^" + toks[0] + "$", re.IGNORECASE))
        toks = toks[1:]
        and_count += 1
    if len(toks) % 2 == 1:
        return []

    while len(toks) > 0:
        if toks[0] == 'AND':
            q = Q(tag=re.compile("^" + toks[1] + "$", re.IGNORECASE))
            if ands is None:
                ands = q
            else:
                ands = ands | q
            and_count += 1
        elif toks[0] == 'NOT':
            q = Q(tag=re.compile("^" + toks[1] + "$", re.IGNORECASE))
            if nots is None:
                nots = q
            else:
                nots = nots | q
        else:
            return []
        toks = toks[2:]

    # get ids
    and_tags = Tag.objects(ands) if ands is not None else []
    not_tags = Tag.objects(nots) if nots is not None else []
    # if invalid tag exists in and tags
    if len(and_tags) != and_count:
        return []

    # get files
    q = None
    for tag in and_tags:
        if q is None:
            q = Q(tags__contains=tag.id)
        else:
            q = q & Q(tags__contains=tag.id)
    for tag in not_tags:
        if q is None:
            q = Q(tags__ne=tag.id)
        else:
            q = q & Q(tags__ne=tag.id)

    files = File.objects(q)
    return files
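
The grammar accepted by advanced_search is effectively term (AND term | NOT term)*: the query may not start with AND, and every operator must be followed by a term. A few illustrative calls; only the rejected cases are independent of the Tag/File collections:

advanced_search('cats')              # ands = Q(tag=/^cats$/i)
advanced_search('cats AND dogs')     # ands = /^cats$/i | /^dogs$/i
advanced_search('cats NOT birds')    # ands = /^cats$/i, nots = /^birds$/i
advanced_search('AND cats')          # -> [] (query may not start with AND)
advanced_search('cats AND')          # -> [] (dangling operator)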
Example #12
    def get(self, dataset_id):
        """ Endpoint called by image viewer client """

        parsed_args = page_data.parse_args()
        per_page = parsed_args.get('limit')
        page = parsed_args.get('page') - 1
        folder = parsed_args.get('folder')
        order = parsed_args.get('order')

        args = dict(request.args)

        # Check if dataset exists
        dataset = current_user.datasets.filter(id=dataset_id,
                                               deleted=False).first()
        if dataset is None:
            return {'message': 'Invalid dataset id'}, 400

        # Make sure the folder string is in the proper format
        if len(folder) > 0:
            folder = folder[0].strip('/') + folder[1:]
            if folder[-1] != '/':
                folder = folder + '/'

        # Get directory
        directory = os.path.join(dataset.directory, folder)
        if not os.path.exists(directory):
            return {'message': 'Directory does not exist.'}, 400

        # Remove parsed arguments
        for key in parsed_args:
            args.pop(key, None)

        # Generate query from remaining arguments
        query = {}
        for key, value in args.items():
            lower = value.lower()
            if lower in ["true", "false"]:
                value = json.loads(lower)

            if len(lower) != 0:
                query[key] = value

        # Change category_ids__in to list
        if 'category_ids__in' in query.keys():
            query['category_ids__in'] = [
                int(x) for x in query['category_ids__in'].split(',')
            ]

        # Initialize mongo query with required elements:
        query_build = Q(dataset_id=dataset_id)
        query_build &= Q(path__startswith=directory)
        query_build &= Q(deleted=False)

        # Define query names that should use complex logic:
        complex_query = ['annotated', 'category_ids__in']

        # Add additional 'and' arguments to mongo query that do not require complex_query logic
        for key in query.keys():
            if key not in complex_query:
                query_dict = {}
                query_dict[key] = query[key]
                query_build &= Q(**query_dict)

        # Add additional arguments to mongo query that require more complex logic to construct
        if 'annotated' in query.keys():

            if 'category_ids__in' in query.keys() and query['annotated']:

                # Only show annotated images with selected category_ids
                query_dict = {}
                query_dict['category_ids__in'] = query['category_ids__in']
                query_build &= Q(**query_dict)

            else:

                # Only show non-annotated images
                query_dict = {}
                query_dict['annotated'] = query['annotated']
                query_build &= Q(**query_dict)

        elif 'category_ids__in' in query.keys():

            # Show annotated images with selected category_ids or non-annotated images
            query_dict_1 = {}
            query_dict_1['category_ids__in'] = query['category_ids__in']

            query_dict_2 = {}
            query_dict_2['annotated'] = False
            query_build &= (Q(**query_dict_1) | Q(**query_dict_2))

        # Perform mongodb query
        images = current_user.images \
            .filter(query_build) \
            .order_by(order).only('id', 'file_name', 'annotating', 'annotated', 'num_annotations', 'category_ids')

        total = images.count()
        pages = int(total / per_page) + 1

        images = images.skip(page * per_page).limit(per_page)
        images_json = query_util.fix_ids(images)
        # for image in images:
        #     image_json = query_util.fix_ids(image)

        #     query = AnnotationModel.objects(image_id=image.id, deleted=False)
        #     category_ids = query.distinct('category_id')
        #     categories = CategoryModel.objects(id__in=category_ids).only('name', 'color')

        #     image_json['annotations'] = query.count()
        #     image_json['categories'] = query_util.fix_ids(categories)

        #     images_json.append(image_json)

        subdirectories = [
            f for f in sorted(os.listdir(directory))
            if os.path.isdir(directory + f) and not f.startswith('.')
        ]

        categories = CategoryModel.objects(id__in=dataset.categories).only(
            'id', 'name')

        return {
            "total": total,
            "per_page": per_page,
            "pages": pages,
            "page": page,
            "images": images_json,
            "folder": folder,
            "directory": directory,
            "dataset": query_util.fix_ids(dataset),
            "categories": query_util.fix_ids(categories),
            "subdirectories": subdirectories
        }
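
The handler turns arbitrary query-string keys into filter terms by expanding one-entry dicts into Q keyword arguments, which is how operator suffixes such as category_ids__in pass straight through. A minimal sketch of that mechanism (building the Q object needs no database connection):

from mongoengine import Q

args = {'annotated': True, 'category_ids__in': [1, 2]}

query_build = Q(deleted=False)
for key, value in args.items():
    query_build &= Q(**{key: value})  # e.g. Q(category_ids__in=[1, 2])

# query_build now ANDs every term; pass it to SomeModel.objects.filter(query_build).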
Example #13
    def random(self, request, *args, **kwargs):
        count = Place.objects.count()
        offset = randint(0, max(0, count - 10))  # avoid a negative offset when count < 10
        places = Place.objects(
            Q(types__contains="restaurant") | Q(types__contains="bar")
            | Q(types__contains="food") | Q(types__contains="bakery")
            | Q(types__contains="cafe") | Q(types__contains="casino")
            | Q(types__contains="convenience_store")
            | Q(types__contains="meal_delivery")
            | Q(types__contains="make_takeaway")
            | Q(types__contains="nightclub")
            | Q(types__contains="shopping_mall")).limit(10).skip(offset)
        serializer = PlaceSerializer(places, many=True)
        return Response(serializer.data)
Example #14
def dashboard_sourceData(request):
    if request.method == 'GET':
        json_out = {}
        main_out = {}
        days_num = 0
        data = {}
    
        try:
            # sourcedata
            days_list = []
            day = pd.Period(datetime.datetime.now(),freq='D')
            # logger.info(type(Topic.objects))  # Topic.objects.all() and Topic.objects() return the same type
            topic_list = Topic.objects
            datatype_list = Datatype_name.objects

            today = date.today()
            post_7days = Post.objects(Q(pt_time__gte=datetime.datetime.combine\
                            (today-datetime.timedelta(7),datetime.time.min)) &
                            Q(pt_time__lte=datetime.datetime.combine(today, datetime.time.max)))

            while days_num < 7:
                day_str = day.strftime('%Y%m%d')  

                day_change = today - datetime.timedelta(days=days_num)
                post = post_7days(Q(pt_time__gte=\
                            datetime.datetime.combine(day_change,datetime.time.min)) & \
                            Q(pt_time__lte=datetime.datetime.combine\
                            (day_change, datetime.time.max)))


                for topic in topic_list:
                    for data in datatype_list:
                        day_dict = {}
                        day_dict['time'] = day_str
                        day_dict['topic_id'] = topic._id
                        day_dict['topic_name'] = topic.topic_name
                        day_dict['dataTypeName'] = data.datatype_name
                        day_dict['data_type'] = data.data_type
                        post_datatype = post(Q(data_type=data.data_type) & Q(topic_id=topic._id))
                        # logger.info('post_num = ' + str(len(post_datatype)))
                        day_dict['post_num'] = post_datatype.count()
                        days_list.append(day_dict)

                for data in datatype_list:
                    day_dict = {}
                    day_dict['time'] = day_str
                    day_dict['topic_id'] = 0
                    day_dict['topic_name'] = ''
                    day_dict['dataTypeName'] = data.datatype_name
                    day_dict['data_type'] = data.data_type
                    post_datatype = post(Q(data_type=data.data_type) & Q(topic_id=0))
                    # logger.info('post_num = ' + str(len(post_datatype)))
                    day_dict['post_num'] = post_datatype.count()
                    days_list.append(day_dict)

                day -= 1
                days_num += 1


            #######  Hot data for all posts
            hot_dict = {}
            hot_posts = post_7days(Q(topic_id__ne=0) & (Q(data_type=3) | Q(data_type=2))).order_by \
                                  ('-comm_num')[:10].only("_id", "url", \
                                    "board", "title", "content", "pt_time", \
                                    "img_url", "poster")

            # hot_poster = post_7days.only('poster').all()
            # logger.info("hot_poster = " + str(hot_poster.count()))
    
            hot_weibo = post_7days(Q(topic_id__ne=0) & Q(data_type=2)).order_by \
                                ('-comm_num')[:10].only("_id", "url", \
                                "board", "title", "content", "pt_time", \
                               "img_url")

            hot_dict['hotPost'] = handle_post_list(hot_posts)
            hot_dict['hotPoster'] = handle_poster_list(hot_posts)
            hot_dict['hotWeibo'] = handle_post_list(hot_weibo)


            # wordlist = []
            # wordres=Cloud_formain.objects.only("word", "frequency")
            # for worditem in wordres:
            #     temp={}
            #     temp['word']=worditem.word
            #     temp['weight']=worditem.frequency
            #     wordlist.append(temp)


            #######  map data
            mapData_list = [{'id':'001',
                                     'pro':"陕西",
                                     'nums':52
                                    },
                                    {
                                     'id':'002',
                                     'pro':"北京",
                                     'nums':100
                                    },
                                    {
                                    'id':'003',
                                     'pro':"上海",
                                     'nums':60
                                     },
                                    {
                                    'id':'004',
                                     'pro':"杭州",
                                     'nums':48
                                     },
                                    {
                                    'id':'005',
                                    'pro':"南京",
                                    'nums':50
                                    }
                                ]

            main_out['mapData'] = mapData_list
            main_out['sourceData'] = days_list
            main_out['Hot'] = hot_dict
            # main_out['word_cloud'] = wordlist



            json_out['code'] = 0
            json_out['success'] = True
            json_out['data'] = main_out
        except Exception:
            traceback.print_exc()
            json_out['code'] = 1
            json_out['data'] = {}
            json_out['success'] = False

        return HttpResponse(json.dumps(json_out, cls=MyEncoder),content_type="application/json")
Example #15
def l1_info_for_l1table(request):
    try:
        if request.method != 'GET':
            return HttpResponseNotAllowed(['GET'])
        # Authenticate
        user = authentication(request)
        if user is None:
            return error(
                Exception('You have no permission for this operation.'))
        # Get the ajax parameters
        # Number of rows to display
        iDisplayLength = int(request.GET['iDisplayLength'])
        # Start index for the display window
        iDisplayStart = int(request.GET['iDisplayStart'])
        # Search string
        sSearch = request.GET['sSearch']
        # Column to sort by
        sort_col = int(request.GET['iSortCol'])
        # Sort order ('desc' means descending)
        sort_dir = request.GET['sSortDir']
        # Alias information
        # If absent, treat it as empty
        try:
            aliases_str = request.GET['aliases']
            alias_lists = json.loads(aliases_str)
        except BaseException:
            alias_lists = []

        order_query = None

        SORT_INDEX_TYPE = 0
        SORT_INDEX_VALUE = 1
        SORT_INDEX_PACKAGE_NAME = 2
        SORT_INDEX_TILE = 3
        SORT_INDEX_DESCRIPTION = 4
        SORT_INDEX_TIMESTAMP = 5

        # type
        if sort_col == SORT_INDEX_TYPE:
            order_query = 'type'
        # value
        elif sort_col == SORT_INDEX_VALUE:
            order_query = 'value'
        # package_name
        elif sort_col == SORT_INDEX_PACKAGE_NAME:
            order_query = 'package_name'
        # title
        elif sort_col == SORT_INDEX_TILE:
            order_query = 'title'
        # description
        elif sort_col == SORT_INDEX_DESCRIPTION:
            order_query = 'description'
        # timestamp
        elif sort_col == SORT_INDEX_TIMESTAMP:
            order_query = 'produced'

        # Ascending / descending
        if order_query is not None:
            # 'desc' means descending
            if sort_dir == 'desc':
                order_query = '-' + order_query

        # query
        # Turn the search words into a list
        tmp_sSearches = list(set(sSearch.split(' ')))
        # Remove empty elements
        if '' in tmp_sSearches:
            tmp_sSearches.remove('')

        # Build the search list
        sSearches = []
        for item in tmp_sSearches:
            # First, add the original word itself
            sSearches.append(item)
            # Check the alias_lists one by one
            for alias_list in alias_lists:
                # If the search word appears in an alias_list, every word in that list becomes a search target
                if item in alias_list:
                    sSearches.extend(alias_list)

        # Remove duplicates
        sSearches = list(set(sSearches))

        # Build the filter
        filters = Q()
        # Count it as a hit if any of these strings (aliases included) is contained
        for sSearch in sSearches:
            filters = filters | Q(type__icontains=sSearch)
            filters = filters | Q(value__icontains=sSearch)
            filters = filters | Q(package_name__icontains=sSearch)
            filters = filters | Q(title__icontains=sSearch)
            filters = filters | Q(description__icontains=sSearch)
        # Search
        objects = ObservableCaches.objects.filter(filters).order_by(
            order_query)

        # Extract the visible page of data from the search results
        data = []
        for d in objects[iDisplayStart:(iDisplayStart + iDisplayLength)]:
            r = {}
            r['type'] = d.type
            r['value'] = d.value
            r['package_name'] = d.package_name
            r['package_id'] = d.stix_file.package_id
            r['title'] = d.title
            r['description'] = d.description
            r['created'] = str(d.created)
            r['stix_v2'] = d.stix_file.is_stix_v2()
            r['observable_id'] = d.observable_id
            data.append(r)

        # Build the response data
        r_data = {}
        r_data['iTotalRecords'] = ObservableCaches.objects.count()
        r_data['iTotalDisplayRecords'] = objects.count()
        r_data['data'] = data
        resp = get_normal_response_json()
        resp['data'] = r_data
        return JsonResponse(resp)
    except Exception as e:
        traceback.print_exc()
        return error(e)
Example #16
def dashboard_sourceData_temp(request):
    if request.method == 'GET':
        json_out = {}
        main_out = {}
        days_num = 0
        data = {}

        try:
            # sourcedata
            days_list = []
            topic_list = Topic.objects
            day = pd.Period(datetime.datetime.now(),freq='D')
            # logger.info(type(Topic.objects))  # Topic.objects.all() and Topic.objects() return the same type
            datatype_list = Datatype_name.objects

            post_7days = Post.objects(Q(pt_time__gte=datetime.datetime.combine\
                             (date.today()-datetime.timedelta(6),datetime.time.min)) &
                             Q(pt_time__lte=datetime.datetime.combine(date.today(), datetime.time.max)))

            while days_num < 7:
                day_dict = {}
                day_str = day.strftime('%Y%m%d')
                day_dict['time'] = day_str

                post = post_7days(Q(pt_time__gte=\
                             datetime.datetime.combine(date.today()-\
                             datetime.timedelta(days=days_num), \
                             datetime.time.min)) & \
                             Q(pt_time__lte=datetime.datetime.combine\
                             (date.today()-datetime.timedelta(days=days_num), \
                             datetime.time.max)))

                for topic in topic_list:
                    for data in datatype_list:
                        day_dict = {}
                        day_dict['time'] = day_str
                        day_dict['topic_id'] = topic._id
                        day_dict['topic_name'] = topic.topic_name
                        day_dict['dataTypeName'] = data.datatype_name
                        day_dict['data_type'] = data.data_type
                        post_datatype = post(Q(data_type=data.data_type) & Q(topic_id=topic._id))
                        # logger.info('post_num = ' + str(len(post_datatype)))
                        day_dict['post_num'] = post_datatype.count()
                        days_list.append(day_dict)


                # for data in datatype_list:
                #     day_dict['dataTypeName'] = data.datatype_name
                #     day_dict['data_type'] = data.data_type
                #     post_datatype = post(Q(data_type=data.data_type))
                #     # logger.info('post_num = ' + str(len(post_datatype)))
                #     day_dict['post_num'] = len(post_datatype)

                #     day_dict_ = dict(day_dict)
                #     days_list.append(day_dict_)

                day -= 1
                days_num += 1


            #######  Hot data for all posts
            hot_dict = {}
            hot_posts_temp = post_7days(Q(data_type=3))
            hot_posts = hot_posts_temp.order_by \
                                   ('-comm_num')[:10].only("_id", "url", \
                                     "board", "title", "content", "pt_time", \
                                     "img_url","comm_num","repost_num")

            hot_weibo_temp = post_7days(Q(data_type=2))
            hot_weibo = hot_weibo_temp.order_by \
                                 ('-comm_num')[:10].only("_id", "url", \
                                 "board", "title", "content", "pt_time", \
                                "img_url","comm_num","repost_num")

            hot_poster = hot_posts_temp.order_by('-poster.post_num')[:10].only("poster")

            hot_weibouser_post_num = hot_weibo_temp.order_by('-poster.post_num').only("poster")
            hot_weibouser = []
            hot_weibouser_id = []
            num_ten = 0
            for item in hot_weibouser_post_num:
                if num_ten == 10:
                    break
                if item.poster.id not in hot_weibouser_id:
                    hot_weibouser.append(item)
                    hot_weibouser_id.append(item.poster.id)
                    num_ten += 1

            hot_dict['hotPost'] = handle_post_list1(hot_posts)
            hot_dict['hotPoster'] = handle_poster_list1(hot_poster)
            hot_dict['hotWeibo'] = handle_post_list1(hot_weibo)
            # hot_dict['hotPoster'] = handle_poster_list(hot_posts)

            hot_dict['hotWeiboUser'] = handle_weibouser_list1(hot_weibouser)


             #######  map data
            mapData_list = [{'id':'001',
                                      'pro':"陕西",
                                      'nums':52
                                     },
                                     {
                                      'id':'002',
                                      'pro':"北京",
                                      'nums':100
                                     },
                                     {
                                     'id':'003',
                                      'pro':"上海",
                                      'nums':60
                                      },
                                     {
                                     'id':'004',
                                      'pro':"杭州",
                                      'nums':48
                                      },
                                     {
                                     'id':'005',
                                     'pro':"南京",
                                     'nums':50
                                     }
                                 ]

            wordlist = []
            wordres=Cloud_formain.objects(Q(topic_id=999)).only("word", "frequency")
            for worditem in wordres:
                temp={}
                temp['word']=worditem.word
                temp['weight']=worditem.frequency
                wordlist.append(temp)

            

            main_out['mapData'] = mapData_list
            main_out['sourceData'] = days_list
            main_out['Hot'] = hot_dict
            main_out['word_cloud'] = wordlist

            json_out['code'] = 0
            json_out['success'] = True
            json_out['data'] = main_out
        except Exception:
            traceback.print_exc()
            json_out['code'] = 1
            json_out['data'] = {}
            json_out['success'] = False

        return HttpResponse(json.dumps(json_out, cls=MyEncoder),content_type="application/json")
Example #17
    def _sync_user_role_assignments(self, user_db, role_assignment_dbs, role_assignment_apis):
        """
        Synchronize role assignments for a particular user.

        :param user_db: User to synchronize the assignments for.
        :type user_db: :class:`UserDB`

        :param role_assignment_dbs: Existing user role assignments.
        :type role_assignment_dbs: ``list`` of :class:`UserRoleAssignmentDB`

        :param role_assignment_apis: List of user role assignments to apply.
        :type role_assignment_apis: ``list`` of :class:`UserRoleAssignmentFileFormatAPI`

        :rtype: ``tuple``
        """
        db_roles = set([(entry.role, entry.source) for entry in role_assignment_dbs])

        api_roles = [
            list(izip_longest(entry.roles, [], fillvalue=entry.file_path))
            for entry in role_assignment_apis
        ]

        api_roles = set(list(chain.from_iterable(api_roles)))

        # A list of new assignments which should be added to the database
        new_roles = api_roles.difference(db_roles)

        # A list of assignments which need to be updated in the database
        updated_roles = db_roles.intersection(api_roles)

        # A list of assignments which should be removed from the database
        removed_roles = db_roles - api_roles

        LOG.debug('New assignments for user "%s": %r' % (user_db.name, new_roles))
        LOG.debug('Updated assignments for user "%s": %r' % (user_db.name, updated_roles))
        LOG.debug('Removed assignments for user "%s": %r' % (user_db.name, removed_roles))

        # Build a list of role assignments to delete
        roles_to_delete = updated_roles.union(removed_roles)

        role_assignment_dbs_to_delete = [
            role_assignment_db
            for role_assignment_db in role_assignment_dbs
            if (role_assignment_db.role, role_assignment_db.source) in roles_to_delete
        ]

        for role_name, assignment_source in roles_to_delete:
            queryset_filter = (
                Q(user=user_db.name)
                & Q(role=role_name)
                & Q(source=assignment_source)
                & (Q(is_remote=False) | Q(is_remote__exists=False))
            )

            UserRoleAssignmentDB.objects(queryset_filter).delete()

            LOG.debug(
                'Removed role "%s" from "%s" for user "%s".'
                % (role_name, assignment_source, user_db.name)
            )

        # Build a list of roles assignments to create
        roles_to_create = new_roles.union(updated_roles)
        created_role_assignment_dbs = []

        for role_name, assignment_source in roles_to_create:
            role_db = Role.get(name=role_name)
            if not role_db:
                msg = 'Role "%s" referenced in assignment file "%s" doesn\'t exist'
                raise ValueError(msg % (role_name, assignment_source))

            role_assignment_api = [
                r for r in role_assignment_apis if r.file_path == assignment_source
            ][0]
            description = getattr(role_assignment_api, "description", None)

            assignment_db = rbac_service.assign_role_to_user(
                role_db=role_db, user_db=user_db, source=assignment_source, description=description
            )

            created_role_assignment_dbs.append(assignment_db)

            LOG.debug(
                'Assigned role "%s" from "%s" for user "%s".'
                % (role_name, assignment_source, user_db.name)
            )

        return (created_role_assignment_dbs, role_assignment_dbs_to_delete)
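
The create/update/delete decision in this method is plain set algebra over (role, source) tuples, with "updated" assignments deleted and then re-created. A standalone sketch of that bookkeeping with made-up values:

db_roles = {('admin', 'a.yaml'), ('dev', 'b.yaml')}   # assignments in the database
api_roles = {('admin', 'a.yaml'), ('ops', 'c.yaml')}  # assignments in the files

new_roles = api_roles - db_roles      # create: {('ops', 'c.yaml')}
updated_roles = db_roles & api_roles  # delete then re-create: {('admin', 'a.yaml')}
removed_roles = db_roles - api_roles  # delete: {('dev', 'b.yaml')}

roles_to_delete = updated_roles | removed_roles
roles_to_create = new_roles | updated_roles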