Example #1
    def __init__(self, schema):
        self.schema = schema

        self.blocks = jp.match("$..blocks[*]", self.schema)
        self.sub_blocks = jp.match(
            "$..[add_block, edit_block, add_or_edit_block, remove_block]",
            self.schema)
        self.blocks_by_id = {
            block["id"]: block
            for block in self.blocks + self.sub_blocks
        }
        self.block_ids = list(self.blocks_by_id.keys())
        self.calculated_summary_block_ids = {
            block["id"]
            for block in self.blocks_by_id.values()
            if block["type"] == "CalculatedSummary"
        }
        self.sections = jp.match("$.sections[*]", self.schema)
        self.sections_by_id = {
            section["id"]: section
            for section in self.sections
        }
        self.section_ids = list(self.sections_by_id.keys())

        self.groups = jp.match("$..groups[*]", self.schema)
        self.groups_by_id = {group["id"]: group for group in self.groups}
        self.group_ids = list(self.groups_by_id.keys())

        self.list_names = jp.match(
            '$..blocks[?(@.type=="ListCollector")].for_list', self.schema)

        self._answers_with_context = {}
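These snippets reach the library through a `jp` alias; a minimal sketch, assuming `import jsonpath_rw_ext as jp` and an illustrative, made-up schema, of what `match` returns (a plain list of every value the path selects):

import jsonpath_rw_ext as jp

# Hypothetical miniature schema, only to show the shape of jp.match results.
schema = {
    "sections": [
        {"id": "section-1",
         "groups": [
             {"id": "group-1",
              "blocks": [{"id": "block-1", "type": "Question"}]}
         ]}
    ]
}

print(jp.match("$..blocks[*].id", schema))   # ['block-1']
print(jp.match("$.sections[*].id", schema))  # ['section-1']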
Example #2
    def __init__(self, schema):
        self.schema = schema

        self.blocks = jp.match("$..blocks[*]", self.schema)
        self.sub_blocks = jp.match(
            "$..[add_block, edit_block, add_or_edit_block, remove_block]",
            self.schema)
        self.blocks_by_id = {
            block["id"]: block
            for block in self.blocks + self.sub_blocks
        }
        self.block_ids = list(self.blocks_by_id.keys())
        self.sections = jp.match("$.sections[*]", self.schema)
        self.sections_by_id = {
            section["id"]: section
            for section in self.sections
        }
        self.section_ids = list(self.sections_by_id.keys())

        self.groups = jp.match("$..groups[*]", self.schema)
        self.groups_by_id = {group["id"]: group for group in self.groups}
        self.group_ids = list(self.groups_by_id.keys())

        self.list_names = jp.match(
            '$..blocks[?(@.type=="ListCollector")].for_list', self.schema)
Example #3
def result_all(data):
    titles = jp.match('data.children[*].data.title', data)
    urls = jp.match('data.children[*].data.url', data)
    selftexts = jp.match('data.children[*].data.selftext', data)
    for title, url, selftext in zip(titles, urls, selftexts):
        print(title + '\n')
        # Print the text directly; concatenating encoded bytes with str would raise TypeError.
        print(selftext + '\n')
        print(url)
Example #4
def log_comparison(plan_index, global_index, parameter_name):
    pth1 = "$.planCostShares[" + str(plan_index) + "]." + parameter_name
    pth2 = "$.globalcostshares[" + str(global_index) + "]." + parameter_name
    val1, val2 = str(jp.match(pth1,
                              plan_data)), str(jp.match(pth2, sec_plan_data))
    if val1 == val2:
        logging.info("Both files have the same value for %s", parameter_name)
    else:
        logging.error(
            "\t %s values differ: file 1 is %s and file 2 is %s",
            parameter_name, val1, val2)
Example #5
    def test(self, response):
        # print(response.text)
        html = json.loads(response.text)
        rate = jsonpath_rw_ext.match('$..rate', html)
        title = jsonpath_rw_ext.match('$..title', html)
        id = jsonpath_rw_ext.match('$..id', html)
        for i in range(0, len(rate)):
            item = FilmIntroductionItem()
            item['movie_rate'] = rate[i]
            item['movie_name'] = title[i]
            item['movie_id'] = id[i]
            yield item
Example #6
def find_index(file_json_data, planshare_costsharelevel, planshare_costsharetype, planshare_providertier):
    globalCostShares_json_path = "$.globalcostshares[*]"
    sec_index = 0
    for values in jp.match(globalCostShares_json_path, file_json_data):
        costsharelevel = jp.match("$.globalcostshares[" + str(sec_index) + "].costShareLevel", file_json_data)[0]
        costsharetype = jp.match("$.globalcostshares[" + str(sec_index) + "].costShareType", file_json_data)[0]
        providertier = jp.match("$.globalcostshares[" + str(sec_index) + "].providerTier", file_json_data)[0]

        if costsharelevel == planshare_costsharelevel and costsharetype == planshare_costsharetype:
            if providertier == planshare_providertier:
                return sec_index
        sec_index = sec_index + 1
    return -1
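Because `jp.match` already yields the matched objects, the per-index re-queries above are not strictly needed; a hedged, behaviour-equivalent sketch that assumes the same globalcostshares structure:

def find_index(file_json_data, planshare_costsharelevel, planshare_costsharetype, planshare_providertier):
    # Iterate the matched share objects directly instead of re-querying by index.
    for sec_index, share in enumerate(jp.match("$.globalcostshares[*]", file_json_data)):
        if (share.get("costShareLevel") == planshare_costsharelevel
                and share.get("costShareType") == planshare_costsharetype
                and share.get("providerTier") == planshare_providertier):
            return sec_index
    return -1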
Example #7
    def parse(self, response):
        result = json.loads(response.text)
        for image in result.get('list'):
            item = ImageItem()
            gid = image.get('grpmd5')
            item['id'] = image.get('id')
            item['url'] = image.get('qhimg_url')
            item['title'] = image.get('title')
            item['thumb'] = image.get('qhimg_thumb')
            item['gid'] = gid
            yield item

            img_path = 'D:\\images.so_webspider\\images' + os.path.sep + image.get(
                'title')
            if not os.path.exists(img_path):
                os.mkdir(img_path)
                try:
                    t = requests.get(
                        'http://images.so.com/z?a=jsondetailbygidv2&currsn=0&identity=list&ch=beauty&gid={gid}'
                        .format(gid=gid),
                        headers=self.header)
                    if codes.ok == t.status_code:
                        img_url = json.loads(t.text)
                        imgs = jsonpath_rw_ext.match('$..qhimg_url', img_url)
                        for img in imgs:
                            # New name so the outer loop variable `image` is not shadowed.
                            img_response = requests.get(img,
                                                        headers=self.header_download)
                            file_path = img_path + os.path.sep + '{img}'.format(
                                img=img[24:])
                            with open(file_path, 'wb') as f:
                                f.write(img_response.content)
                                print('Downloaded image path is %s' %
                                      file_path)
                except Exception as e:
                    print(e)
Example #8
    def get_list_of_execution_by_cycle(self, cycle_id, project_id, version_id):

        end_point = 'executions/search/cycle/'
        canonical_path = f"GET&{self.RELATIVE_PATH}{end_point}{cycle_id}&projectId={project_id}&versionId={version_id}"

        token = self.jwt.generate_jwt(canonical_path)

        headers = {
            'Authorization': f'JWT {token}',
            'Content-Type': 'text/plain',
            'zapiAccessKey': self.access_key
        }

        # Get list of executions by cycle
        print("Getting list of execution IDs by cycle")
        raw_result = requests.get(self.ZAPI_CLOUD_URL + self.RELATIVE_PATH +
                                  end_point + cycle_id + '?versionId=' +
                                  version_id + '&projectId=' + project_id,
                                  headers=headers)
        assert raw_result.status_code == 200

        if tools.is_json(raw_result.text):

            # JSON RESPONSE: convert response to JSON
            json_result = json.loads(raw_result.text)

            # PRINT RESPONSE: pretty print with 4 indent
            # print(json.dumps(json_result, indent=4, sort_keys=True))
            # Getting Execution ID Lists
            execution_ids = jp.match("$.searchObjectList[*].execution.id",
                                     json_result)
            print(f"Execution ID's are {execution_ids}")
            return execution_ids
Example #9
    def numeric_answer_ranges(self):
        numeric_answer_ranges = {}

        for answer in jp.match("$..answers[*]", self.schema):
            numeric_answer_ranges[answer["id"]] = get_numeric_range_values(
                answer, numeric_answer_ranges)

        return numeric_answer_ranges
Example #10
def step_impl(context, status):
    response = requests.request(_method, _url)
    global _result
    _result = response.json()
    archiveResponse.saveJson(str(_result))
    global _asteroids
    _asteroids = jp.match("$.near_earth_objects.*.[*]", _result)

    # Compare with == rather than `is`: identity checks on integers are unreliable.
    assert response.status_code == int(status)
Example #11
def step_impl(context):
    global response
    response = requests.request(
        "GET", settings['url']['root'] +
        settings['url']['path']['asteroids']['septemberAsteroids'])
    global data
    data = response.json()
    data = jp.match("$.near_earth_objects.*.[*]", data)
    assert response.status_code == 200
Example #12
    def get_blocks(self, **filters):
        conditions = []
        for key, value in filters.items():
            conditions.append(f'@.{key}=="{value}"')

        if conditions:
            final_condition = " & ".join(conditions)
            return jp.match(f"$..blocks[?({final_condition})]", self.schema)
        return self.blocks
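With a single filter such as type="Question", the method above evaluates a path of the following shape; a small sketch with illustrative block data (the expected output is an assumption about this toy input):

import jsonpath_rw_ext as jp

schema = {"blocks": [{"id": "b1", "type": "Question"},
                     {"id": "b2", "type": "CalculatedSummary"}]}

# get_blocks(type="Question") builds: $..blocks[?(@.type=="Question")]
print(jp.match('$..blocks[?(@.type=="Question")]', schema))
# -> [{'id': 'b1', 'type': 'Question'}]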
Example #13
    def get_shopinfo(self, response):
        # Use jsonpath_rw_ext to extract fields from the JSON response
        html = json.loads(response.text)

        branchName = jsonpath_rw_ext.match('$..branchName', html)
        categoryId = jsonpath_rw_ext.match('$..categoryId', html)
        categoryName = jsonpath_rw_ext.match('$..categoryName', html)
        id = jsonpath_rw_ext.match('$..id', html)
        matchText = jsonpath_rw_ext.match('$..matchText', html)
        name = jsonpath_rw_ext.match('$..name', html)
        priceText = jsonpath_rw_ext.match('$..priceText', html)
        recommendReasonData = jsonpath_rw_ext.match('$..recommendReasonData',
                                                    html)
        reviewCount = jsonpath_rw_ext.match('$..reviewCount', html)
        shopPower = jsonpath_rw_ext.match('$..shopPower', html)

        for i in range(0, len(name)):
            item = ShopInfoItem()
            item['branch_name'] = branchName[i]
            item['category_id'] = str(categoryId[i])
            item['category_name'] = categoryName[i]
            item['shop_id'] = id[i]
            item['match_text'] = matchText[i]
            item['name'] = name[i]
            item['avg_cost'] = priceText[i]

            # Work around shops that have no recommendReason field
            recommendReason = re.sub("{'iconHeight': 0, 'iconWidth': 0,", '',
                                     str(recommendReasonData[i]))
            recommendReason = re.sub(
                "'recommendReasonType': 0, 'recommendReasonUserId': 0}", '',
                recommendReason)
            recommendReason = re.sub("'recommendReason': '", '',
                                     recommendReason)
            recommendReason = re.sub("',", '', recommendReason)
            item['reason'] = recommendReason

            item['review_count'] = str(reviewCount[i])
            item['shop_mark'] = str(shopPower[i])
            item['created_at'] = datetime.datetime.now()
            item['updated_at'] = datetime.datetime.now()
            item['content_id'] = 0
            yield item
Example #14
    def get_other_blocks(self, block_id_to_filter, **filters):
        conditions = []
        for key, value in filters.items():
            conditions.append(f'@.{key}=="{value}"')

        if conditions:
            final_condition = " & ".join(conditions)
            return jp.match(
                f'$..blocks[?(@.id!="{block_id_to_filter}" & {final_condition})]',
                self.schema,
            )
        return self.blocks
Example #15
    def get_from_JSON(self, json_file, json_path):
        """
        Search for a value in the given JSON file.
        If multiple matches are found, only the first one is returned.
        """
        data = json.dumps(json_file)    # dict to string
        data = json.loads(data)         # string back to dict - pretty cumbersome
        value = jp.match(json_path, data)
        if not value:  # check if empty
            raise Exception("No value found for the given JSON path: {}".format(json_path))
        print("Actual value: {} {}".format(value[0], type(value[0])))
        return value[0]
Example #16
def get_price_from(page, province_code, city_code, begin_date, end_date):
    # Build the request from the page number and the province/city codes
    referer = referer_pattern.format(page=page,
                                     province=province_code,
                                     city=city_code,
                                     begin=begin_date,
                                     end=end_date)
    headers = dict(HEADERS)  # copy so the shared default headers are not mutated
    headers.update({'referer': referer})
    query = dict(QUERY)
    query.update({
        'page': str(page),
        "provinceCode": province_code,
        "cityCode": city_code,
        "startTime": begin_date,
        "endTime": end_date
    })
    r = requests.get(url, headers=headers, params=query)
    data = r.json()
    pages, brands, types, provinces, cities, counties, priceComps, times, bulks, sacks, guides = [], [], [], [], [], [], [], [], [], [], []
    print(data)
    try:
        price_list = jp.match('$..price_list[*]', data)
        # print(price_list)
        for price in price_list:
            pages.append(page)
            brands.append(price['brand'])
            types.append(price['type'])
            provinces.append(price['province'])
            cities.append(price['city'])
            counties.append(price['county'])
            priceComps.append(price['PriceComp'])
            times.append(price['time'])
            bulks.append(price['bulk'])
            sacks.append(price['sack'])
            guides.append(price['guide'])

        tuples = list(
            zip(pages, brands, types, provinces, cities, counties, priceComps,
                times, bulks, sacks, guides))
        # Chinese column headers: page, brand, cement type, province, city,
        # district, producer, date, bulk price, bagged price, guide price
        output_df = pd.DataFrame(tuples,
                                 columns=[
                                     '页数', '品牌', '水泥品种', '省份', '城市', '区域',
                                     '生产厂家', '日期', '散装价', '袋装价', '参考价'
                                 ])
        # trained = pd.DataFrame(tuples,
        #                        columns=['brand', 'type', 'province', 'city', 'county', 'PriceComp', 'date',
        #                                 'start_priceSack', 'end_priceSack', 'bulk', 'sack', '', 'guide'])
        # coll.insert_many(price_list)
        return True, output_df
    except Exception:
        return False, ''
Example #17
def _lookup(
    json_path: str,
    working_dir: str = settings.working_dir(),
    translations_json_location: str = settings.TRANSLATIONS_JSON_LOCATION,
) -> Any:
    """Return jsonpath value or empty list if JSON node doesn't exist."""
    json_data = fetch_source_data(working_dir, translations_json_location)
    value = jp.match(
        json_path,
        json_data,
    )
    value_set = set(value)
    return list(value_set)
Example #18
    async def on_post(self, req, res, *, anything):
        print("Inside POST method")
        postedJson = await req.media()
        actualJson = json.dumps(postedJson, indent=4)
        matches = jp.match('$..GlossDiv..Title', postedJson)
        if len(matches) <= 0:
            res.text = "No match found for the query"
        elif len(matches) == 1:
            if matches[0] == 'abc':
                res.media = outputJson("abc.json")
            elif matches[0] == 'xyz':
                res.media = outputJson("xyz.json")
            else:
                res.media = outputJson("abcxyz.json")
        elif len(matches) > 1:
            res.text = "More than one value matched for the query"
Example #19
def lambda_handler(event, context):
    client = FarosClient.from_event(event)

    # First, fetch the query from the input key.
    query_key = event['params'].get('key')
    if query_key is None:
        raise RuntimeError("missing 'key' parameter")
    res = requests.get(QUERY_BASE_URL + query_key)
    if res.status_code == 404:  # Nicer error if the query did not exist.
        raise RuntimeError('no such query: {!r}'.format(query_key))
    res.raise_for_status()  # Any other error type.
    query = res.json()['query']

    # Then, execute it and apply any JSONPath expression.
    data = client.graphql_execute(query['doc'])
    json_path = query.get('dataPath')
    if json_path:
        data = jsonpath_rw_ext.match(json_path, data)
    return data
Example #20
    def update_json_config(self, json_added, json_path, options, list_column,
                           config_text):
        """
        Write JSON file configuration
        """
        data = literal_eval(config_text)

        if (options != "replace"):
            json_target = jsonpath_rw_ext.match(json_path, data)
            if isinstance(json_target[0], dict):
                if len(list_column) == 1:
                    json_target[0][list_column[0]] = json_added
                    json_final = json_target[0]
                else:
                    return False, json.dumps(data, indent=4)
            else:
                for json_target_value in json_target[0]:
                    json_added.append(json_target_value)
                json_final = json_added
        else:
            json_final = json_added
        jsonpath_expr = parse(json_path)

        matches = jsonpath_expr.find(data)

        if len(matches) == 0:
            return make_response(
                json.dumps({
                    'success': False,
                    'message': 'JSON path not found.'
                }))

        for match in matches:
            data = ClientConfiguration.update_json(
                data, ClientConfiguration.get_path(match), json_final)

        return make_response(
            json.dumps({
                'success': True,
                'data': json.dumps(data, indent=4)
            }))
Example #21
# get path to vm, break apart
fullPath = jp.match1("elements[*].Path", json_data)
fullPath = fullPath[0:fullPath.rfind("/")]
pathsplit = fullPath.split("/")
print("Full path: {}".format(fullPath))
print("Parent Folder: {}".format(pathsplit[len(pathsplit) - 1]))

# show IP address and cidr/netmask
print("Default IpAddress: {}".format(
    jp.match1("elements[*].Object.Guest.IpAddress", json_data)))

hasNet = jp.match1("elements[*].Object.Guest.Net", json_data)
if hasNet:
    for IpAddress in jp.match(
            "elements[*].Object.Guest.Net[*].IpConfig.IpAddress[*]",
            json_data):
        cidr = IpAddress['PrefixLength']
        if isIPv4(IpAddress['IpAddress']):
            print("  IPv4 {}/{} netmask {}".format(IpAddress['IpAddress'],
                                                   cidr,
                                                   cidr_to_netmask(cidr)))
        else:
            print("  IPv6 {}/{}".format(IpAddress['IpAddress'], cidr))

else:
    print("There are no network elements in Guest, VM might be powered off")

# memory/cpu
#print("MaxMemoryUsage: {}".format( jp.match1("elements[*].Object.Runtime.MaxMemoryUsage",json_data) ) )
print("MemorySizeMB: {}".format(
Example #22
    def process(instance, arg):
        return jp.match(arg._expr, instance.payload)
Example #23
    pth2 = "$.globalcostshares[" + str(global_index) + "]." + parameter_name
    val1, val2 = str(jp.match(pth1,
                              plan_data)), str(jp.match(pth2, sec_plan_data))
    if val1 == val2:
        logging.info("Both files have the same value for %s", parameter_name)
    else:
        logging.error(
            "\t %s values differ: file 1 is %s and file 2 is %s",
            parameter_name, val1, val2)


planCostShares_json_path = "$.planCostShares[*]"
globalCostShares_json_path = "$.globalcostshares[*]"

index = 0
for values in jp.match(planCostShares_json_path, plan_data):
    logging.info("\n\nProduct dict at position " + str((index + 1)) +
                 "\n==========================================\n")
    ps_costsharelevel = jp.match(
        "$.planCostShares[" + str(index) + "].costShareLevel", plan_data)[0]
    ps_costsharetype = jp.match(
        "$.planCostShares[" + str(index) + "].costShareType", plan_data)[0]
    ps_providertier = jp.match(
        "$.planCostShares[" + str(index) + "].providerTier", plan_data)[0]
    gindex = pcu.get_global_share_values_index(sec_plan_data,
                                               ps_costsharelevel,
                                               ps_costsharetype,
                                               ps_providertier)
    logging.info("Found the match at the index : " + str(gindex))
    if gindex > -1:
        logging.error(
Example #24
def step_impl(context):
    data = context.response.json()
    context.data = jp.match("$.near_earth_objects.*.[*]", data)
    # Once these values are stored on the context, they are available for the rest of the test
    assert context.response.status_code == 200
Example #25
    async def matches(self, getter, session: Union[ClientSession, None]):
        got = await self.get(session=session)
        return jp.match(getter, got)
Example #26
    def contains_jsonpath(self, jsonpath):
        return jp.match(jsonpath, self.json_data)
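`jp.match` returns a list, so this method hands back the matched values themselves; if a strict boolean is preferred, a hedged variant could simply wrap the result in bool():

    def contains_jsonpath(self, jsonpath):
        # An empty match list is falsy, so this is True only when the path matches something.
        return bool(jp.match(jsonpath, self.json_data))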
Example #27
root = html.fromstring(js_comment_clean(data))


def dom_recurse(node, tags):

    temp_obj = []
    for childnode in node:

        if childnode.tag not in tags:

            attr = {}
            for key, value in childnode.attrib.items():
                attr[key] = value

            # Only nodes whose tag is not filtered out are collected; the original
            # appended child_obj[0] unconditionally, which raised IndexError for
            # filtered tags.
            temp_obj.append({
                "child": dom_recurse(childnode, tags),
                "tag": childnode.tag,
                "text": childnode.text,
                "attr": attr
            })
    return temp_obj


print(json.dumps(dom_recurse(root, [''])))
print(jp.match('$.[0].child.[*].text', dom_recurse(root, [''])))
Example #28
    async def matches(self, getter) -> list:
        got: dict = await self.to_json()
        matches: list = jp.match(getter, got)
        return matches
Example #29
    def match(self, instance):
        return jp.match(self._expr, instance.payload)
Example #30
    def return_first_value_by_jsonpath(self, jsonpath):
        for v in jp.match(jsonpath, self.json_data):
            return v
        return None
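jsonpath_rw_ext also exposes `match1` (used in Example #21), which returns the first match or None; a hedged one-line equivalent of the method above:

    def return_first_value_by_jsonpath(self, jsonpath):
        # match1 returns the first matching value, or None when nothing matches.
        return jp.match1(jsonpath, self.json_data)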