Example #1
        def run_assertion(method, description, args, expected):
            description = method + ': ' + description
            print
            print "Running", description, method
            method = {
                'diff': diff,
                'apply_diff': apply_diff,
                'transform': transform_object_diff
            }[method]

            if method is transform_object_diff:
                if len(args) == 3:
                    original, obja, objb = args
                    policy = None
                else:
                    original, obja, objb, policy = args
                diffa = diff(original, obja, policy)['v']
                diffb = diff(original, objb, policy)['v']
                tdiff = transform_object_diff(diffa, diffb, original, policy)
                intermediate = apply_diff(original, diffb)
                method = apply_diff
                args = [intermediate, tdiff]

            got = method(*args)
            if not equals(got, expected):
                print
                print "Exception coming..."
                print
                print "\tdescription", description
                print "\tgot", simplejson.dumps(got)
                print "\texp", simplejson.dumps(expected)
            return equals(got, expected)
Example #2
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument("first")
    parser.add_argument("second")
    parser.add_argument("-p", "--patch", action="store_true", default=False)
    parser.add_argument("-s", "--syntax", action="store", type=str, default="compact")
    parser.add_argument("-i", "--indent", action="store", type=int, default=None)

    args = parser.parse_args()

    with open(args.first, "r") as f:
        with open(args.second, "r") as g:
            jf = json.load(f)
            jg = json.load(g)
            if args.patch:
                x = jsondiff.patch(
                    jf,
                    jg,
                    marshal=True,
                    syntax=args.syntax
                )
            else:
                x = jsondiff.diff(
                    jf,
                    jg,
                    marshal=True,
                    syntax=args.syntax
                )

            json.dump(x, sys.stdout, indent=args.indent)
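(Not part of the original script: a minimal sketch of what the two code paths above return, assuming jsondiff's documented behaviour; the sample dicts are invented. With marshal=True the symbol keys become plain strings such as "$delete", so json.dump on the result works.)

import json
import jsondiff

a = {"name": "Bob", "age": 42, "food": "okonomiyaki"}
b = {"name": "Casimir", "age": 42, "location": "Torcy"}

# Diff two documents; marshal=True makes the result JSON-serializable.
d = jsondiff.diff(a, b, marshal=True, syntax="compact")
print(json.dumps(d, indent=2))

# patch() applies a previously computed (non-marshalled) diff back onto the
# original document; with the default syntax this typically round-trips to b.
print(jsondiff.patch(a, jsondiff.diff(a, b)) == b)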
Example #3
File: models.py Project: Affirm/moto
 def parse_payload(cls, desired, reported):
     if desired is None:
         delta = reported
     elif reported is None:
         delta = desired
     else:
         delta = jsondiff.diff(desired, reported)
     return delta
Example #4
File: plugin.py Project: jedmitten/diffy
    def run(self, items: List[dict], **kwargs) -> List[dict]:
        """Run simple difference calculation on results based on a baseline."""
        logger.debug('Performing simple local baseline analysis.')

        if not kwargs.get('baseline'):
            raise BadArguments('Cannot run simple analysis. No baseline found.')

        for i in items:
            i['diff'] = diff(kwargs['baseline']['stdout'], i['stdout'])

        return items
Example #5
    def run(self, items: List[dict], **kwargs) -> List[dict]:
        """Run simple difference calculation on results based on a baseline."""
        logger.debug("Performing simple local baseline analysis.")

        if not kwargs.get("baseline"):
            raise BadArguments("Cannot run simple analysis. No baseline found.")

        for i in items:
            i["diff"] = diff(kwargs["baseline"]["stdout"], i["stdout"])

        return items
Example #6
def test_diff():
    data1 = {"age": 42,
            "name": "Bob",
            "food": "okonomiyaki"
            }
    data2 = {"name": "Casimir",
            "Location": "Torcy",
            "age": 42
            }
    assert diff(data1, data2) == {
        '-food': 'okonomiyaki',
        'name': 'Casimir',
        '+Location': 'Torcy'}
Example #7
    def test_restart_heketi_pod(self):
        """Validate restarting heketi pod"""

        # create heketi volume
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url,
                                        size=1, json=True)
        self.assertTrue(vol_info, "Failed to create heketi volume of size 1")
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_info['id'], raise_on_error=False)
        topo_info = heketi_topology_info(self.heketi_client_node,
                                         self.heketi_server_url,
                                         json=True)

        # get heketi-pod name
        heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
                                               self.heketi_dc_name)

        # delete heketi-pod (it restarts the pod)
        oc_delete(self.ocp_master_node[0], 'pod', heketi_pod_name)
        wait_for_resource_absence(self.ocp_master_node[0],
                                  'pod', heketi_pod_name)

        # get new heketi-pod name
        heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],
                                               self.heketi_dc_name)
        wait_for_pod_be_ready(self.ocp_master_node[0],
                              heketi_pod_name)

        # check heketi server is running
        self.assertTrue(
            hello_heketi(self.heketi_client_node, self.heketi_server_url),
            "Heketi server %s is not alive" % self.heketi_server_url
        )

        # compare the topology
        new_topo_info = heketi_topology_info(self.heketi_client_node,
                                             self.heketi_server_url,
                                             json=True)
        self.assertEqual(new_topo_info, topo_info, "topology info is not same,"
                         " difference - %s" % diff(topo_info, new_topo_info))

        # create new volume
        vol_info = heketi_volume_create(self.heketi_client_node,
                                        self.heketi_server_url,
                                        size=2, json=True)
        self.assertTrue(vol_info, "Failed to create heketi volume of size 20")
        heketi_volume_delete(
            self.heketi_client_node, self.heketi_server_url, vol_info['id'])
Example #8
def step_impl(context, response):
    if response == 'empty':
        json_data = {}
        json_expected = {}
    else:
        json_data = json.loads(context.last_response.text)
        json_expected = json.loads(response)

    # remove the id from response and expected result
    if isinstance(json_data, list):
        [data.pop('id', None) for data in json_data]
        [data.pop('created', None) for data in json_data]
        [data.pop('id', None) for data in json_expected]
        [data.pop('created', None) for data in json_expected]

    else:
        json_data.pop('id', None)
        json_data.pop('created', None)
        json_expected.pop('id', None)
        json_expected.pop('created', None)

    assert len(diff(json_data, json_expected)) == 0
Example #9
File: editconf.py Project: azogue/enerpi
def _web_post_changes_enerpi_sensors_json(dict_web_form, lines_config, dict_config, dest_filepath=None):
    """
    Process changes in web editor of ENERPI SENSORS JSON File

    :param dict_web_form: :OrderedDict: Posted Form with new JSON data
    :param lines_config: :list: original file text lines (1) with original JSON
    :param dict_config: :OrderedDict: original config dict of dicts
                            (like the one 'web_edit_enerpi_sensors_json' returns)
    :param dest_filepath: :str: (optional) destination filepath for save configuration changes
    :return: :tuple: (:dict: alert message, :list: text lines, :OrderedDict: updated dict_config)

    """
    alerta = None
    t, sub = TITLE_EDIT_JS_SENSORS, SUBTITLE_EDIT_JS_SENSORS
    try:
        ant_json = json.loads(dict_config[t][sub][0], encoding=ENCODING)
        new_json = json.loads(dict_web_form[sub], encoding=ENCODING)
        diff_json = diff(ant_json, new_json)
        if diff_json:
            if dest_filepath is not None:
                log('New JSON Sensors config ("{}")\nwill be saved in "{}"'
                    .format(new_json, dest_filepath), 'warning', False)
                with open(dest_filepath, 'w') as f:
                    json.dump(new_json, f, indent=1)
            str_cambios = ('Configuration changes in ENERPI SENSORS:<br>{}<br> New config SAVED!'
                           .format('JSON DIFF- <strong>"{}"</strong>'.format(diff_json)))
            log(str_cambios, 'debug', False)
            alerta = {'alert_type': 'warning', 'texto_alerta': str_cambios}
            if dest_filepath is not None:
                lines_config = get_lines_file(dest_filepath)
                ok, dict_config = _web_edit_enerpi_sensors_json(lines_config)
                if not ok:
                    alerta['alert_type'] = 'error'
                    alerta['texto_alerta'] += dict_config['error']
                    alerta['texto_alerta'] += '\nNEW JSON SENSORS FILE NOT VALID!! FIX IT, PLEASE'
    except json.decoder.JSONDecodeError:
        msg_err = ('JSONDecodeError in web_post_changes_enerpi_sensors_json: {}'.format(dict_config[t][sub]))
        alerta = {'alert_type': 'error', 'texto_alerta': msg_err}
    return alerta, lines_config, dict_config
Example #10
def main():
    j = open('./directory.json').read()
    directory = json.loads(j)
    directory_master = get_directory_from_master_branch()
    directory_diff = {}
    directory_diff = json.loads(diff(directory_master, directory, syntax='explicit', dump=True))
    spec_dir = './api.freifunk.net/specs/*.json'
    spec_files = glob.glob(spec_dir)
    for spec_file in spec_files:
        spec_content = open(spec_file).read()
        ff_api_specs[os.path.splitext(os.path.basename(spec_file))[0]] = json.loads(spec_content)

    urls_to_load = []
    invalid_urls = []

    if "$insert" in directory_diff:
        print("check inserted entries")
        for x in directory_diff["$insert"]:
            urls_to_load.append(directory_diff["$insert"][x])
    
    if "$update" in directory_diff:
        print("check updated entries")
        for x in directory_diff["$update"]:
            urls_to_load.append(directory_diff["$update"][x])

    if urls_to_load == []:
        print("check all files, as nothing else changed")
        for x in directory_master:
            urls_to_load.append(directory_master[x])

    result = fetch_parallel(urls_to_load)

    if result.empty():
        print('Result: All URLs are valid :-)')
        sys.exit(0)
    else:
        print('\nResult: Invalid URLs found :-(')
        sys.exit(1)
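(Illustrative sketch, not from the original project; the directory keys and URLs below are hypothetical. It shows the structure the code above relies on: with syntax='explicit' and dump=True, diff() returns a JSON string whose marshalled keys are "$insert", "$update" and "$delete".)

import json
from jsondiff import diff

old_directory = {"berlin": "https://example.org/berlin.json"}
new_directory = {"berlin": "https://example.org/berlin-v2.json",
                 "leipzig": "https://example.org/leipzig.json"}

# dump=True serialises the diff to JSON, so symbol keys arrive as strings.
directory_diff = json.loads(diff(old_directory, new_directory,
                                 syntax='explicit', dump=True))

for key in directory_diff.get("$update", {}):
    print("re-check updated entry:", directory_diff["$update"][key])
for key in directory_diff.get("$insert", {}):
    print("check inserted entry:", directory_diff["$insert"][key])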
Example #11
def process_bounty_changes(old_bounty, new_bounty):
    """Process Bounty changes.

    Args:
        old_bounty (dashboard.models.Bounty): The old Bounty object.
        new_bounty (dashboard.models.Bounty): The new Bounty object.

    """
    from dashboard.utils import build_profile_pairs
    profile_pairs = None
    # process bounty sync requests
    did_bsr = False
    for bsr in BountySyncRequest.objects.filter(
            processed=False, github_url=new_bounty.github_url):
        did_bsr = True
        bsr.processed = True
        bsr.save()

    # get json diff
    json_diff = diff(old_bounty.raw_data,
                     new_bounty.raw_data) if old_bounty else None

    # new bounty
    if not old_bounty or (not old_bounty and new_bounty
                          and new_bounty.is_open) or (not old_bounty.is_open
                                                      and new_bounty.is_open):
        is_greater_than_x_days_old = new_bounty.web3_created < (
            timezone.now() - timezone.timedelta(hours=24))
        if is_greater_than_x_days_old:
            msg = f'attempting to create a new bounty ({new_bounty.standard_bounties_id}) when is_greater_than_x_days_old = True'
            print(msg)
            raise Exception(msg)
        event_name = 'new_bounty'
    elif old_bounty.num_fulfillments < new_bounty.num_fulfillments:
        event_name = 'work_submitted'
    elif old_bounty.is_open and not new_bounty.is_open:
        if new_bounty.status == 'cancelled':
            event_name = 'killed_bounty'
        else:
            event_name = 'work_done'
    else:
        event_name = 'unknown_event'
        logging.error(
            f'got an unknown event from bounty {old_bounty.pk} => {new_bounty.pk}: {json_diff}'
        )

    print(f"- {event_name} event; diff => {json_diff}")

    # Build profile pairs list
    if new_bounty.fulfillments.exists():
        profile_pairs = build_profile_pairs(new_bounty)

    # marketing
    if event_name != 'unknown_event':
        print("============ posting ==============")
        did_post_to_twitter = maybe_market_to_twitter(new_bounty, event_name)
        did_post_to_slack = maybe_market_to_slack(new_bounty, event_name)
        did_post_to_github = maybe_market_to_github(new_bounty, event_name,
                                                    profile_pairs)
        did_post_to_email = maybe_market_to_email(new_bounty, event_name)
        print("============ done posting ==============")

        # what happened
        what_happened = {
            'did_bsr': did_bsr,
            'did_post_to_email': did_post_to_email,
            'did_post_to_github': did_post_to_github,
            'did_post_to_slack': did_post_to_slack,
            'did_post_to_twitter': did_post_to_twitter,
        }

        print("changes processed: ")
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(what_happened)
    else:
        print('No notifications sent - Event Type Unknown = did_bsr: ',
              did_bsr)
Example #12
            # elif str(diff_name) == '$update':
            #     self.reqless.append(self.diff_json[diff_name])
            elif str(diff_name) == '$insert':
                self.sqlless.append(self.diff_json[diff_name])
            else:
                if 'delete' in str(
                        self.diff_json[diff_name]) or 'insert' in str(
                            self.diff_json[diff_name]) or 'update' in str(
                                self.diff_json[diff_name]):
                    diff_json_match(self.diff_json[diff_name], self.sqlless,
                                    self.reqless, self.matchinfo).find_diff()
                else:
                    self.matchinfo.append(
                        {diff_name: self.diff_json[diff_name]})
        return {
            '查询缺少': self.sqlless,    # "missing on the query/SQL side"
            '返回值缺少': self.reqless,  # "missing in the response"
            '匹配结果': self.matchinfo   # "matched results"
        }


if __name__ == '__main__':
    a = {}
    # '服务器内部处理错误' means "internal server error"
    b = {'code': 500, 'data': None, 'message': '服务器内部处理错误', 'success': False}
    matchinfo = diff(a, b, syntax='symmetric')
    print(matchinfo)
    # matchinfo = [{}, {'code': 500, 'data': None, 'message': '服务器内部处理错误', 'success': False}]

    mmm = diff_json_match(matchinfo, [], [], []).find_diff()
    print(mmm)
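(A hedged aside, not from the original file: with syntax='symmetric' and two documents that share nothing, jsondiff typically collapses the diff to the pair [left, right], which is exactly the commented-out matchinfo above. When the documents partly overlap, changed values come back as [old, new] pairs and one-sided keys appear under symbol keys; the exact shape can vary by version. The sample payloads below are invented.)

from jsondiff import diff

old_resp = {'code': 200, 'data': None, 'success': True}
new_resp = {'code': 500, 'data': None, 'message': 'internal error', 'success': False}

# Shared keys whose values changed come back as [old, new] pairs; keys that
# only exist on one side appear under jsondiff's insert/delete symbol keys.
print(diff(old_resp, new_resp, syntax='symmetric'))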
Example #13
    def test_json(self):

        entrypoint = parser.from_JSON_string(TEST_JSON)
        out = parser.to_JSON_string(entrypoint)
        d = diff(TEST_JSON, out)
Example #14
        payload = {
            'jsonrpc': '2.0',
            'method': method,
            'id': 'test',
            'params': params
        }
        endpoint = 'http://' + self.Host + ':' + str(self.Port) + '/rpc'
        # print(endpoint)
        # print(json.dumps(payload))
        response = requests.post(endpoint,
                                 data=json.dumps(payload),
                                 headers={
                                     'Content-Type': 'application/json'
                                 }).json()
        # print(response)
        if 'error' in response:
            raise ValueError(response['error'])
        return response


firstService = BlockExplorerSerivce('localhost', 22869)
secondService = BlockExplorerSerivce('localhost', 23869)

for i in range(1, 2418):
    first = firstService.getBlockInfo(i)
    second = secondService.getBlockInfo(i)
    if first != second:
        print("Difference at height: %d" % (i))
        print(json.dumps(diff(first, second, syntax='symmetric'), indent=4))
        break
Example #15
def main():

    log = logging.getLogger(__name__)
    #    es_cred = None
    try:
        with open('auth.yml') as f:
            es_cred = yaml.load(f)['creds']
        with open('clusters.yml') as f:
            clusters = yaml.load(f)
    except Exception as e:
        # log error
        raise e

    cluster_name = 'monitoring_sat'
    watcher_sets = wm.metadata.keys()

    log.info("available clusters: %s" % repr(clusters.keys()))
    log.info("contacting '%s' cluster" % cluster_name)
    log.info("available watcher configs: %s" % repr(watcher_sets))
    log.debug("cluster nodes: %s" % repr(clusters[cluster_name]))

    # connect to the cluster and create a watcher API object
    prod_es = es.Elasticsearch(hosts=clusters[cluster_name],
                               sniff_on_start=False,
                               sniff_on_connection_fail=False,
                               http_auth=(es_cred['user'], es_cred['pass']))
    watch_es = es.client.xpack.watcher.WatcherClient(prod_es)

    with open('watchers/cluster_state/cluster_state.json') as f:
        clst_json = json.load(f)

    try:
        watcher_id = 'cluster_state'
        watch_template = watch_es.get_watch(watcher_id)
        wbody = watch_template['watch']
    except es.exceptions.NotFoundError as e:
        pprint(e)

        wbody = clst_json
        wp_result = watch_es.put_watch(watcher_id, wbody, active=True)
        if (wp_result is not None):
            status = ('Updated', 'Created')[wp_result['created']]
            log.info("%s v%d : %s " %
                     (wp_result['_id'], wp_result['_version'], status))
        else:
            log.info('%s creation failed' % watcher_id)

        sys.exit(0)

    # pull the template watcher from the cluster
    watch_template = watch_es.get_watch('filesystem_usage')
    watch_body = watch_template['watch']

    with open(
            'watchers/filesystem_usage/filesystem_usage_watcher_template.json'
    ) as f:
        fsuw_json = json.load(f)
    #pprint(fsuw_json)

# jdiff = jsondiff.diff(fsuw_json, watch_body)
    jdiff = jsondiff.diff(watch_body, fsuw_json)

    pprint(jdiff)

    try:
        enodes_sm = watch_es.get_watch('elasticsearch_node_system_memory')
    except es.exceptions.NotFoundError as e:
        pprint(e)
        sys.exit(0)

    pprint(enodes_sm)
    if (True):
        sys.exit(0)

    # clear out the metadata settings for the new watch
    for _w_id in fs_metadata.keys():
        watcher_id = "filesystem_usage_%s" % _w_id
        wbody = watch_template['watch']
        # clear out the metadata from the template and replace with cluster/fs specs
        wbody['metadata'] = {}
        wbody['metadata'] = fs_metadata[_w_id]
        wp_result = watch_es.put_watch(watcher_id, wbody, active=True)
        if (wp_result is not None):
            status = ('Updated', 'Created')[wp_result['created']]
            log.info("%s v%d : %s " %
                     (wp_result['_id'], wp_result['_version'], status))
        else:
            log.info('%s creation failed' % watcher_id)

    # the template watcher gets activated sometimes after being queried;
    # run a disable on it
    watch_es.deactivate_watch('filesystem_usage_alert')
Example #16
    def perform(self):
        for cluster in config.settings['clusters']:
            if (self.region != 'all' and self.region != cluster['region']) or (self.env != 'all' and self.env != cluster['env']):
                continue

            print "*--------------------------------*"
            print "Getting data for {} {}".format(cluster['region'], cluster['env'])
            print "*--------------------------------*"

            ecs = EcsHelper(cluster['region'])
            ssm = SsmHelper(cluster['region'])
            cluster['services'] = []
            services = []
            task_definitions = []
            service_list_arns = ecs.list_services(cluster['name'], [], '')

            for service_chunk in [service_list_arns[i:i + 10] for i in xrange(0, len(service_list_arns), 10)]:
                for service in ecs.client.describe_services(cluster=cluster['name'], services=service_chunk)['services']:
                    name = "_".join((service['serviceName'].split('-')[3:-4]))
                    if self.services != '' and name in self.services.split(','):
                        services.append(service)
                    elif self.services == '':
                        services.append(service)

            for service in services:
                task_definitions.append(ecs.client.describe_task_definition(
                    taskDefinition=service['taskDefinition']
                ))

            for task_definition in task_definitions:
                dict_ordered = None
                for containerDefinition in task_definition['taskDefinition']['containerDefinitions']:
                    for param in containerDefinition['environment']:
                        if param['name'] == "VERSION" or param['name'] == "PARAM_VERSION":
                            dict_ordered = None
                            image_tag = containerDefinition['image'].split(':')[-1]
                            regionEnv = "{}.{}".format(base.get_landscape(cluster['region']), base.get_environment(cluster['env']))
                            print containerDefinition['name']

                            author = ssm.get_author(regionEnv, containerDefinition['name'], param['value'])

                            try:
                                author = ("".join(author).split('/')[-1:])[0]
                            except Exception as e:
                                author = None
                            github = GithubHelper().get_param_version_data(containerDefinition['name'], base.get_landscape(cluster['region']), cluster['env'])
                            version = regionEnv + "." + containerDefinition['name'].replace('-', '_') + "." + param['value']
                            ssm_version = ssm.get_param(version)

                            diff_output = diff(ssm_version, github['params'])

                            if self.diff_mode is True and author != self.ignore_author and (len(diff_output) > 0 or param['value'] != github['version'] or author != 'codeship_param_pusher'):
                                dict_ordered = OrderedDict([('name', containerDefinition['name']), ('region', regionEnv), ('image_tag', image_tag), ('running_param_version', param['value']), ('github_param_version', github['version']), ('author', author), ('diff', diff_output)])
                                break
                            elif self.diff_mode is False and author != self.ignore_author:
                                dict_ordered = OrderedDict([('name', containerDefinition['name']), ('region', regionEnv), ('image_tag', image_tag), ('running_param_version', param['value']), ('github_param_version', github['version']), ('author', author), ('diff', diff_output)])
                                break

                if dict_ordered is not None: cluster['services'].append(dict_ordered)

            print "\n\n"

        self.output_data()
Example #17
 def _report_mismatch(self, run_data, upd_data):
     log_error("run_data vs expected_data: {}".format(
         str(jsondiff.diff(run_data, upd_data))[0:40]))
Example #18
    def test_inconsistent_objects(self, hge_ctx):
        with open(self.dir() + "/test.yaml") as c:
            test = yaml.safe_load(c)

        # setup
        st_code, resp = hge_ctx.v1q(json.loads(json.dumps(test['setup'])))
        assert st_code == 200, resp

        try:
            # exec sql to cause inconsistency
            sql_res = hge_ctx.sql(test['sql'])

            # reload metadata
            st_code, resp = hge_ctx.v1q(q=self.reload_metadata)
            assert st_code == 200, resp

            # fetch inconsistent objects
            st_code, resp = hge_ctx.v1q(q=self.get_inconsistent_metadata)
            assert st_code == 200, resp
            incons_objs_test = test['inconsistent_objects']
            incons_objs_resp = resp['inconsistent_objects']

            assert resp['is_consistent'] == False, resp
            assert incons_objs_resp == incons_objs_test, yaml.dump({
                'response':
                incons_objs_resp,
                'expected':
                incons_objs_test,
                'diff':
                jsondiff.diff(incons_objs_test, incons_objs_resp)
            })

            # export metadata
            st_code, export = hge_ctx.v1q(q=self.export_metadata)
            assert st_code == 200, export

            # apply metadata
            st_code, resp = hge_ctx.v1q(q={
                "type": "replace_metadata",
                "args": export
            })
            assert st_code == 400, resp

        finally:
            # drop inconsistent objects
            st_code, resp = hge_ctx.v1q(q=self.drop_inconsistent_metadata)
            assert st_code == 200, resp

            # reload metadata
            st_code, resp = hge_ctx.v1q(q=self.reload_metadata)
            assert st_code == 200, resp

            # fetch inconsistent objects
            st_code, resp = hge_ctx.v1q(q=self.get_inconsistent_metadata)
            assert st_code == 200, resp

            assert resp['is_consistent'] == True, resp
            assert len(resp['inconsistent_objects']) == 0, resp

            # teardown
            st_code, resp = hge_ctx.v1q(
                json.loads(json.dumps(test['teardown'])))
            assert st_code == 200, resp
Example #19
File: reporter.py Project: kpn/pyrandall
 def json_diff_report(expected, actual):
     return jsondiff.diff(expected, actual, syntax="explicit")
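(For context: a hedged sketch, not part of pyrandall, of how such an explicit-syntax report might be consumed, assuming jsondiff exports its insert/update/delete symbols at package level. Without marshal=True the report keys are those symbol objects, not strings; the sample payloads are invented.)

import jsondiff

expected = {"status": "ok", "items": 3}
actual = {"status": "ok", "items": 4, "warnings": []}

report = jsondiff.diff(expected, actual, syntax="explicit")

# Symbol keys are compared against the exported symbols, not strings.
if jsondiff.update in report:
    print("changed fields:", report[jsondiff.update])
if jsondiff.insert in report:
    print("added fields:", report[jsondiff.insert])
if jsondiff.delete in report:
    print("removed fields:", report[jsondiff.delete])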
Example #20
    def diffColLogs (clg1, clg2, frm, to):
        print('Comparing nodes in col_logs... '+str(len(clg1['nodes']))+'-'+str(len(clg2['nodes'])))
        result = {'globals':{}, 'nodes':[], 'links':[]}
        node_mapping = {'inserted':{}, 'deleted':{}, 'same':{}}
        node_name_id = {} # 1x1 mapping node name to new id
        nm = ni = ndl = lm = li = ld = idx = 0 
        for orig_node in clg1['nodes']:
            other_nd = LogDBUtils.findNodeByName(orig_node, clg2['nodes'])
            if other_nd:
                df = diff(orig_node, other_nd, syntax='symmetric')
#                 pprint.pprint(df)
                # Add node to resulting dict assigning new node id
                rnd = copy.deepcopy(other_nd)
                rnd['id'] = idx
                result['nodes'].append(rnd)
                node_name_id[rnd['name']] = rnd['id']
                node_mapping['same'][rnd['name']] = rnd
                difrc = {'from':frm, 'to':to, 'diff':'modified'}
                if not 'diffs' in rnd:
                    rnd['diffs'] = []
                rnd['diffs'].append(difrc)
                idx += 1
                nm += 1
#                 print ('\n',orig_node['name']+' found\nORIG')
#                 pprint.pprint(orig_node)
#                 print('\nOTHER')
#                 pprint.pprint(other_nd)
#                 print('\nDIFF')
#                 pprint.pprint(df)
            else:
                # Add node to resulting dict assigning new node id
                rnd = copy.deepcopy(orig_node)
                rnd['id'] = idx
                result['nodes'].append(rnd)
                node_name_id[rnd['name']] = rnd['id']
                node_mapping['deleted'][rnd['name']] = rnd
                difrc = {'from':frm, 'to':to, 'diff':'deleted'}
                if not 'diffs' in rnd:
                    rnd['diffs'] = []
                rnd['diffs'].append(difrc)
                idx += 1
                ndl += 1
#                 print ('\n',orig_node['name']+' gone')    
        # Identify new nodes only
        for other_node in clg2['nodes']:
            if not LogDBUtils.findNodeByName(other_node, clg1['nodes']):
                # Add node to resulting dict assigning new node id
                rnd = copy.deepcopy(other_node)
                rnd['id'] = idx
                result['nodes'].append(rnd)
                node_name_id[rnd['name']] = rnd['id']
                node_mapping['inserted'][rnd['name']] = rnd
                difrc = {'from':frm, 'to':to, 'diff':'inserted'}
                if not 'diffs' in rnd:
                    rnd['diffs'] = []
                rnd['diffs'].append(difrc)
                idx += 1
                ni += 1

        # Build a new json combining both links
        matched_links = []
        def link_footprint(link):
            return link['source_name']+'-'+link['target_name']

        for lk in clg1['links']:
            rlk = None
            # Check if link still exists between these endpoint in new dict
            if LogDBUtils.findLinkByEndpoints(lk, clg2):
                if lk['source_name'] in node_name_id.keys() and\
                lk['target_name'] in node_name_id.keys():
                    # Update link with new endpoint ids and add difference found
                    rlk = copy.deepcopy(lk)
                    rlk['source'] = node_name_id[lk['source_name']]
                    rlk['target'] = node_name_id[lk['target_name']]
                    difrc = {'from':frm, 'to':to, 'diff':'modified'}
                    if not 'diffs' in rlk:
                        rlk['diffs'] = []
                    rlk['diffs'].append(difrc)
#                     print ('existing link',link_footprint(rlk))
            else: # Deleted link
                rlk = copy.deepcopy(lk)
                rlk['source'] = node_name_id[lk['source_name']]
                rlk['target'] = node_name_id[lk['target_name']]
                difrc = {'from':frm, 'to':to, 'diff':'deleted'}
                if not 'diffs' in rlk:
                    rlk['diffs'] = []
                rlk['diffs'].append(difrc)
#                 print ('deleted link',link_footprint(rlk))
            if rlk:
                result['links'].append(rlk)
                matched_links.append(link_footprint(rlk))
        # Look for new links
        for lk in clg2['links']:
            # Ignore already processed links
            if link_footprint(lk) in matched_links:
                continue
            rlk = copy.deepcopy(lk)
            rlk['source'] = node_name_id[lk['source_name']]
            rlk['target'] = node_name_id[lk['target_name']]
            difrc = {'from':frm, 'to':to, 'diff':'inserted'}
            if not 'diffs' in rlk:
                rlk['diffs'] = []
            rlk['diffs'].append(difrc)
#             print ('new link',link_footprint(lk) )
            if rlk:
                result['links'].append(rlk)
                
#         pprint.pprint(result['links'])
        
        # Merge all globals
        funcs1 = len(clg1['globals']['functions'])
        funcs2 = len(clg2['globals']['functions'])
        protos1 = len(clg1['globals']['protocols'])
        protos2 = len(clg2['globals']['protocols'])
        trgts1 = len(clg1['globals']['targets'])
        trgt2 = len(clg2['globals']['targets'])
        result['globals']['functions'] = [x for x in set(clg1['globals']['functions']+clg2['globals']['functions'])]
        result['globals']['protocols'] = [x for x in set(clg1['globals']['protocols']+clg2['globals']['protocols'])]
        result['globals']['targets'] = [x for x in set(clg1['globals']['targets']+clg2['globals']['targets'])]
        print('Identified '+
              str(nm)+'/'+str(ndl)+'/'+str(ni)+
              ' nodes and '+
              str(lm)+'/'+str(ld)+'/'+str(li)+
              ' links modified/deleted/inserted. '+
              str(funcs1 - funcs2) + ' functions, '+
              str(protos1 - protos2) + ' protocols, '+
              str(trgts1 - trgt2) + ' targets.'
              )
        result['globals']['changes'] = {
            'from':frm,
            'to':to,
            'nodes':{'new':ni, 'modified':nm,'deleted':ndl},
            'links':{'new':li, 'modified':lm,'deleted':ld}
            }
#         pprint.pprint(result['globals'])
        return result
Example #21
def findDebugGroups(startnum, endnum, mmtracefile):
    # dump that frame
    #c:\utilities\apitrace-8.0.20190414-win64\bin\apitrace.exe dump mmconsole.trace --calls=23165-23562
    args = [
        the_apitrace, 'dump', '--calls=' + startnum + '-' + endnum,
        '--color=never', mmtracefile
    ]
    proc = subprocess.run(args, capture_output=True)
    res = proc.stdout.decode("utf-8")

    #find interesting debuggroups
    # 30079 glPushDebugGroup(source = GL_DEBUG_SOURCE_APPLICATION, id = 1234, length = -1, message = "SphereRenderer::Render")
    commands = res.split(os.linesep)
    done = {}
    groupstart = 0
    groupend = 0
    currgroup = ""
    for c in commands:
        m = re.search(r'^(\d+)\s+glPopDebugGroup', c)
        if m:
            groupend = m.group(1)
            if len(groupstack) == 0:
                print("found pop without push: " + c)
            else:
                groupstart, currgroup = groupstack.pop()
                print("found end of debug group " + currgroup + " " +
                      groupstart + "-" + groupend)
                if int(groupend) - int(groupstart) == 1:
                    print("no content")
                else:
                    # dump both states
                    # c:\utilities\apitrace-msvc\x64\bin\apitrace.exe replay -D 167273 mmconsole.1.trace > before.json
                    args = [
                        the_apitrace, 'replay', '-D', groupstart, mmtracefile
                    ]
                    proc = subprocess.run(args, capture_output=True)
                    text = safeString(proc.stdout.decode("ascii"))
                    before = json.loads(text)

                    args = [
                        the_apitrace, 'replay', '-D', groupend, mmtracefile
                    ]
                    proc = subprocess.run(args, capture_output=True)
                    text = safeString(proc.stdout.decode("ascii"))
                    after = json.loads(text)

                    diffstr = jsondiff.diff(before, after, marshal=True)

                    print("found differences:")
                    json.dump(diffstr, sys.stdout, indent=2)
                    print("")
        m = re.search(r'^(\d+)\s+glPushDebugGroup', c)
        if m:
            thestart = m.group(1)
            m = re.search(r',\s+id\s+=\s+(\d+)', c)
            if m:
                m = re.search(r',\s+message\s+=\s+"(.*?)"', c)
                if m:
                    thingy = m.group(1)
                    if thingy in done:
                        print("already looked at " + thingy)
                    else:
                        print("looking at " + thingy)
                        # do not look twice. the first frame seems to be
                        # different though, so I deactivated this
                        # done[thingy] = 1
                        groupstack.append([thestart, thingy])
Example #22
def get_selected_collection(collection_id, connection, api_key):
    """
    Input: Postman connection object, UUID of the collection chosen by the user, Postman API key of the user
    Description: To fetch details about all the APIs present in a specific collection and to detect changes if any
    Returns the changes detected in the API schema
    """
    boundary = ''
    payload = ''
    headers = {
        'X-Api-Key': api_key,
        'Content-type': 'multipart/form-data; boundary={}'.format(boundary)
    }
    connection.request("GET", "/collections/" + collection_id, payload,
                       headers)
    response = connection.getresponse()
    if response.status == 200:
        data = json.loads(response.read())

        # For each collection, a separate text file is created to store the details related to the collection
        filepath = "./data/" + collection_id + ".txt"
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        if not os.path.exists(filepath):
            with open(filepath, "w") as f:
                f.write("{}")
                f.close()

        # Difference between the data received as part of the current API call and the data that previously existed in the .txt file
        # The difference is computed twice to detect changes w.r.t. both addition and deletion of key-value pairs
        with open(filepath, "r+") as f:
            old_value = diff(data, json.load(f))
            f.close()

        with open(filepath, "r+") as f:
            new_value = diff(json.load(f), data)
            f.close()

        # A list of changes in the existing API are determined
        changes_detected = [
            regex(str(value)) for value in [old_value, new_value]
        ]

        # When changes are detected, the .txt file is updated according to the new API schema
        if changes_detected:
            with open(filepath, "w+") as f:
                json.dump(data, f)
                f.close()

        # Formatting the changes detected to make it user-friendly
        keys_old = "Old name of the query paramter: " + \
            ' '.join(changes_detected[0][0])
        keys_new = "Updated name of the query parameter: " + \
            ' '.join(changes_detected[1][0])
        keys_inserted = "Name of the query parameter newly added: " + \
            ' '.join(changes_detected[1][3])
        keys_deleted = "Name of the query paramter that is deleted " + \
            ' '.join(changes_detected[0][2])

        return keys_old + "\n" + keys_new + "\n" + keys_inserted + "\n" + keys_deleted
    else:
        raise Exception("Exited with status code " + str(response.status) +
                        '. ' +
                        str(json.loads(response.read())['error']['message']))
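(Aside, not from the original module: the function above calls diff() twice, once in each direction, to catch both removed and added keys. A hedged alternative sketch with made-up data follows: a single diff with syntax='symmetric' reports both directions at once.)

from jsondiff import diff

stored_schema = {"params": ["page", "limit"], "method": "GET"}                 # previously saved schema
current_schema = {"params": ["page", "per_page"], "auth": "apikey", "method": "GET"}  # fresh API data

# One symmetric diff captures value changes, insertions and deletions
# together, instead of computing diff(old, new) and diff(new, old) separately.
print(diff(stored_schema, current_schema, syntax='symmetric'))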
Example #23
 def test_ping_api(self):
     ping_service = self.public_services_env.component(usage="ping")
     openapi_def = ping_service.to_openapi()
     canonical_def = get_canonical_json("ping_api.json")
     self._fix_server_url(canonical_def)
     self.assertFalse(jsondiff.diff(openapi_def, canonical_def))
Example #24
File: json_diff.py Project: sjaa/scheduler
import sys
import json
from   pathlib import Path
from   jsondiff import diff

if len(sys.argv) == 3:
    old_file = Path(sys.argv[1])
    new_file = Path(sys.argv[2])
    if old_file.is_file() and new_file.is_file():
#       with open(old_file) as json_data:
        with old_file.open() as json_data:
            old_json = json.load(json_data)
#       with open(new_file) as json_data:
        with new_file.open() as json_data:
            new_json = json.load(json_data)
        if diff(old_json, new_json):
            sys.exit(1)
        else:
            sys.exit(0)

print(sys.argv)
print('json diff error: json_diff.py <old file> <new file>')
sys.exit(1)
Example #25
def history_records_assemble(id, csv_dir, tar_dir, out_dir, assemble, do_diff):

    if assemble:
        if os.path.exists(out_dir):
            shutil.rmtree(out_dir)
        os.makedirs(out_dir)

        csvs = [c for c in os.listdir(csv_dir) if c.endswith(".csv")]
        paths = []

        # find all the files from the index csvs
        for c in csvs:
            tarname = c.rsplit(".", 1)[0] + ".tar.gz"
            with codecs.open(os.path.join(csv_dir, c), "rb", "utf-8") as f:
                reader = clcsv.UnicodeReader(f)
                for row in reader:
                    if row[0] == id:
                        paths.append({
                            "csv" : c,
                            "tarname" : tarname,
                            "tarpath" : row[2],
                            "date" : row[1],
                            "fileid" : row[3]
                        })

        # gather all the files in the target directory
        with codecs.open(os.path.join(out_dir, "_index." + id + ".csv"), "wb", "utf-8") as g:
            writer = clcsv.UnicodeWriter(g)
            writer.writerow(["CSV", "Tar Name", "Tar Path", "Date", "File ID"])
            for p in paths:
                tarball = tarfile.open(os.path.join(tar_dir, p["tarname"]), "r:gz")
                member = tarball.getmember(p["tarpath"])
                handle = tarball.extractfile(member)
                out = os.path.join(out_dir, p["date"] + "_" + p["fileid"] + ".json")
                with codecs.open(out, "wb", "utf-8") as f:
                    shutil.copyfileobj(handle, f)
                writer.writerow([p["csv"], p["tarname"], p["tarpath"], p["date"], p["fileid"]])

    if do_diff:
        difffile = os.path.join(out_dir, "_diff." + id + ".json")
        if os.path.exists(difffile):
            os.remove(difffile)

        # order the files and diff them into a single summary file
        # FIXME: note that this is not the standardised form of jsondiff, for some reason, but it
        # will do for now.
        changes = []
        files = [f for f in os.listdir(out_dir) if f.endswith(".json")]
        files.sort()
        for i in range(len(files) - 1):
            f1 = files[i]
            f2 = files[i + 1]
            with codecs.open(os.path.join(out_dir, f1), "rb", "utf-8") as r1, \
                    codecs.open(os.path.join(out_dir, f2), "rb", "utf-8") as r2:
                j1 = json.loads(r1.read())
                j2 = json.loads(r2.read())
                d = diff(j1, j2)
                d["_from"] = f1
                d["_to"] = f2
                d = _fix_symbols(d)
                changes.append(d)

        with codecs.open(difffile, "wb", "utf-8") as o:
            o.write(json.dumps(changes, indent=2, sort_keys=True))
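(The _fix_symbols helper used above is not shown in this excerpt. Below is a purely hypothetical reconstruction, under the assumption that it only makes the diff JSON-serializable by turning jsondiff's Symbol keys into strings, which is the same effect diff(..., marshal=True) would give directly.)

def _fix_symbols(d):
    # Hypothetical sketch: recursively replace non-string (Symbol) keys with
    # their string form so that json.dumps() accepts the structure.
    if isinstance(d, dict):
        return {(k if isinstance(k, str) else str(k)): _fix_symbols(v)
                for k, v in d.items()}
    if isinstance(d, list):
        return [_fix_symbols(x) for x in d]
    return d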
Example #26
 def test_partner_api(self):
     partner_service = self.private_services_env.component(usage="partner")
     openapi_def = partner_service.to_openapi()
     canonical_def = get_canonical_json("partner_api.json")
     self._fix_server_url(canonical_def)
     self.assertFalse(jsondiff.diff(openapi_def, canonical_def))
Example #27
def viz(str_num, date, line, matches, dict_num_day_ordinal,
        dict_quarter_ordinal, dict_quarter_cardinal, dict_year_modifier,
        dict_year_cardinal, dict_week_ordinal, dict_week_cardinal,
        dict_month_modifier, dict_date, dict_day_cardinal, dict_month_ordinal,
        dict_month_cardinal, dict_week_modifier, dict_count, dict_day_modifier,
        dict_half_year, dict_year_ordinal, dict_quarter_modifier, id):
    #print(len(matches))
    dict_result = {}
    dict_num = 0
    date_d = {}

    result = {}
    split_index = []
    #split_index.append(0)
    for m in re.finditer(r' со | с | по | до |за | по | в |после ', line):
        index = m.start()
        split_index.append(index)
        #split_on_date = re.split(r'с |по | до ', line)
    if len(split_index) == 0:
        split_index.append(0)
    split_index.append(len(line))
    #print("INTERVALS ", split_index)
    num_split = 0
    for split_loc in split_index:
        num_split = num_split + 1
    interval = []
    result["id"] = str(id)
    id = id + 1
    r = {}
    res_atr = []
    out = {}
    id_interval = 0
    #r["DI_count"] = len(split_index)-1
    # filter out duplicate matches
    matches_arr = []
    match_num = -1
    for match in matches:
        match_num = match_num + 1
        matches_arr.append(match)
        if match_num != 0:

            match1 = matches_arr[match_num - 1].fact.as_json
            match2 = matches_arr[match_num].fact.as_json
            diff_match = jsondiff.diff(match1, match2)
            if len(diff_match) == 0:
                continue
        for i in range(len(split_index)):

            #for match in matches:

            if match.span.start >= split_index[
                    i] and match.span.stop <= split_index[i + 1]:

                #if match.span.start<:
                dict_num = dict_num + 1
                dict_match = {}
                val = []
                # interval_index = 1
                # num_di = 0
                # for split in split_index:
                #     num_di = num_di + 1
                #     if split < match.span.stop:
                #         interval_index = num_di
                #         break

                #for m in re.finditer(r'с |по | до ', line):
                #    index, item = m.start(), m.group()
                #    print(index, m, item)
                #dict_match["DI_count"] = len()
                dict_match["start"] = match.span.start
                dict_match["stop"] = match.span.stop
                annotationText = ''
                for tok in match.tokens:
                    annotationText = annotationText + " " + tok.value
                dict_match["annotationText"] = annotationText
                dict_match["word"] = match.tree.root.main.value
                before = morph.parse(match.tokens[0].value)[0]
                p = morph.parse(match.tokens[0].normalized)[0]
                try:
                    d_list = list(match.fact.as_json.items())
                    dict_match["entity"] = list(
                        match.fact.as_json.items())[0][0]
                    dict_match["value"] = list(
                        match.fact.as_json.items())[0][1]

                    #print(isinstance(dict_match["value"], int))

                    #print("DAAAAAte")
                    if (re.search("ordinal", dict_match["entity"])) and (
                            p.tag.POS == 'NUMR') and (before.tag.POS
                                                      == "ADJF"):
                        dict_match["entity_cardinal"] = dict_match[
                            "entity"].replace("ordinal", "cardinal")

                        dict_match["value_cardinal"] = 1
                    if (re.search("ordinal", dict_match["entity"])):
                        if (re.search("year", dict_match["entity"])):
                            date_d["year{}".format(
                                dict_num)] = dict_match["value"]
                        if (re.search("month", dict_match["entity"])):
                            date_d["month{}".format(
                                dict_num)] = dict_match["value"]
                    dict_result[dict_num] = dict_match
                #print("DATE",date)
                #dict_match["date"] = date
                except:
                    dict_match["entity"] = "error"
                    dict_match["value"] = "error"
                    continue

                #dict_match["annotions"] = {dict_match["start"], dict_match["stop"] }
                #result[dict_match["entity"]] = [dict_match["value"], dict_match["annotions"]]
                #if (p.tag.POS == NUMR):
                result_atr = {
                    #("DateInterval{}").format(id_interval):{
                    "value": dict_match["value"],
                    "annotations": {
                        "annotationStart": dict_match["start"],
                        "annotationEnd": dict_match["stop"],
                        "annotationText": dict_match["annotationText"]
                    },
                    #"id_interval":id_interval
                    #}
                }
                #print(result_atr, "______________")

                if (dict_match["entity"]) in result:
                    if result[(dict_match["entity"])] != result_atr:
                        result[(dict_match["entity"]).replace(
                            "_", "_end_")] = [result_atr]
                        #result[(dict_match["entity"]).replace("_", "_start_")] = result[(dict_match["entity"])]

                else:
                    result[(dict_match["entity"])] = [result_atr]
                #print("____________", result)
                r["DateInterval"] = [result]

                #"DI_index": interval_index

        #print(r)
        #print("TAAAAARG", p.tag.POS)
        #dict_result.update(dict_match)
        #print(dict_date1)
        #print(match.tree)
        #json_d.value.append(match.tokens[0].value)
            date.day_modifier.append(match.fact.day_modifier)
            date.half_year.append(match.fact.half_year)
            date.quarter_cardinal.append(match.fact.quarter_cardinal)
            date.quarter_ordinal.append(match.fact.quarter_ordinal)
            date.year_modifier.append(match.fact.year_modifier)
            date.year_cardinal.append(match.fact.year_cardinal)
            date.week_modifier.append(match.fact.week_modifier)
            date.week_ordinal.append(match.fact.week_ordinal)
            date.week_cardinal.append(match.fact.week_cardinal)
            date.month_ordinal.append(match.fact.month_ordinal)
            date.month_cardinal.append(match.fact.month_cardinal)
            date.month_modifier.append(match.fact.month_modifier)
            date.day_cardinal.append(match.fact.day_cardinal)
            date.day_ordinal.append(match.fact.day_ordinal)
            date.start_date.append(match.fact.start_date)
            date.end_date.append(match.fact.end_date)
            date.day_modifier.append(match.fact.day_modifier)
            date.half_year.append(match.fact.half_year)
            date.count.append(match.fact.count)
            date.date.append(match.fact.date)
            date.year_ordinal.append(match.fact.date)
            date.quarter_modifier.append(match.fact.date)
            # date.day.append(match.fact.day)
        if len(str(skip_none(date.quarter_modifier))) > 0:
            print("quarter_modifier: ", skip_none(date.quarter_modifier))
            dict_quarter_modifier["{}".format(line)] = skip_none(
                date.quarter_modifier)
        if len(str(skip_none(date.year_ordinal))) > 0:
            print("year_ordinal: ", skip_none(date.year_ordinal))
            dict_year_ordinal["{}".format(line)] = skip_none(date.year_ordinal)

        if len(str(skip_none(date.quarter_cardinal))) > 0:
            print("quarter_cardinal: ", skip_none(date.quarter_cardinal))
            dict_quarter_cardinal["{}".format(line)] = skip_none(
                date.quarter_cardinal)
        if len(str(skip_none(date.quarter_ordinal))) > 0:
            print("quarter_ordinal: ", skip_none(date.quarter_ordinal))
            dict_quarter_ordinal["{}".format(line)] = skip_none(
                date.quarter_ordinal)
        if len(str(skip_none(date.year_modifier))) > 0:
            print("year_modifier: ", skip_none(date.year_modifier))
            dict_year_modifier["{}".format(line)] = skip_none(
                date.year_modifier)
        if len(str(skip_none(date.year_cardinal))) > 0:
            print("year_cardinal: ", skip_none(date.year_cardinal))
            dict_year_cardinal["{}".format(line)] = skip_none(
                date.year_cardinal)

        if len(str(skip_none(date.date))) > 0:
            print("date: ", skip_none(date.date))
            dict_date["{}".format(line)] = skip_none(date.date)
        if len(str(skip_none(date.week_ordinal))) > 0:
            print("week_ordinal: ", skip_none(date.week_ordinal))
            dict_week_ordinal["{}".format(line)] = skip_none(date.week_ordinal)
        if len(str(skip_none(date.week_cardinal))) > 0:
            print("week_cardinal: ", skip_none(date.week_cardinal))
            dict_week_cardinal["{}".format(line)] = skip_none(
                date.week_cardinal)
        if len(str(skip_none(date.week_modifier))) > 0:
            print("week_modifier: ", skip_none(date.week_modifier))
            dict_week_modifier["{}".format(line)] = skip_none(
                date.week_modifier)
        if len(str(skip_none(date.month_ordinal))) > 0:
            print("month_ordinal: ", skip_none(date.month_ordinal))
            dict_month_ordinal["{}".format(line)] = skip_none(
                date.month_ordinal)
        if len(str(skip_none(date.month_cardinal))) > 0:
            print("month_cardinal: ", skip_none(date.month_cardinal))
            dict_month_cardinal["{}".format(line)] = skip_none(
                date.month_cardinal)
        if len(str(skip_none(date.month_modifier))) > 0:
            print("month_modifier: ", skip_none(date.month_modifier))
            dict_month_modifier["{}".format(line)] = skip_none(
                date.month_modifier)
        if len(str(skip_none(date.day_cardinal))) > 0:
            print("day_cardinal: ", skip_none(date.day_cardinal))
            dict_day_cardinal["{}".format(line)] = skip_none(date.day_cardinal)
        if len(str(skip_none(date.day_ordinal))) > 0:
            print("day_ordinal: ", skip_none(date.day_ordinal))
            dict_num_day_ordinal["{}".format(line)] = skip_none(
                date.day_ordinal)
        if len(str(skip_none(date.start_date))) > 0:
            print("start_date: ", skip_none(date.start_date))
            dict_start_date["{}".format(line)] = skip_none(date.start_date)
        if len(str(skip_none(date.end_date))) > 0:
            print("end_date: ", skip_none(date.end_date))
            dict_end_date["{}".format(line)] = skip_none(date.end_date)
        if len(str(skip_none(date.day_modifier))) > 0:
            print("day_modifier: ", skip_none(date.day_modifier))
            dict_day_modifier["{}".format(line)] = skip_none(date.day_modifier)
        if len(str(skip_none(date.half_year))) > 0:
            print("half_year: ", skip_none(date.half_year))
            dict_half_year["{}".format(line)] = skip_none(date.half_year)
        if len(str(skip_none(date.count))) > 0:
            print("count: ", skip_none(date.count))
            dict_count["{}".format(line)] = skip_none(date.count)
    #out[]
    return dict_result, date, result, r, id
Example #28
def main():
    if (len(sys.argv) < 8):
        sys.stderr.write(
            'Usage: ./run_spark_hdfs.py <build_dir> <install_dir> <spark_master> <hdfs_namenode> <spark_deploy> <genomicsdb_version> <test_dir> [<build_type>]\n'
        )
        sys.stderr.write(
            '   Optional Argument 8 - build_type=Release|Coverage|...\n')
        sys.exit(-1)
    exe_path = sys.argv[2] + os.path.sep + 'bin'
    spark_master = sys.argv[3]
    namenode = sys.argv[4]
    jar_dir = sys.argv[1] + os.path.sep + 'target'
    spark_deploy = sys.argv[5]
    genomicsdb_version = sys.argv[6]
    test_dir = sys.argv[7]
    if (len(sys.argv) == 9):
        build_type = sys.argv[8]
    else:
        build_type = "default"
    #Switch to tests directory
    parent_dir = os.path.dirname(os.path.realpath(__file__))
    os.chdir(parent_dir)
    hostfile_path = parent_dir + os.path.sep + 'hostfile'
    vid_path = parent_dir + os.path.sep
    template_vcf_header_path = parent_dir + os.path.sep + 'inputs' + os.path.sep + 'template_vcf_header.vcf'
    tmpdir = tempfile.mkdtemp()
    ws_dir = tmpdir + os.path.sep + 'ws'
    jacoco, jacoco_report_cmd = common.setup_jacoco(
        os.path.abspath(sys.argv[1]), build_type)
    loader_tests = [
        {
            "name":
            "t0_1_2",
            'golden_output':
            'golden_outputs/t0_1_2_loading',
            'callset_mapping_file':
            'inputs/callsets/t0_1_2.json',
            "column_partitions": [[{
                "begin": 0,
                "workspace": "/tmp/ws",
                "array": "test0"
            }],
                                  [{
                                      "begin": 0,
                                      "workspace": "/tmp/ws",
                                      "array": "test1"
                                  }, {
                                      "begin": 10000,
                                      "workspace": "/tmp/ws",
                                      "array": "test2"
                                  }],
                                  [{
                                      "begin": 0,
                                      "workspace": "/tmp/ws",
                                      "array": "test3"
                                  }, {
                                      "begin": 3000,
                                      "workspace": "/tmp/ws",
                                      "array": "test4"
                                  }, {
                                      "begin": 6000,
                                      "workspace": "/tmp/ws",
                                      "array": "test5"
                                  }, {
                                      "begin": 9000,
                                      "workspace": "/tmp/ws",
                                      "array": "test6"
                                  }, {
                                      "begin": 12000,
                                      "workspace": "/tmp/ws",
                                      "array": "test7"
                                  }]],
            "query_params": [
                {
                    "query_column_ranges": [12100, 12200],
                    "golden_output": {
                        "spark": "golden_outputs/spark_t0_1_2_vcf_at_12100",
                    }
                },
                {
                    "query_column_ranges": [0, 100000],
                    "golden_output": {
                        "spark": "golden_outputs/spark_t0_1_2_vcf_at_0",
                    }
                },
                {
                    "query_column_ranges": [12150, 100000],
                    "golden_output": {
                        "spark": "golden_outputs/spark_t0_1_2_vcf_at_12150",
                    }
                },
            ]
        },
        {
            "name":
            "t0_overlapping",
            'golden_output':
            'golden_outputs/t0_overlapping',
            'callset_mapping_file':
            'inputs/callsets/t0_overlapping.json',
            "column_partitions": [[{
                "begin": 0,
                "workspace": "/tmp/ws",
                "array": "test0"
            }],
                                  [{
                                      "begin": 0,
                                      "workspace": "/tmp/ws",
                                      "array": "test1"
                                  }, {
                                      "begin": 10000,
                                      "workspace": "/tmp/ws",
                                      "array": "test2"
                                  }],
                                  [{
                                      "begin": 0,
                                      "workspace": "/tmp/ws",
                                      "array": "test3"
                                  }, {
                                      "begin": 3000,
                                      "workspace": "/tmp/ws",
                                      "array": "test4"
                                  }, {
                                      "begin": 6000,
                                      "workspace": "/tmp/ws",
                                      "array": "test5"
                                  }, {
                                      "begin": 9000,
                                      "workspace": "/tmp/ws",
                                      "array": "test6"
                                  }, {
                                      "begin": 12000,
                                      "workspace": "/tmp/ws",
                                      "array": "test7"
                                  }]],
            "query_params": [{
                "query_column_ranges": [12202, 100000],
                "golden_output": {
                    "spark": "golden_outputs/spark_t0_overlapping_at_12202",
                }
            }]
        },
        {
            "name":
            "t6_7_8",
            'golden_output':
            'golden_outputs/t6_7_8_loading',
            'callset_mapping_file':
            'inputs/callsets/t6_7_8.json',
            "column_partitions": [[{
                "begin": 0,
                "workspace": "/tmp/ws",
                "array": "test0"
            }],
                                  [{
                                      "begin": 0,
                                      "workspace": "/tmp/ws",
                                      "array": "test1"
                                  }, {
                                      "begin": 500000,
                                      "workspace": "/tmp/ws",
                                      "array": "test2"
                                  }, {
                                      "begin": 1000000,
                                      "workspace": "/tmp/ws",
                                      "array": "test3"
                                  }],
                                  [{
                                      "begin": 0,
                                      "workspace": "/tmp/ws",
                                      "array": "test4"
                                  }, {
                                      "begin": 250000,
                                      "workspace": "/tmp/ws",
                                      "array": "test5"
                                  }, {
                                      "begin": 500000,
                                      "workspace": "/tmp/ws",
                                      "array": "test6"
                                  }, {
                                      "begin": 750000,
                                      "workspace": "/tmp/ws",
                                      "array": "test7"
                                  }, {
                                      "begin": 1000000,
                                      "workspace": "/tmp/ws",
                                      "array": "test8"
                                  }]],
            "query_params": [{
                "query_column_ranges": [0, 10000000],
                "golden_output": {
                    "spark": "golden_outputs/spark_t6_7_8_vcf_at_0",
                },
                "query_block_size": 1000000,
                "query_block_size_margin": 50000
            }, {
                "query_column_ranges": [8029500, 10000000],
                "golden_output": {
                    "spark": "golden_outputs/spark_t6_7_8_vcf_at_8029500",
                },
                "query_block_size": 100000,
                "query_block_size_margin": 5000
            }, {
                "query_column_ranges": [8029500, 8029500],
                "golden_output": {
                    "spark":
                    "golden_outputs/spark_t6_7_8_vcf_at_8029500-8029500",
                }
            }]
        },
        {
            "name":
            "t0_1_2_combined",
            'golden_output':
            'golden_outputs/t0_1_2_combined',
            'callset_mapping_file':
            'inputs/callsets/t0_1_2_combined.json',
            "column_partitions": [[{
                "begin": 0,
                "workspace": "/tmp/ws",
                "array": "test0"
            }],
                                  [{
                                      "begin": 0,
                                      "workspace": "/tmp/ws",
                                      "array": "test1"
                                  }, {
                                      "begin": 10000,
                                      "workspace": "/tmp/ws",
                                      "array": "test2"
                                  }],
                                  [{
                                      "begin": 0,
                                      "workspace": "/tmp/ws",
                                      "array": "test3"
                                  }, {
                                      "begin": 3000,
                                      "workspace": "/tmp/ws",
                                      "array": "test4"
                                  }, {
                                      "begin": 6000,
                                      "workspace": "/tmp/ws",
                                      "array": "test5"
                                  }, {
                                      "begin": 9000,
                                      "workspace": "/tmp/ws",
                                      "array": "test6"
                                  }, {
                                      "begin": 12000,
                                      "workspace": "/tmp/ws",
                                      "array": "test7"
                                  }]],
            "query_params": [
                {
                    "query_column_ranges": [0, 1000000],
                    "golden_output": {
                        "spark": "golden_outputs/spark_t0_1_2_combined",
                    },
                    "query_block_size": 100000,
                    "query_block_size_margin": 5000
                },
            ]
        },
        {
            "name":
            "t0_haploid_triploid_1_2_3_triploid_deletion",
            'golden_output':
            'golden_outputs/t0_haploid_triploid_1_2_3_triploid_deletion_loading',
            'callset_mapping_file':
            'inputs/callsets/t0_haploid_triploid_1_2_3_triploid_deletion.json',
            "vid_mapping_file":
            "inputs/vid_DS_ID_phased_GT.json",
            'size_per_column_partition':
            1200,
            'segment_size':
            100,
            "column_partitions": [[{
                "begin": 0,
                "workspace": "/tmp/ws",
                "array": "test0"
            }],
                                  [{
                                      "begin": 0,
                                      "workspace": "/tmp/ws",
                                      "array": "test1"
                                  }, {
                                      "begin": 10000,
                                      "workspace": "/tmp/ws",
                                      "array": "test2"
                                  }],
                                  [{
                                      "begin": 0,
                                      "workspace": "/tmp/ws",
                                      "array": "test3"
                                  }, {
                                      "begin": 3000,
                                      "workspace": "/tmp/ws",
                                      "array": "test4"
                                  }, {
                                      "begin": 6000,
                                      "workspace": "/tmp/ws",
                                      "array": "test5"
                                  }, {
                                      "begin": 9000,
                                      "workspace": "/tmp/ws",
                                      "array": "test6"
                                  }, {
                                      "begin": 12000,
                                      "workspace": "/tmp/ws",
                                      "array": "test7"
                                  }]],
            "query_params": [{
                "query_column_ranges": [0, 1000000],
                'callset_mapping_file':
                'inputs/callsets/t0_haploid_triploid_1_2_3_triploid_deletion.json',
                "vid_mapping_file": "inputs/vid_DS_ID_phased_GT.json",
                'segment_size': 100,
                "golden_output": {
                    "spark":
                    "golden_outputs/spark_t0_haploid_triploid_1_2_3_triploid_deletion_java_vcf",
                },
                "query_block_size": 100000,
                "query_block_size_margin": 5000
            }, {
                "query_column_ranges": [0, 1000000],
                'callset_mapping_file':
                'inputs/callsets/t0_haploid_triploid_1_2_3_triploid_deletion.json',
                "vid_mapping_file": "inputs/vid_DS_ID_phased_GT.json",
                'produce_GT_field': True,
                'segment_size': 100,
                "golden_output": {
                    "spark":
                    "golden_outputs/spark_t0_haploid_triploid_1_2_3_triploid_deletion_java_vcf_produce_GT",
                },
                "query_block_size": 100000,
                "query_block_size_margin": 5000
            }]
        },
    ]
    if ("://" in namenode):
        pid = subprocess.Popen('hadoop fs -mkdir -p ' + namenode +
                               '/home/hadoop/.tiledb/',
                               shell=True,
                               stdout=subprocess.PIPE)
        stdout_string = pid.communicate()[0]
        if (pid.returncode != 0):
            sys.stderr.write('Error creating hdfs:///home/hadoop/.tiledb/\n')
            sys.exit(-1)
    for test_params_dict in loader_tests:
        test_name = test_params_dict['name']

        for col_part in test_params_dict['column_partitions']:
            test_loader_dict = create_loader_json(ws_dir, test_name,
                                                  test_params_dict, col_part,
                                                  test_dir)
            if (test_name == "t0_1_2"):
                test_loader_dict["compress_tiledb_array"] = True
            if ("://" in namenode):
                test_loader_dict = add_hdfs_to_loader_json(
                    test_loader_dict, namenode)
            loader_json_filename = tmpdir + os.path.sep + test_name + '-loader.json'
            with open(loader_json_filename, 'wb') as fptr:
                json.dump(test_loader_dict,
                          fptr,
                          indent=4,
                          separators=(',', ': '))
                fptr.close()
            # invoke vcf2tiledb -r <rank> where <rank> goes from 0 to num partitions
            # otherwise this only loads the first partition
            for i in range(0, len(col_part)):
                etl_cmd = exe_path + os.path.sep + 'vcf2tiledb -r ' + str(
                    i) + ' ' + loader_json_filename
                pid = subprocess.Popen(etl_cmd,
                                       shell=True,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
                stdout_string, stderr_string = pid.communicate()
                if (pid.returncode != 0):
                    sys.stderr.write('Loading failed for test: ' + test_name +
                                     ' rank ' + str(i) + '\n')
                    sys.stderr.write('Loading command: ' + etl_cmd + '\n')
                    sys.stderr.write('Loader file :' + str(test_loader_dict) +
                                     '\n')
                    sys.stderr.write('Loading stdout: ' + stdout_string + '\n')
                    sys.stderr.write('Loading stderr: ' + stderr_string + '\n')
                    cleanup_and_exit(namenode, tmpdir, -1)
                else:
                    sys.stdout.write('Loading passed for test: ' + test_name +
                                     ' rank ' + str(i) + '\n')
            with open(loader_json_filename, 'wb') as fptr:
                json.dump(test_loader_dict,
                          fptr,
                          indent=4,
                          separators=(',', ': '))
                fptr.close()
            for query_param_dict in test_params_dict['query_params']:
                if ("://" in namenode):
                    test_query_dict = create_query_json(
                        namenode + ws_dir, test_name, query_param_dict,
                        test_dir)
                else:
                    test_query_dict = create_query_json(
                        ws_dir, test_name, query_param_dict, test_dir)
                test_query_dict[
                    'query_attributes'] = vcf_query_attributes_order
                query_json_filename = tmpdir + os.path.sep + test_name + '-query.json'
                with open(query_json_filename, 'wb') as fptr:
                    json.dump(test_query_dict,
                              fptr,
                              indent=4,
                              separators=(',', ': '))
                    fptr.close()
                spark_cmd = 'spark-submit --class TestGenomicsDBSparkHDFS --master ' + spark_master + ' --deploy-mode ' + spark_deploy + ' --total-executor-cores 1 --executor-memory 512M --conf "spark.yarn.executor.memoryOverhead=3700" --conf "spark.executor.extraJavaOptions=' + jacoco + '" --conf "spark.driver.extraJavaOptions=' + jacoco + '" --jars ' + jar_dir + '/genomicsdb-' + genomicsdb_version + '-allinone.jar ' + jar_dir + '/genomicsdb-' + genomicsdb_version + '-examples.jar --loader ' + loader_json_filename + ' --query ' + query_json_filename + ' --template_vcf_header ' + template_vcf_header_path + ' --spark_master ' + spark_master + ' --jar_dir ' + jar_dir
                if (test_name == "t6_7_8"):
                    spark_cmd = spark_cmd + ' --use-query-protobuf'
                if (test_name == "t0_overlapping"):
                    spark_cmd = spark_cmd + ' --hostfile ' + hostfile_path
                pid = subprocess.Popen(spark_cmd,
                                       shell=True,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
                stdout_string, stderr_string = pid.communicate()
                if (pid.returncode != 0):
                    sys.stderr.write('Query test: ' + test_name +
                                     ' with query file ' +
                                     query_json_filename + ' failed\n')
                    sys.stderr.write('Spark command was: ' + spark_cmd + '\n')
                    sys.stderr.write('Spark stdout was: ' + stdout_string +
                                     '\n')
                    sys.stderr.write('Spark stderr was: ' + stderr_string +
                                     '\n')
                    sys.stderr.write('Query file was: ' +
                                     json.dumps(test_query_dict) + '\n')
                    cleanup_and_exit(namenode, tmpdir, -1)
                stdout_list = stdout_string.splitlines(True)
                stdout_list_filter = [
                    k for k in stdout_list if not k.startswith('##')
                ]
                stdout_filter = "".join(stdout_list_filter)
                md5sum_hash_str = str(hashlib.md5(stdout_filter).hexdigest())
                if ('golden_output' in query_param_dict
                        and 'spark' in query_param_dict['golden_output']):
                    golden_stdout, golden_md5sum = get_file_content_and_md5sum(
                        query_param_dict['golden_output']['spark'])
                    if (golden_md5sum != md5sum_hash_str):
                        sys.stdout.write(
                            'Mismatch in query test: ' + test_name +
                            ' with column ranges: ' +
                            str(query_param_dict['query_column_ranges']) +
                            ' and loaded with ' + str(len(col_part)) +
                            ' partitions\n')
                        print_diff(golden_stdout, stdout_filter)
                        sys.stderr.write('Spark command was: ' + spark_cmd +
                                         '\n')
                        sys.stderr.write('Spark stdout was: ' + stdout_string +
                                         '\n')
                        sys.stderr.write('Spark stderr was: ' + stderr_string +
                                         '\n')
                        sys.stderr.write('Query file was: ' +
                                         json.dumps(test_query_dict) + '\n')
                        cleanup_and_exit(namenode, tmpdir, -1)
                    else:
                        sys.stdout.write(
                            'Query test: ' + test_name +
                            ' with column ranges: ' +
                            str(query_param_dict['query_column_ranges']) +
                            ' and loaded with ' + str(len(col_part)) +
                            ' partitions passed\n')
                # add another spark run command to test datasourcev2 stuff
                if ('vid_mapping_file' in query_param_dict):
                    vid_path_final = vid_path + query_param_dict[
                        'vid_mapping_file']
                else:
                    vid_path_final = vid_path + "inputs" + os.path.sep + "vid.json"
                spark_cmd_v2 = 'spark-submit --class TestGenomicsDBDataSourceV2 --master ' + spark_master + ' --deploy-mode ' + spark_deploy + ' --total-executor-cores 1 --executor-memory 512M --conf "spark.yarn.executor.memoryOverhead=3700" --conf "spark.executor.extraJavaOptions=' + jacoco + '" --conf "spark.driver.extraJavaOptions=' + jacoco + '" --jars ' + jar_dir + '/genomicsdb-' + genomicsdb_version + '-allinone.jar ' + jar_dir + '/genomicsdb-' + genomicsdb_version + '-examples.jar --loader ' + loader_json_filename + ' --query ' + query_json_filename + ' --vid ' + vid_path_final + ' --spark_master ' + spark_master
                if (test_name == "t6_7_8"):
                    spark_cmd_v2 = spark_cmd_v2 + ' --use-query-protobuf'
                if (test_name == "t0_overlapping"):
                    spark_cmd_v2 = spark_cmd_v2 + ' --hostfile ' + hostfile_path
                pid = subprocess.Popen(spark_cmd_v2,
                                       shell=True,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
                stdout_string, stderr_string = pid.communicate()
                if (pid.returncode != 0):
                    sys.stderr.write('Query test V2: ' + test_name +
                                     ' with query file ' +
                                     query_json_filename + ' failed\n')
                    sys.stderr.write('Spark command was: ' + spark_cmd_v2 +
                                     '\n')
                    sys.stderr.write('Spark stdout was: ' + stdout_string +
                                     '\n')
                    sys.stderr.write('Spark stderr was: ' + stderr_string +
                                     '\n')
                    sys.stderr.write('Query file was: ' +
                                     json.dumps(test_query_dict) + '\n')
                    cleanup_and_exit(namenode, tmpdir, -1)
                stdout_list = stdout_string.splitlines(True)
                stdout_filter = "".join(stdout_list)
                stdout_json = json.loads(stdout_filter)
                if ('golden_output' in query_param_dict
                        and 'spark' in query_param_dict['golden_output']):
                    json_golden = get_json_from_file(
                        query_param_dict['golden_output']['spark'] + '_v2')
                    checkdiff = jsondiff.diff(stdout_json, json_golden)
                    if (not checkdiff):
                        sys.stdout.write(
                            'Query test V2: ' + test_name +
                            ' with column ranges: ' +
                            str(query_param_dict['query_column_ranges']) +
                            ' and loaded with ' + str(len(col_part)) +
                            ' partitions passed\n')
                    else:
                        sys.stdout.write(
                            'Mismatch in query test V2: ' + test_name +
                            ' with column ranges: ' +
                            str(query_param_dict['query_column_ranges']) +
                            ' and loaded with ' + str(len(col_part)) +
                            ' partitions\n')
                        print(checkdiff)
                        sys.stderr.write('Spark stdout was: ' + stdout_string +
                                         '\n')
                        sys.stderr.write('Spark stderr was: ' + stderr_string +
                                         '\n')
                        cleanup_and_exit(namenode, tmpdir, -1)
        rc = common.report_jacoco_coverage(jacoco_report_cmd)
        if (rc != 0):
            cleanup_and_exit(namenode, tmpdir, -1)
    cleanup_and_exit(namenode, tmpdir, 0)
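The V2 pass/fail check above relies on jsondiff.diff returning an empty diff for deep-equal documents; a minimal standalone sketch of that idiom (made-up data, not the GenomicsDB golden files):

import jsondiff

golden = {'contigs': ['1', '2'], 'calls': 3}
observed = {'contigs': ['1', '2'], 'calls': 3}

checkdiff = jsondiff.diff(observed, golden)
if not checkdiff:      # an empty diff means the two documents are identical
    print('passed')
else:
    print('mismatch:', checkdiff)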
예제 #29
0
# Check R package specs
if set(spec_files.keys()) != set(r_files.keys()):
    for spec in set(spec_files.keys()) - set(r_files.keys()):
        print('{} missing from R package'.format(spec), file=sys.stderr)
    for spec in set(r_files.keys()) - set(spec_files.keys()):
        print('{} found in R package but missing from specs/'.format(spec), file=sys.stderr)
    sys.exit(1)

for spec_name in spec_files:
    # check equality of specs
    with open(spec_files[spec_name], 'r') as ip:
        gold_spec = yaml.load(ip)
    with open(py_files[spec_name], 'r') as ip:
        py_spec = yaml.load(ip)
    with open(r_files[spec_name], 'r') as ip:
        r_spec = yaml.load(ip)

    # Check python package
    if jsondiff.diff(gold_spec, py_spec) != {}:
        print('{} spec is different from python version'.format(spec_name), file=sys.stderr)
        print(jsondiff.diff(gold_spec, py_spec), file=sys.stderr)
        sys.exit(1)

    # Check R package
    if jsondiff.diff(gold_spec, r_spec) != {}:
        print('{} spec is different from R version'.format(spec_name), file=sys.stderr)
        print(jsondiff.diff(gold_spec, r_spec), file=sys.stderr)
        sys.exit(1)
예제 #30
0
    def get(self):
        """This get() calls the other functions for some reason(tm)."""

        bucket_name = os.environ.get(
            "BUCKET_NAME", app_identity.get_default_gcs_bucket_name()
        )

        self.response.headers["Content-Type"] = "text/plain"
        self.response.write(
            "Demo GCS Application running from Version: "
            + os.environ["CURRENT_VERSION_ID"]
            + "\n"
        )
        self.response.write("Using bucket name: " + bucket_name + "\n\n")

        version = os.environ["CURRENT_VERSION_ID"].split(".")[0]
        bucket = "/" + bucket_name
        if version == "None":
            filename = bucket + "/schedule"
            updated_filename = bucket + "/updated_schedule"
        else:
            filename = bucket + "/schedule_" + version
            updated_filename = bucket + "/updated_schedule_" + version

        # This _etag is currently unused, could be used to reduce writes to
        #  only when the schedule is updated (and to notify of updates)
        try:
            filename_etag = self.stat_file(filename).etag
            if DEBUG:
                self.response.write("etag: %s \n" % filename_etag)
        except gcs.NotFoundError:
            if DEBUG:
                print("P1")

        [totalgames, jsondata] = self.fetch_upstream_schedule(URL)

        if totalgames == 0:
            pass
        else:
            self.response.write("Total Games: %s\n" % totalgames)
            [teamdates] = self.parse_schedule(jsondata)
            content = self.make_data_json(teamdates)
            try:
                old_content = self.read_file(filename)
            except gcs.NotFoundError:
                self.create_file(filename, content)
                old_content = self.read_file(filename)
            if old_content == content:
                try:
                    last_updated = self.read_file(updated_filename)
                    self.response.write("Not updating schedule - it is current.\n")
                except gcs.NotFoundError:
                    self.create_file(updated_filename, FOR_UPDATED)
                    last_updated = self.read_file(updated_filename)
                self.response.write("Last updated: %s\n" % last_updated)
            else:
                print(
                    "Changes: %s" % (diff(json.loads(old_content), json.loads(content)))
                )
                self.response.write(
                    "Diff: %s" % diff(json.loads(old_content), json.loads(content))
                )
                self.create_file(filename, content)
                self.create_file(updated_filename, FOR_UPDATED)
                # Only send e-mails outside playoffs
                #  (potential spoilers - games are removed from the schedule)
                if CURRENT_MONTH < 4 or CURRENT_MONTH > 6:
                    self.send_an_email(
                        diff(json.loads(old_content), json.loads(content)), True
                    )
                self.response.set_status(202)
예제 #31
0
    def test_a(self):

        self.assertEqual({}, diff(1, 1))
        self.assertEqual({}, diff(True, True))
        self.assertEqual({}, diff('abc', 'abc'))
        self.assertEqual({}, diff([1, 2], [1, 2]))
        self.assertEqual({}, diff((1, 2), (1, 2)))
        self.assertEqual({}, diff({1, 2}, {1, 2}))
        self.assertEqual({}, diff({'a': 1, 'b': 2}, {'a': 1, 'b': 2}))
        self.assertEqual({}, diff([], []))
        self.assertEqual({}, diff(None, None))
        self.assertEqual({}, diff({}, {}))
        self.assertEqual({}, diff(set(), set()))

        self.assertEqual(2, diff(1, 2))
        self.assertEqual(False, diff(True, False))
        self.assertEqual('def', diff('abc', 'def'))
        self.assertEqual([3, 4], diff([1, 2], [3, 4]))
        self.assertEqual((3, 4), diff((1, 2), (3, 4)))
        self.assertEqual({3, 4}, diff({1, 2}, {3, 4}))
        self.assertEqual({replace: {'c': 3, 'd': 4}}, diff({'a': 1, 'b': 2}, {'c': 3, 'd': 4}))

        self.assertEqual({replace: {'c': 3, 'd': 4}}, diff([1, 2], {'c': 3, 'd': 4}))
        self.assertEqual(123, diff({'a': 1, 'b': 2}, 123))

        self.assertEqual({delete: ['b']}, diff({'a': 1, 'b': 2}, {'a': 1}))
        self.assertEqual({'b': 3}, diff({'a': 1, 'b': 2}, {'a': 1, 'b': 3}))
        self.assertEqual({'c': 3}, diff({'a': 1, 'b': 2}, {'a': 1, 'b': 2, 'c': 3}))
        self.assertEqual({delete: ['b'], 'c': 3}, diff({'a': 1, 'b': 2}, {'a': 1, 'c': 3}))

        self.assertEqual({add: {3}}, diff({1, 2}, {1, 2, 3}))
        self.assertEqual({add: {3}, discard: {4}}, diff({1, 2, 4}, {1, 2, 3}))
        self.assertEqual({discard: {4}}, diff({1, 2, 4}, {1, 2}))

        self.assertEqual({insert: [(1, 'b')]}, diff(['a', 'c'], ['a', 'b', 'c']))
        self.assertEqual({insert: [(1, 'b')], delete: [3, 0]}, diff(['x', 'a', 'c', 'x'], ['a', 'b', 'c']))
        self.assertEqual(
            {insert: [(2, 'b')], delete: [4, 0], 1: {'v': 20}},
            diff(['x', 'a', {'v': 11}, 'c', 'x'], ['a', {'v': 20}, 'b', 'c'])
        )
        self.assertEqual(
            {insert: [(2, 'b')], delete: [4, 0],  1: {'v': 20}},
            diff(['x', 'a', {'u': 10, 'v': 11}, 'c', 'x'], ['a', {'u': 10, 'v': 20}, 'b', 'c'])
        )
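The assertions above lean on jsondiff's sentinel symbols; a minimal standalone sketch of the same behaviour (assuming, as in jsondiff's own test suite, that the symbols are importable from the top-level package):

from jsondiff import diff, insert, delete, add

# only the modified key shows up for dict changes
assert diff({'a': 1, 'b': 2}, {'a': 1, 'b': 3}) == {'b': 3}
# removed dict keys are listed under the delete symbol
assert diff({'a': 1, 'b': 2}, {'a': 1}) == {delete: ['b']}
# list insertions are reported as (index, value) pairs under insert
assert diff(['a', 'c'], ['a', 'b', 'c']) == {insert: [(1, 'b')]}
# set additions are reported under add
assert diff({1, 2}, {1, 2, 3}) == {add: {3}}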
예제 #32
0
def commit(snoRoot, sessionID, DryRun=False):
    #with open("ConfigDB", "r") as f:
    #    originalsno_dict = json.loads(f.read())

    get_lock = ConfigDB.acquire_lock()

    if 'Error' in get_lock.keys():
        return get_lock
    else:
        originalsnoRoot = get_lock['ConfigDB']

    if DryRun:
        '''
        original_xml = (pybindIETFXMLEncoder.serialise(originalsnoRoot))
        new_xml = (pybindIETFXMLEncoder.serialise(snoRoot))
        diff_xml = XMLDiff(original_xml, new_xml)
        '''

        diff_xml = calculate_diff(originalsnoRoot, snoRoot)
        ConfigDB.release_lock()
        return diff_xml

    sno_dict = json.loads(pybindJSON.dumps(snoRoot))
    originalsno_dict = json.loads(pybindJSON.dumps(originalsnoRoot))
    difference = diff(originalsno_dict, sno_dict)

    #print ((originalsnoRoot.get()))
    device_config = {}
    if 'devices' in difference.keys(
    ) and 'device' in difference['devices'].keys():
        for device in difference['devices']['device'].keys():
            if 'config' in difference['devices']['device'][device]:
                config_dict = difference['devices']['device'][device]['config']
                #print ((originalsnoRoot.devices.device[device].config.get()))

                #print (config_dict)

                if device not in originalsnoRoot.devices.device.keys():
                    return {
                        "Error":
                        "Device {} not present in DB. First add the device".
                        format(device)
                    }

                original_config = originalsnoRoot.devices.device[device].config
                new_config = snoRoot.devices.device[device].config
                '''
                print (original_config)
                print (new_config)
                XMLDiff1 = XMLDiff(original_config, new_config)

                print (XMLDiff1+"\n")
                configXML_tree = list( etree.fromstring(XMLDiff1) )[0]
                configXML = etree.tostring(configXML_tree).decode()

                configXML = "<config>\n" + configXML + "</config>"
                '''

                XMLDIFF = calculate_diff(
                    original_config,
                    new_config,
                    snoObject=originalsnoRoot.devices.device[device].config)

                configXML_tree = list(etree.fromstring(XMLDIFF))[0]
                configXML = etree.tostring(configXML_tree).decode()

                configXML = "<config>\n" + configXML + "</config>"
                print(configXML)

                rev_XMLDIFF = calculate_diff(
                    new_config,
                    original_config,
                    snoObject=originalsnoRoot.devices.device[device].config)
                rev_XML_tree = list(etree.fromstring(rev_XMLDIFF))[0]
                rev_XML = etree.tostring(rev_XML_tree).decode()

                rev_XML = "<config>\n" + rev_XML + "</config>"

                device_config[device] = {}
                device_config[device]['config'] = configXML
                device_config[device]['rev_config'] = rev_XML

    if device_config:
        try:
            NetworkTransaction(device_config, originalsnoRoot)
        except Exception as e:
            return ({"Error": "Failed to commit. " + str(e)})

    return ({"ConfigDB": ConfigDB.write(snoRoot, sessionID)})
예제 #33
0
파일: test_api.py 프로젝트: Hoedic/fab_taxi
def get_diff(expected, result):
    return diff(json.loads(expected), json.loads(result))
예제 #34
0
def process_bounty_changes(old_bounty, new_bounty):
    """Process Bounty changes.

    Args:
        old_bounty (dashboard.models.Bounty): The old Bounty object.
        new_bounty (dashboard.models.Bounty): The new Bounty object.

    """
    from dashboard.utils import build_profile_pairs
    profile_pairs = None

    # check for maintainer blocks
    is_blocked = any([(ele.lower() in new_bounty.github_url.lower())
                      for ele in BlockedURLFilter.objects.values_list(
                          'expression', flat=True)])
    if is_blocked:
        raise UnsupportedRepoException(
            "This repo is not bountyable at the request of the maintainer.")

    # process bounty sync requests
    did_bsr = False
    for bsr in BountySyncRequest.objects.filter(
            processed=False, github_url=new_bounty.github_url).nocache():
        did_bsr = True
        bsr.processed = True
        bsr.save()

    # get json diff
    json_diff = diff(old_bounty.raw_data, new_bounty.raw_data) if (
        old_bounty and new_bounty) else None

    # new bounty
    if not old_bounty or (not old_bounty and new_bounty
                          and new_bounty.is_open) or (not old_bounty.is_open
                                                      and new_bounty
                                                      and new_bounty.is_open):
        is_greater_than_x_days_old = new_bounty.web3_created < (
            timezone.now() - timezone.timedelta(hours=24))
        if is_greater_than_x_days_old and not settings.IS_DEBUG_ENV:
            msg = f"attempting to create a new bounty ({new_bounty.standard_bounties_id}) when is_greater_than_x_days_old = True"
            print(msg)
            raise Exception(msg)
        event_name = 'new_bounty'
    elif old_bounty.num_fulfillments < new_bounty.num_fulfillments:
        event_name = 'work_submitted'
    elif old_bounty.is_open and not new_bounty.is_open:
        if new_bounty.status in ['cancelled', 'expired']:
            event_name = 'killed_bounty'
        else:
            event_name = 'work_done'
    elif old_bounty.value_in_token < new_bounty.value_in_token:
        event_name = 'increased_bounty'
    else:
        event_name = 'unknown_event'
        logger.info(
            f'got an unknown event from bounty {old_bounty.pk} => {new_bounty.pk}: {json_diff}'
        )

    print(f"- {event_name} event; diff => {json_diff}")

    # record a useraction for this
    record_user_action(event_name, old_bounty, new_bounty)
    record_bounty_activity(event_name, old_bounty, new_bounty)

    # Build profile pairs list
    if new_bounty.fulfillments.exists():
        profile_pairs = build_profile_pairs(new_bounty)

    # Send an Email if this is a LowBall bounty
    try:
        if (not old_bounty
                or old_bounty.value_in_usdt != new_bounty.value_in_usdt):
            if is_lowball_bounty(new_bounty.value_in_usdt
                                 ) and new_bounty.network == 'mainnet':
                notify_of_lowball_bounty(new_bounty)
    except Exception as e:
        logger.error(f'{e} during check for Lowball Bounty')

    # marketing
    if event_name != 'unknown_event':
        print("============ posting ==============")
        did_post_to_slack = maybe_market_to_slack(new_bounty, event_name)
        did_post_to_user_slack = maybe_market_to_user_slack(
            new_bounty, event_name)
        did_post_to_github = maybe_market_to_github(new_bounty, event_name,
                                                    profile_pairs)
        did_post_to_email = maybe_market_to_email(new_bounty, event_name)
        print("============ done posting ==============")

        # what happened
        what_happened = {
            'did_bsr': did_bsr,
            'did_post_to_email': did_post_to_email,
            'did_post_to_github': did_post_to_github,
            'did_post_to_slack': did_post_to_slack,
            'did_post_to_user_slack': did_post_to_user_slack,
            'did_post_to_twitter': False,
        }

        print("changes processed: ")
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(what_happened)
    else:
        print('No notifications sent - Event Type Unknown = did_bsr: ',
              did_bsr)
예제 #35
0
def test_replace_all_classes():
    current = {
        "azure.ai.formrecognizer": {
            "class_nodes": {}
        }
    }

    stable = {
        "azure.ai.formrecognizer": {
            "class_nodes": {
                "class_name": {
                    "methods": {
                        "one": {
                            "parameters": {
                                "testing": {
                                    "default": None,
                                    "param_type": "positional_or_keyword"
                                }
                            },
                            "is_async": True
                        },
                        "two": {
                            "parameters": {
                                "testing2": {
                                    "default": None,
                                    "param_type": "positional_or_keyword"
                                }
                            },
                            "is_async": True
                        },
                    }
                },
                "class_name2": {
                    "methods": {
                        "one": {
                            "parameters": {
                                "testing": {
                                    "default": None,
                                    "param_type": "positional_or_keyword"
                                }
                            },
                            "is_async": True
                        },
                        "two": {
                            "parameters": {
                                "testing2": {
                                    "default": None,
                                    "param_type": "positional_or_keyword"
                                }
                            },
                            "is_async": True
                        },
                    }
                },
            }
        }
    }

    EXPECTED = [
        "(RemovedOrRenamedClass): The model or publicly exposed class 'azure.ai.formrecognizer.class_name' was deleted or renamed in the current version",
        "(RemovedOrRenamedClass): The model or publicly exposed class 'azure.ai.formrecognizer.class_name2' was deleted or renamed in the current version"
    ]

    diff = jsondiff.diff(stable, current)
    bc = BreakingChangesTracker(stable, current, diff, "azure-storage-queue")
    bc.run_checks()

    assert len(bc.breaking_changes) == len(EXPECTED)
    for message in bc.breaking_changes:
        assert message in EXPECTED
예제 #36
0
    def diff(self, kind, inmodel, dbmodel, hide_defaults=True):
        """Create a diff between input and existing model.

        Args:
            kind (str): kind of object to diff.
            inmodel (model): incoming model data to diff against the database.
            dbmodel (model): existing database model, or None if not yet stored.
            hide_defaults (bool, optional):
                hide values that are defaulted into db

        Returns:
            dict: Diff
        """
        # Check for empty inputs
        if inmodel is None:
            return None
        if dbmodel is None:
            dbmodel = {}
        else:
            # Transform django object to dictionary.
            dbmodel = dbmodel.__dict__

        context = {'section', 'course', 'semester', 'textbook'}

        whats = {}
        for k, v in inmodel.iteritems():
            if k not in context:
                continue
            try:
                whats[k] = str(v)
            except (django.utils.encoding.DjangoUnicodeDecodeError,
                    UnicodeEncodeError):
                whats[k] = '<{}: [Bad Unicode data]'.format(k)

        # Remove db specific content from model.
        blacklist = context | {
            '_state',
            'id',
            'section_id',
            'course_id',
            '_course_cache',
            'semester_id',
            '_semester',
            'vector',
        }

        def prune(d):
            return {k: v for k, v in d.iteritems() if k not in blacklist}

        dbmodel = prune(dbmodel)
        inmodel = prune(inmodel)

        if 'course' in dbmodel:
            dbmodel['course'] = str(dbmodel['course'])

        # Remove null values from dictionaries.
        dbmodel = {k: v for k, v in dbmodel.iteritems() if v is not None}

        # Move contents of default dictionary to first-level of dictionary.
        if 'defaults' in inmodel:
            defaults = inmodel['defaults']
            del inmodel['defaults']
            inmodel.update(defaults)

        # Diff the in-model and db-model
        diffed = json.loads(
            jsondiff.diff(dbmodel, inmodel, syntax='symmetric', dump=True))

        # Remove db defaulted values from diff output.
        if hide_defaults and '$delete' in diffed:
            self.remove_defaulted_keys(kind, diffed['$delete'])
            if len(diffed['$delete']) == 0:
                del diffed['$delete']

        # Add `what` and `context` tag to diff output.
        if len(diffed) > 0:
            if isinstance(diffed, list) and len(diffed[0]) == 0:
                diffed = {'$new': diffed[1]}
            elif isinstance(diffed, dict):
                diffed.update({'$what': inmodel})
            diffed.update({'$context': whats})
            self.json_streamer.write(diffed)
        return diffed
예제 #37
0
#!/usr/bin/env python
import json
from jsondiff import diff

with open('hw.json') as f:
    json_data = f.read()
json_prev = json.loads(json_data)

with open('hw_new.json') as f:
    json_data = f.read()
json_new = json.loads(json_data)

diff(json_prev, json_new)
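The script above computes the diff but never displays it; one way to print it as JSON (a sketch relying on jsondiff's marshal flag, which turns symbol keys into plain strings such as "$insert") would be:

import json
from jsondiff import diff

# marshal=True replaces symbol keys (insert, delete, ...) with string keys
# so the result can be fed to json.dumps for display
changes = diff(json_prev, json_new, marshal=True)
print(json.dumps(changes, indent=2))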
예제 #38
0
def diff_json(a, b):
	a_json = json.loads(a.encode("utf-8"))
	b_json = json.loads(b.encode("utf-8"))
	raw_diff = diff(a_json, b_json, syntax='symmetric')

	good_diff = {}
	for element in raw_diff:
		if str(element) == "$insert":
			for block in raw_diff[element]:
				good_diff[block[0]] = ["",block[1]]
		elif str(element) == "$delete":
			for block in raw_diff[element]:
				good_diff[block[0]] = [block[1],""]
		else:
			block_diff = raw_diff[element]['value']
			old = {}
			new = {}
			old_dict = {}
			new_dict = {}
			try:
				for value in block_diff:
					old[value] = block_diff[value][0]
					new[value] = block_diff[value][1]
				old_dict['type'] = b_json[element]['type']
				new_dict['type'] = b_json[element]['type']
				old_dict['value'] = old
				new_dict['value'] = new
				good_diff[element] = [old_dict, new_dict]
			except TypeError:
				old_dict['type'] = "Changed Blocktype"
				new_dict['type'] = "Changed Blocktype"
				old = {}
				new = {}
				old_str = ""
				for (key, val) in block_diff[0].items():
					if isinstance(val, str):
						old_str += key + ":    " + val.encode('ascii', 'xmlcharrefreplace') + "\n"
					elif isinstance(val, int):
						old_str += key + ":    " + str(val).encode('ascii', 'xmlcharrefreplace') + "\n"
					else:
						old_str += key + ":    Empty\n"
				old[raw_diff[element]['type'][0]] = old_str
				new_str = ""
				for (key, val) in block_diff[1].items():
					if isinstance(val, str):
						new_str += key + ":    " + val.encode('ascii', 'xmlcharrefreplace') + "\n"
					elif isinstance(val, int):
						new_str += key + ":    " + str(val).encode('ascii', 'xmlcharrefreplace') + "\n"
					else:
						new_str += key + ":    Empty\n"
				new[raw_diff[element]['type'][1]] = new_str
				old_dict['value'] = old
				new_dict['value'] = new
				good_diff[element] = [old_dict, new_dict]
				print good_diff[element]
			except:
				if 'type' in raw_diff[element]:
					old_dict['type'] = raw_diff[element]['type']
					new_dict['type'] = raw_diff[element]['type']
				old = {}
				new = {}
				old['Complicated Changes'] = block_diff
				new['Complicated Changes'] = block_diff
				old_dict['value'] = old
				new_dict['value'] = new
				good_diff[element] = [old_dict, new_dict]

	return good_diff
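For reference, a small sketch (with made-up block data) of the symmetric-syntax diff this function consumes: changed leaves arrive as [old, new] pairs keyed by list index, while whole-block additions and removals arrive under the insert/delete symbols, whose str() is "$insert"/"$delete":

from jsondiff import diff

old_blocks = [{'type': 'header', 'value': {'text': 'Hello'}},
              {'type': 'para', 'value': {'text': 'old body'}}]
new_blocks = [{'type': 'header', 'value': {'text': 'Hello'}},
              {'type': 'para', 'value': {'text': 'new body'}},
              {'type': 'para', 'value': {'text': 'appended'}}]

raw_diff = diff(old_blocks, new_blocks, syntax='symmetric')
# Expected shape (assumption based on the handling above): the changed block
# is keyed by its list index with the old/new text as a two-element list, and
# the appended block appears under the insert symbol as an (index, value) pair.
print(raw_diff)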
예제 #39
0
def uploadLogoFile():
    # This is the path to the upload directory
    duset_name = ""
    # These are the extension that we are accepting to be uploaded
    ALLOWED_EXTENSIONS = ['png', 'jpg', 'jpeg', 'gif', 'bmp']

    re.compile("active", re.IGNORECASE)
    # Get the name of the uploaded file
    file = request.files.get('logo')
    duset_id = request.form.get('duset_id')
    if duset_id is None:
        raise Exception("duset_id not found in request")
    if deploymentUnitSetDB.GetDeploymentUnitSetById(duset_id, False) is None:
        raise Exception("No such DU SET exists")
    filename = None
    if file is not None:
        filename = ('.' in file.filename and str(
            file.filename.rsplit('.', 1)[1]).lower() in ALLOWED_EXTENSIONS)
    else:
        raise Exception("File not found in request.")
    if filename not in [True]:
        raise Exception(
            "Invalid file .Please select file 'png', 'jpg', 'jpeg', 'gif'")

    # Check if the file is one of the allowed types/extensions
    if file and filename:
        # Make the filename safe, remove unsupported chars
        filename = secure_filename(file.filename)
        file_extension = filename.split(".")[-1]
        result1 = deploymentUnitSetDB.GetDeploymentUnitSetById(duset_id, False)
        duset_name = '_'.join(result1["name"].split())
        file_path = str(logo_full_path + '/' + duset_name + '_' + duset_id +
                        '.' + file_extension)
        thumbnail_file_path = (str(logo_full_path + '/' + duset_name + '_' +
                                   duset_id + '_thumbnail.' + file_extension))
        current_logo_file = str(logo_path + '/' + duset_name + '_' + duset_id +
                                '.' + file_extension)
        current_thumbnail_file = str(logo_path + '/' + duset_name + '_' +
                                     duset_id + '_thumbnail.' + file_extension)
        db_logo_file = result1.get("logo")
        # db_thumbnail_file=result1.get("thumbnail_logo")
        file.save(file_path)
        if not os.path.isfile(file_path):
            raise Exception("Failed to save logo to path :" + file_path)
        if FileUtils.thumbnail(file_path, thumbnail_file_path) is None:
            current_thumbnail_file = current_logo_file
        if not os.path.isfile(thumbnail_file_path):
            current_thumbnail_file = current_logo_file

        if db_logo_file is None:
            #                 updated=tooldb.add_tool_logo(current_logo_file,current_thumbnail_file,duset_id)
            updated = deploymentUnitSetDB.add_du_set_logo(
                current_logo_file, current_thumbnail_file, duset_id)
        else:
            sub = diff(db_logo_file, current_logo_file)
            if sub:
                print "Du Set API:compare: logo " + db_logo_file + " is required to be updated as the logo data has changed."
                # updated=tooldb.add_tool_logo(current_logo_file,current_thumbnail_file,duset_id)
                updated = deploymentUnitSetDB.add_du_set_logo(
                    current_logo_file, current_thumbnail_file, duset_id)
        return jsonify(
            json.loads(
                dumps({
                    "result":
                    "success",
                    "message":
                    "File and Deployment Unit Set was updated successfully and is readable"
                }))), 200
예제 #40
0
def run_compare(module, count, test):
    class CustomFilter:
        def filter_jmespath(self, test, legal_json_diff):
            plugins_filter = {
                "ntp": "peers.{delete: delete, insert: insert}",
                "vlan": "vlans.{delete: delete, insert: insert}",
                "as_path": "activeIpAsPathLists.{delete: delete, insert: insert}",
                "lldp": "lldpNeighbors.{delete: delete, insert: insert}",
            }

            if plugins_filter.get(test):
                final_diff = search(plugins_filter.get(test), legal_json_diff)
            elif test == "interface":
                final_diff = CustomFilter().filter_iface_counters(
                    legal_json_diff
                )
            elif test == "acl":
                final_diff = CustomFilter().filter_acls_counters(
                    legal_json_diff
                )
            else:
                final_diff = legal_json_diff

            return final_diff

        def filter_iface_counters(self, legal_json_diff):
            return_dict = {"interfaces": {}}

            for ifaces in legal_json_diff.values():
                for iface_name, iface_values in ifaces.items():

                    if iface_values.get("interfaceStatus"):
                        return_dict["interfaces"][
                            iface_name
                        ] = legal_json_diff["interfaces"][iface_name][
                            "interfaceStatus"
                        ]

                    if iface_values.get("memberInterfaces"):
                        if iface_values["memberInterfaces"].get(
                            "delete"
                        ) or iface_values["memberInterfaces"].get("insert"):
                            return_dict["interfaces"][
                                iface_name
                            ] = legal_json_diff["interfaces"][iface_name][
                                "memberInterfaces"
                            ]

                    if iface_name == "insert" or iface_name == "delete":
                        return_dict["interfaces"][
                            iface_name
                        ] = legal_json_diff["interfaces"][iface_name]

            return return_dict

        def filter_acls_counters(self, legal_json_diff):

            for acls in legal_json_diff.values():
                # iterate over a copy of the keys so entries can be deleted safely
                for acl_change in list(acls.keys()):
                    # keep only the inserted/deleted ACL entries
                    if acl_change not in ('insert', 'delete'):
                        del legal_json_diff['aclList'][acl_change]

            return legal_json_diff

    def replace(string, test):
        substitutions = {
            "'": '"',
            "insert": '"insert"',
            "delete": '"delete"',
            "True": "true",
            "False": "false",
            "(": "[",
            ")": "]",
            " u'": "\"",
            "{u'": "{\"",
            "[u'": "[\"",
            " u\"": "\"",
            "{u\"": "{\"",
            "[u\"": "[\"",
        }

        skip_list = ["vrf"]

        substrings = sorted(substitutions, key=len, reverse=True)
        regex = re.compile("|".join(map(re.escape, substrings)))
        sub_applied = regex.sub(
            lambda match: substitutions[match.group(0)], string
        )

        if test not in skip_list:
            # ('{', '28: ') (' ', '187: ')
            for integer in re.findall(r"(\s|{)(\d+:\s)", sub_applied):
                int_replacement = integer[1][:-2]
                merged_integer = integer[0] + integer[1]

                if integer[0] == '{':
                    sub_applied = sub_applied.replace(
                        # double {{ required by format to escape {
                        merged_integer, '{{"{0}": '.format(int_replacement)
                    )

                else:
                    sub_applied = sub_applied.replace(
                        merged_integer, ' "{0}": '.format(int_replacement)
                    )

        return sub_applied

    before_file = module.params.get("files")[0]
    after_file = module.params.get("files")[1]
    filter_flag = module.params.get("filter")
    host = module.params.get("hostname")
    root_path = module.params.get("root_path")

    if len(before_file) == len(after_file):

        try:
            before = open(
                "{root_path}/tests/before/{test}/{host}/{before_file}.json".format(
                    root_path=root_path,
                    test=test,
                    host=host,
                    before_file=str(before_file[count]),
                ),
                "r",
            )
        except FileNotFoundError as error:
            module.fail_json(msg=error)

        try:
            after = open(
                "{root_path}/tests/after/{test}/{host}/{after_file}.json".format(
                    root_path=root_path,
                    test=test,
                    host=host,
                    after_file=str(after_file[count]),
                ),
                "r",
            )
        except FileNotFoundError as error:
            module.fail_json(msg=error)

        destination = "{root_path}/tests/diff/{test}/{host}/".format(
            root_path=root_path,
            test=test,
            host=host,
        )

        if not os.path.exists(destination):
            os.makedirs(destination)

        json_diff = str(diff(before, after, load=True, syntax="symmetric"))
        legal_json_diff = replace(json_diff, test)

        try:
            if not filter_flag:
                final_diff = json.loads(legal_json_diff)

            if filter_flag:
                final_diff = CustomFilter().filter_jmespath(
                    test, json.loads(legal_json_diff)
                )
        except ValueError as error:
            module.fail_json(msg="Diff file not legal:\n{0}".format(legal_json_diff))

        diff_file_id = str(round(time.time())) + '_' + str(
            (int(before_file[count]) - int(after_file[count])) * -1
        )

        try:
            with open("{0}{1}.json".format(destination, diff_file_id), "w") as file:
                json.dump(final_diff, file, ensure_ascii=False, indent=4)

        except IOError:
            module.fail_json(msg="Something went wrong when writing to the file")

    return final_diff
예제 #41
0
    def get_json_value(self, element_name, task_dfn, expected_val):
        self.assertFalse(
            diff(json.loads(run(element_name, task_dfn)),
                 json.loads(expected_val)))
예제 #42
0
    def processDataset(self, dataset, resources):

        if not dataset:
            raise Exception(
                _("Fehler: Kein Datensatz zum Erstellen in datasetuploader.processDataset()"
                  ))

        if self.limit:
            logging.debug(_("Datensatz: %s/%s"), self.dataset_count,
                          self.limit)

        if self.dataset_count >= self.limit:
            logging.info(
                _("Datensatz wird übersprungen. Limit von %s erreicht."),
                self.limit)
            return None

        logging.info(_("Bearbeite Datensatz: '%s'"), dataset)
        logging.debug(_("Resourcen: %s"), resources)

        node_id = dataset.getValue(Dataset.NODE_ID)
        if node_id:
            # update existing dataset by node_id
            node_id = self.updateDataset(dataset)

        elif dataset.getValue(Dataset.DATASET_ID):
            # update by package_id
            package_id = dataset.getValue(Dataset.DATASET_ID)
            if not node_id:
                remote_url = config.x_api_find_node_id.format(package_id)
                r = requests.get(remote_url)
                node_search = json.loads(r.text)

                if node_search[0]['nid']:
                    node_id = node_search[0]['nid']
                    dataset.set(Dataset.NODE_ID, node_id)

                node_id = self.updateDataset(dataset)
            else:
                logging.error(
                    _("Datensatz mit der Package-ID '%s' wurde nicht gefunden"
                      ), package_id)

        else:
            # create new dataset
            self.dataset_count += 1
            node_id = dkanhandler.create(dataset)
            logging.debug(_("NEW dataset ID: %s"), node_id)

        if not node_id:
            raise Exception(
                _("Error while creating or updating the dataset"))

        elif node_id == '-':
            return None

        raw_dataset = dkanhandler.getDatasetDetails(node_id)

        # Summary with changes
        raw_dataset2 = dkanhelpers.HttpHelper.read_dkan_node(node_id)
        logging.debug(_(" == Dataset changes: == "))
        logging.debug(diff(raw_dataset, raw_dataset2))

        # add or update resources
        if not self._ignore_resources:
            self.processResources(raw_dataset, resources)

        return node_id
def test_replace_all_params():
    current = {
        "azure.ai.formrecognizer": {
            "function_nodes": {
                "my_function_name": {
                    "parameters": {},
                    "is_async": False
                }
            },
            "class_nodes": {
                "class_name": {
                    "methods": {
                        "one": {
                            "parameters": {},
                            "is_async": True
                        },
                        "two": {
                            "parameters": {},
                            "is_async": True
                        },
                    }
                }
            }
        }
    }

    stable = {
        "azure.ai.formrecognizer": {
            "function_nodes": {
                "my_function_name": {
                    "is_async": False,
                    "parameters": {
                        "testing": {
                            "default": None,
                            "param_type": "positional_or_keyword"
                        },
                        "testing2": {
                            "default": None,
                            "param_type": "positional_or_keyword"
                        }
                    }
                }
            },
            "class_nodes": {
                "class_name": {
                    "methods": {
                        "one": {
                            "parameters": {
                                "testing": {
                                    "default": None,
                                    "param_type": "positional_or_keyword"
                                }
                            },
                            "is_async": True
                        },
                        "two": {
                            "parameters": {
                                "testing2": {
                                    "default": None,
                                    "param_type": "positional_or_keyword"
                                }
                            },
                            "is_async": True
                        },
                    }
                }
            }
        }
    }

    EXPECTED = [
        "(RemovedOrRenamedPositionalParam): The 'azure.ai.formrecognizer.class_name method 'one' had its 'positional_or_keyword' parameter 'testing' deleted or renamed in the current version",
        "(RemovedOrRenamedPositionalParam): The 'azure.ai.formrecognizer.class_name method 'two' had its 'positional_or_keyword' parameter 'testing2' deleted or renamed in the current version",
        "(RemovedOrRenamedPositionalParam): The function 'azure.ai.formrecognizer.my_function_name' had its 'positional_or_keyword' parameter 'testing' deleted or renamed in the current version",
        "(RemovedOrRenamedPositionalParam): The function 'azure.ai.formrecognizer.my_function_name' had its 'positional_or_keyword' parameter 'testing2' deleted or renamed in the current version"
    ]

    diff = jsondiff.diff(stable, current)
    bc = BreakingChangesTracker(stable, current, diff, "azure-storage-queue")
    bc.run_checks()

    assert len(bc.breaking_changes) == len(EXPECTED)
    for message in bc.breaking_changes:
        assert message in EXPECTED
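For context on what the tracker walks over: the diff handed to BreakingChangesTracker above uses jsondiff's default compact syntax, in which removed dictionary keys are grouped under a delete marker. A minimal sketch with made-up values (marshal=True is used only so the marker prints as the plain string '$delete'):

import jsondiff

old = {"parameters": {"testing": 1, "kept": 2}}
new = {"parameters": {"kept": 2}}

# Removed keys show up under the delete marker; marshal=True turns it into a string key.
print(jsondiff.diff(old, new, marshal=True))
# expected shape: {'parameters': {'$delete': ['testing']}}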
예제 #44
0
 def test_dump(self, scenario):
     a, b = scenario
     diff(a, b, syntax='compact', dump=True)
     diff(a, b, syntax='explicit', dump=True)
     diff(a, b, syntax='symmetric', dump=True)
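If I read the jsondiff options correctly, dump=True makes diff() return the marshalled diff serialized as a JSON string instead of a Python dict, which is why the test above only needs to check that dumping works for each syntax. A small sketch:

from jsondiff import diff

result = diff({"a": 1, "b": 2}, {"a": 1}, syntax='compact', dump=True)
# result should be a JSON string rather than a dict, e.g. something like '{"$delete": ["b"]}'
print(type(result), result)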
예제 #45
0
def json_deep_equals(expected, actual):
    result = jsondiff.diff(expected, actual)
    return result == {}
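This works because jsondiff.diff() returns an empty dict when the two structures are deeply equal (key order does not matter), so comparing against {} is enough. For example:

import jsondiff

assert jsondiff.diff({"a": 1, "b": [1, 2]}, {"b": [1, 2], "a": 1}) == {}
assert jsondiff.diff({"a": 1}, {"a": 2}) != {}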
예제 #46
0
    with open(v + '/' + v, "w") as f:
        json.dump(json_data, f, indent=1)

    # Cleanup
    for file in glob(v + '/' + '*.json'):
        remove(file)
    if path.exists(v + '/' + v):
        rename(v + '/' + v, v + '/' + v + '.json')

    # Compare
    print("Comparing")
    with open(v + '/' + 'old_' + v, 'r') as o, open(v + '/' + v + '.json',
                                                    'r') as n:
        old = json.load(o)
        new = json.load(n)
        c = diff(old, new)
        if c:
            print("New changes found!")
            with open(v + '/' + 'changes', "w") as f:
                json.dump(c, f, indent=1)
            changes = True
        else:
            print("No changes found!")
            if path.exists(v + '/' + 'changes'):
                remove(v + '/' + 'changes')
            changes = False

    # commit, push and send
    if changes is True:
        # Notify
        rom = str(v.replace('_', ' '))
예제 #47
0
def get_changes(previous_id, revision_id):
	changes = []
	revision = PageRevision.objects.get(pk=revision_id)
	page_2 = revision.content_json
	prev_revision = PageRevision.objects.get(pk=previous_id)
	page_1 = prev_revision.content_json
	p1_dict = json.loads(page_1)['body']
	p2_dict = json.loads(page_2)['body']
	a_json = json.loads(p1_dict.encode("utf-8"))
	b_json = json.loads(p2_dict.encode("utf-8"))
	raw_diff = diff(a_json, b_json, syntax='symmetric')
	for element in raw_diff:
		if str(element) == "$insert":
			fields = []
			for block in raw_diff[element]:
				fields.append({
						"value": "#" +  str(block[0]) + ": " + block[1]['type'],
						"short": True
					})
			changes.append(
				{
					"title": "Added Blocks",
					"fields": fields,
					"color": "good"
				}
				)
		elif str(element) == "$delete":
			fields = []
			for block in raw_diff[element]:
				fields.append({
						"value": "#" +  str(block[0]) + ": " + block[1]['type'],
						"short": True
					})
			changes.append(
				{
					"title": "Removed Blocks",
					"fields": fields,
					"color": "danger"
				}
				)
		else:
			block_diff = raw_diff[element]['value']
			fields = []
			try:
				for value in block_diff:
					fields.append(
						{
							"value": str(value),
							"short": True
						})
				block_type = str(b_json[element]['type'])
			except Exception:
				# The block's type itself changed, so raw_diff holds an [old, new] pair for 'type'.
				block_type = str(raw_diff[element]['type'][0]) + " to: " + str(raw_diff[element]['type'][1])
				fields.append(
					{
						"value": "Complicated Changes: Details in Link"
					})
			changes.append(
				{
					"title": "Changed Block #" + str(element) + ": " + block_type,
					"fields": fields,
					"color": "warning"
				}
				)
	if not changes:
		changes.append(
			{
				"title": "No fields were changed",
				"color": "good"
			})
	return changes
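To make the branching above easier to follow, here is a rough sketch (block contents invented, not Wagtail data) of what a symmetric diff of two block lists can look like: changed items are keyed by index with [old, new] pairs for each changed field, while inserted or deleted items are listed as (index, block) pairs under the markers that the str(element) checks match as "$insert" and "$delete".

from jsondiff import diff

old_body = [
    {"type": "heading", "value": "Intro"},
    {"type": "paragraph", "value": "old text"},
]
new_body = [
    {"type": "heading", "value": "Intro"},
    {"type": "paragraph", "value": "new text"},
    {"type": "image", "value": "logo.png"},
]

raw_diff = diff(old_body, new_body, syntax='symmetric')
# Roughly: {1: {'value': ['old text', 'new text']},
#           insert: [(2, {'type': 'image', 'value': 'logo.png'})]}
# where the insert key is a symbol that prints as $insert.
print(raw_diff)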