def record_diff(old, new):
    """Return a JSON-compatible structure capable turn the `new` record back
    into the `old` record. The parameters must be structures compatible with
    json.dumps *or* strings compatible with json.loads. Note that by design,
    `old == record_patch(new, record_diff(old, new))`"""
    old, new = _norm_json_params(old, new)
    return json_delta.diff(new, old, verbose=False)
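The round-trip property in the docstring is json_delta's own contract: a delta computed from `new` to `old` can be applied back to `new` with json_delta.patch to recover `old`. A minimal standalone sketch of that property, using hypothetical records and skipping the `_norm_json_params` normalization:

import json_delta

old = {'a': 1, 'b': [1, 2, 3]}
new = {'a': 1, 'b': [1, 2], 'c': 'x'}

delta = json_delta.diff(new, old, verbose=False)  # what record_diff computes
assert json_delta.patch(new, delta, in_place=False) == old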
Example #3
    def get(self, url):
        status_codes = []
        content = []
        full_url = '{}/a/{}/{}'.format(self.base_url, self.domain, url)
        content_type = None
        for i, s in enumerate(self.sessions):
            resp = s.get(full_url)
            status_codes.append(resp.status_code)
            content.append(resp.text)
            content_type = resp.headers.get('content-type')

        self.stdout('\n{}\n{}'.format(full_url, status_codes))
        if len(set(status_codes)) != 1:
            self.print_diff(url, 'status_code', status_codes)

        if content[0] != content[1]:
            if content_type == 'application/json':
                diff = json_delta.diff(json.loads(content[0]),
                                       json.loads(content[1]),
                                       verbose=False)
                pprint(diff, indent=8)
            else:
                try:
                    _check_shared(content[0], content[1], LHTMLOutputChecker(),
                                  "html")
                except AssertionError as e:
                    self.stderr(str(e))
Example #4
def getDictDiff(d1, d2):
    listDiffs = []
    diffs = json_delta.diff(d1, d2, verbose=False)
    for diff in diffs:
        # each stanza is [key_path] or [key_path, new_value];
        # only the key path (first element) holds keys to join
        keyPath = diff[0]
        listDiffs.append('.'.join(str(key) for key in keyPath))
    return listDiffs
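A quick usage sketch with hypothetical dictionaries, showing the dotted paths this helper produces, one per stanza:

d1 = {'name': 'a', 'tags': [1, 2]}
d2 = {'name': 'b', 'tags': [1, 2, 3]}
print(getDictDiff(d1, d2))  # e.g. ['name', 'tags.2']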
Example #5
 def log_diff(self, control_value, candidate_value):
     diff = json_delta.diff(control_value, candidate_value, verbose=False)
     CitusDashboardDiff.objects.create(
         data_source=self.context['data_source'],
         context=self.context,
         control=control_value,
         candidate=candidate_value,
         diff=diff
     )
Example #6
 def log_diff(self, control_value, candidate_value):
     diff = json_delta.diff(control_value, candidate_value, verbose=False)
     ReportComparisonDiff.objects.create(
         domain=self.context['domain'],
         control_report_config_id=self.context['report_config_id'],
         candidate_report_config_id=self.context['new_report_config_id'],
         filter_values=self.context['filter_values'],
         control=control_value,
         candidate=candidate_value,
         diff=diff,
     )
Example #7
 def log_diff(self, control_value, candidate_value):
     diff = json_delta.diff(control_value, candidate_value, verbose=False)
     diff_logger.info(
         "",
         extra={
             'domain': self.context['domain'],
             'report_config_id': self.context['report_config_id'],
             'filter_values': self.context['filter_values'],
             'control': json.dumps(control_value),
             'diff': json.dumps(diff)
         })
Example #8
 def log_diff(self, control_value, candidate_value):
     diff = json_delta.diff(control_value, candidate_value, verbose=False)
     diff_logger.info(
         "",
         extra={
             'domain': self.context['domain'],
             'report_config_id': self.context['report_config_id'],
             'filter_values': self.context['filter_values'],
             'control': json.dumps(control_value, cls=DjangoJSONEncoder),
             'diff': json.dumps(diff, cls=DjangoJSONEncoder)
         })
Example #9
def compare_json(str1, str2):
    """Compare two JSON strings.

    Returns diff and changes required.
    """
    json1 = json.loads(zap_bnodes(str1))
    json2 = json.loads(zap_bnodes(str2))
    diff = json_delta.diff(json1, json2, minimal=True, verbose=False)
    if len(diff) >= 2:
        return (diff[0], diff[1])
    elif diff:
        # a single stanza: avoid an IndexError on diff[1]
        return (diff[0], '')
    else:
        return ('', '')
Example #10
 def update_history(self):
     if len(self.history) == 0:
         last_step_bson = HistoryStep(
             public_view={},
             player_views={p.id: {} for p in self.config.players},
         ).to_mongo()
     else:
         last_step_bson = self.history[-1].to_mongo()
     step = HistoryStep(log_message=' '.join(self.current_step_log))
     pv = self.get_public_view()
     step.public_view = pv
     step.public_view_delta = json_delta.diff(last_step_bson['public_view'],
                                              pv,
                                              verbose=False)
     for p in self.config.players:
         spv = self.get_player_view(p)
         step.player_views[p.id] = spv
         step.player_view_deltas[p.id] = json_delta.diff(
             last_step_bson['player_views'][p.id], spv, verbose=False)
     self.history.append(step)
     self.current_step_log = []
Example #11
def build_change_log(old_report, new_report):
    change_log = ChangeLog(old_report, new_report)

    result = diff(old_report, new_report)

    for diff_line in result:
        # Operations
        if diff_line[0][0] == "operations":
            change_log.operation(diff_line)
        else:
            change_log.models(diff_line)

    return change_log
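build_change_log leans on the stanza layout: diff_line[0] is the key path, so diff_line[0][0] is the top-level key being changed. A small illustration with made-up report structures:

from json_delta import diff  # the bare `diff` name used above

old_report = {'operations': {'op1': 'ok'}, 'modelA': 1}
new_report = {'operations': {'op1': 'failed'}, 'modelA': 2}
for diff_line in diff(old_report, new_report, verbose=False):
    print(diff_line[0][0])  # 'operations' or 'modelA'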
Example #12
    def log_diff(self, control_value, candidate_value):
        # handle serialization of Decimals and dates
        control_value = json.loads(json.dumps(control_value, cls=CommCareJSONEncoder))
        candidate_value = json.loads(json.dumps(candidate_value, cls=CommCareJSONEncoder))

        diff = json_delta.diff(control_value, candidate_value, verbose=False)
        CitusDashboardDiff.objects.create(
            data_source=self.context['data_source'],
            context=self.context,
            control=control_value,
            candidate=candidate_value,
            diff=diff
        )
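The dumps-then-loads round trip above normalizes Decimals and dates into plain JSON types before diffing, since json_delta compares raw Python structures. A sketch of the idea using the standard encoder hook (CommCareJSONEncoder itself is project code, so a stand-in is used here):

import json
from decimal import Decimal

class _Encoder(json.JSONEncoder):  # stand-in for CommCareJSONEncoder
    def default(self, obj):
        if isinstance(obj, Decimal):
            return float(obj)
        return super().default(obj)

value = {'total': Decimal('1.5')}
normalized = json.loads(json.dumps(value, cls=_Encoder))  # {'total': 1.5}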
Example #13
def update_clusters(cluster_discoverer, query_cluster: callable, store, query_interval: float=5, debug: bool=False):
    while True:
        lock = store.acquire_lock()
        if lock:
            try:
                clusters = cluster_discoverer.get_clusters()
                cluster_ids = set()
                for cluster in clusters:
                    cluster_ids.add(cluster.id)
                    status = store.get_cluster_status(cluster.id)
                    now = time.time()
                    if now < status.get('last_query_time', 0) + query_interval:
                        continue
                    backoff = status.get('backoff')
                    if backoff and now < backoff['next_try']:
                        # cluster is still in backoff, skip
                        continue
                    try:
                        logger.debug('Querying cluster {} ({})..'.format(cluster.id, cluster.api_server_url))
                        data = query_cluster(cluster)
                    except Exception as e:
                        backoff = handle_query_failure(e, cluster, backoff)
                        status['backoff'] = backoff
                        store.publish('clusterstatus', {'cluster_id': cluster.id, 'status': status})
                    else:
                        status['last_query_time'] = now
                        if backoff:
                            logger.info('Cluster {} ({}) recovered after {} tries.'.format(cluster.id, cluster.api_server_url, backoff['tries']))
                            del status['backoff']
                        old_data = store.get_cluster_data(data['id'])
                        if old_data:
                            # https://pikacode.com/phijaro/json_delta/ticket/11/
                            # diff is extremely slow without array_align=False
                            delta = json_delta.diff(old_data, data, verbose=debug, array_align=False)
                            store.publish('clusterdelta', {'cluster_id': cluster.id, 'delta': delta})
                            if delta:
                                store.set_cluster_data(cluster.id, data)
                        else:
                            logger.info('Discovered new cluster {} ({}).'.format(cluster.id, cluster.api_server_url))
                            # first send status with last_query_time!
                            store.publish('clusterstatus', {'cluster_id': cluster.id, 'status': status})
                            store.publish('clusterupdate', data)
                            store.set_cluster_data(cluster.id, data)
                    store.set_cluster_status(cluster.id, status)
                store.set_cluster_ids(cluster_ids)
            except Exception:
                logger.exception('Failed to update')
            finally:
                store.release_lock(lock)
        # sleep 1-2 seconds
        gevent.sleep(min(random_jitter(1), query_interval))
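The array_align=False comment is the key performance detail here: by default json_delta tries to align array elements pairwise when diffing, which the linked ticket reports as extremely slow on large arrays like this cluster data. A rough sketch of the trade-off with made-up data:

import json_delta

old = {'nodes': [{'name': 'node-%d' % i, 'ready': True} for i in range(500)]}
new = {'nodes': [{'name': 'node-%d' % i, 'ready': i != 42} for i in range(500)]}

# aligned diffs are minimal but slow on big arrays;
# array_align=False is fast at the cost of a coarser delta
delta = json_delta.diff(old, new, verbose=False, array_align=False)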
Example #14
def build_change_log(old_report, new_report):
    change_log = ChangeLog(old_report, new_report)

    # when the diff result is large, compare_lengths=True may produce an incorrect diff
    result = diff(old_report, new_report, compare_lengths=False)

    for diff_line in result:
        # Operations
        if diff_line[0][0] == "operations":
            change_log.operation(diff_line)
        else:
            change_log.models(diff_line)

    return change_log
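Disabling compare_lengths, as above, is a one-keyword change; a minimal sketch with hypothetical reports:

from json_delta import diff

old_report = {'operations': {'op1': 'ok'}}
new_report = {'operations': {'op1': 'ok', 'op2': 'added'}}
# compare_lengths=False avoids the shortest-serialization heuristic
# that the comment above flags as unreliable on large diffs
result = diff(old_report, new_report, compare_lengths=False, verbose=False)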
Example #15
    def test_load_raw_data(self):

        name = "lesbos"
        locationData = getEmptyLocationDataObject(name)

        data = ['MOLYVOS',	'SECTION_TITLE',	'Molyvos: vital arrival info',
        'MOLYVOS',	'CRITICAL',	'X',
        'MOLYVOS',	'PARAGRAPH',	'Welcome to the Greek island of Lesvos. From wherever you land on the island, you will need to travel to the capital, Mytilene. In Mytilene you will be registered as arriving in Greece. You need to be registered to continue your journey on a ferry or plane, take taxis or stay in hotels. You cannot move around Greece legally without completing the registration process.',
        'MOLYVOS',	'PARAGRAPH',	'Near Mytilene there are registration centres where the police will record personal information from you and your family members or those traveling with you. You may be asked for fingerprints as well. This does not prevent your onward journey. Please cooperate with the police if you are requested to provide fingerprints.Once your registration is completed, you will receive a registration document from the police with your information.',
        'MOLYVOS',	'PARAGRAPH',	'This police note is valid for 6 months, but is only valid in Greece. It is recommended that you make a copy of the document in case you lose or damage it. There are photocopiers available for use at the registration centres.',
        'MOLYVOS',	'TITLE',	'Travel from Molyvos to Mytilene:',
        'MOLYVOS',	'LISTITEM',	'The registration centres are between 65-70km from in a city called . It is a 1.5 hour bus journey or approximately 16 - 20 hour walk.',
        'MOLYVOS',	'LISTITEM',	'Local organisations, NGOs such as the IRC, MSF, Save the Children, Mercy Corps and the UN are operating free bus services to Mytilene, stopping at the registration centres. Once arriving in Molyvos, you will be directed to a location where you can find the bus service.',
        'MOLYVOS',	'LISTITEM',	'Please be aware that there could be a significant wait for the bus, depending on the number of arrivals and the number of buses. Depending on when you arrive, this may include staying the night at the bus station. We kindly ask for your patience.',
        'MOLYVOS',	'LISTITEM',	'You are permitted to walk to Mytilene. For a reasonably fit person, this trip could take about 2 days. Please be advised that the walk is along a highway with sharp turns and many hills. If you attempt to walk, do not walk in the middle of the road, and watch for cars. ',
        'REGISTRATION',	'SECTION_TITLE',	'Registration',
        'REGISTRATION',	'TITLE',	'How to register',
        'REGISTRATION',	'LISTITEM',	'Nationals from can be registered in. Any personal identification documents available need to be presented at the point of registration. The registration process includes review of identification documentation, questions, fingerprinting and photo. Upon completion a police note is provided which allows those individuals listed to stay in Greece for maximum of 6 months before requiring renewal. Each adult registers separately and any children below the age of 18 are registered on their parents police note.',
        'REGISTRATION',	'LISTITEM',	'Nationalities not listed above, must be registered in Moria, with or without a passport, and successful registration results in receipt of a police note that allows them to stay in Greece for a maximum of one month. This registration can be renewed at one month.',
        'REGISTRATION',	'LISTITEM',	'All nationalities must be registered prior to applying for asylum in Greece.',
        'REGISTRATION',	'LISTITEM',	'In Moria there is an office for asylum requests that is open Monday through Friday 9:00 am-3:00 pm. Processing of asylum requests can take approximately 1.5 - 3 months. If asylum is granted, refugees will receive travel documents which permits travel abroad or residence in Greece for up to 3 years. For more information please contact: 22510 32323 (Languages available English-Greek)',
        'REGISTRATION',	'TITLE',	'Registration point locations']

        fakeCellData = [ Test_Cell(x) for x in data ]

        compileLocationDataToJSON(fakeCellData, locationData)

        with open("tests/expectedData.json", "r") as fp:
            j = json.load(fp)


        print(json_delta.diff(locationData, j))

        print(locationData)
        # assertEqual does a deep, recursive comparison of the two structures.
        self.assertEqual(locationData, j)
Example #16
def test_request(service, path):
    ref = requests.get(REF[service] + path)
    actual = requests.get(TESTED[service] + path)
    assert actual.status_code == ref.status_code
    assert ref.status_code == 200
    json_diff = diff(actual.json(),
                     ref.json(),
                     array_align=False,
                     verbose=False)
    if path == '/get_witnesses':
        # Filter fields not stable between calls.
        json_diff = [
            d for d in json_diff if len(d[0]) != 3
            or d[0][2] not in ['last_confirmed_block_num', 'last_aslot']
        ]
    assert not json_diff, '\n'.join(udiff(actual.json(), ref.json(),
                                          json_diff))
Example #17
    def log_diff(self, control_value, candidate_value):
        # handle serialization of Decimals and dates
        control_value = json.loads(
            json.dumps(control_value, cls=CommCareJSONEncoder))
        candidate_value = json.loads(
            json.dumps(candidate_value, cls=CommCareJSONEncoder))

        diff = json_delta.diff(control_value, candidate_value, verbose=False)
        ReportComparisonDiff.objects.create(
            domain=self.context['domain'],
            control_report_config_id=self.context['report_config_id'],
            candidate_report_config_id=self.context['new_report_config_id'],
            filter_values=self.context['filter_values'],
            control=control_value,
            candidate=candidate_value,
            diff=diff,
        )
Example #18
    def get_diff(self, filename1, filename2):

        ext = filename1.split(".")[-1].lower() 
        if ext not in ['json']: 
            return None

        with open(filename1) as f1:
            d1 = json.load(f1)
        with open(filename2) as f2:
            d2 = json.load(f2)
        diff = json_delta.diff(d1, d2, verbose=False)

        # Example diff stanzas:
        # [
        #     [['validator', 'metadata-validator', 'files', 1], '*.txt'],  # list addition
        #     [['track', 'excludes', 3], 'datapackage.json'],
        #     [['username'], 'pingali1'],  # value change
        #     [['track', 'includes', 2]],  # list deletion
        #     [['auto-push']],  # key deletion
        #     [['hello'], 12],  # new key
        # ]

        changes = { 
            'New keys or values': 0,
            'Keys deleted': 0,
            'List values added': 0,
            'List values deleted': 0
            }
        
        for d in diff:
            if len(d) == 2: 
                if isinstance(d[0][-1], int):
                    changes['List values added'] += 1 
                else: 
                    changes['New keys or values'] += 1 
            else: 
                if isinstance(d[0][-1], int):
                    changes['List values deleted'] += 1 
                else: 
                    changes['Keys deleted'] += 1 

        summary = { 
            'data': {}
        }
        for c in changes: 
            summary['data'][c] = [c, changes[c]]

        return summary
Example #19
def track_changes():
    '''Calculate the diff between the two schedules.

    This method is to be called on a regular (daily) basis and is used to
    notify users of any changes to their exams. It also updates the database,
    since it informs the system of any courses that have changed or are new.
    If a course is deleted from the schedule for some reason, the system will
    just send an update that the course has changed; anything beyond that is
    beyond the scope of this application.
    '''
    with open(c.CURR_ENG_SCHED, 'r') as f:
        current = json.load(f)
    with open(c.PREV_ENG_SCHED, 'r') as f:
        old = json.load(f)

    with open(c.CURR_ARTSCI_SCHED, 'r') as f:
        current_artsci = json.load(f)
    with open(c.PREV_ARTSCI_SCHED, 'r') as f:
        old_artsci = json.load(f)

    current.update(current_artsci)
    old.update(old_artsci)

    schedule_changes = []  #List of courses to update

    delta = json_delta.diff(old, current, verbose=True, key=None)
    #print(delta)
    if delta and len(delta[0]) > 1 and not delta[0][0]:
        # [[[],<target>]] format generated by json_delta. prepare the cannons for mass updates!
        schedule_changes = list(delta[0][1].keys())
    else:
        for course in delta:
            schedule_changes.append(course[0][0])

    #print(schedule_changes)

    # remove duplicate entries of changed elements for each course
    schedule_changes = list(set(schedule_changes))
    with open(c.DELTA, 'w') as f:
        json.dump(schedule_changes, f)

    # generate a minimized list of courses for consumption by the frontend
    package_json(current)
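The empty-key-path branch above handles json_delta's whole-document replacement stanza: when two structures share too little, the diff may collapse to a single stanza [[], <target>] whose payload is the entire new document. A toy sketch of both branches, with made-up schedules:

import json_delta

old = {'CSC108': {'exam': 'Dec 10'}}
new = {'MAT137': {'exam': 'Dec 12'}}
delta = json_delta.diff(old, new, verbose=False)
if delta and len(delta[0]) > 1 and not delta[0][0]:
    changed = list(delta[0][1].keys())  # whole document replaced
else:
    changed = [stanza[0][0] for stanza in delta]  # one course per stanza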
Example #20
def compare_json(j1, j2):
    diff = json_delta.diff(j1, j2, False, False)

    if not diff:
        return EQUAL, None
    else:
        try:
            return DIFFER, '\n'.join(json_delta.udiff(j1, j2, diff, 2))
        except Exception:
            print("################ EXCEPTION ################")
            print("#                                         #")
            print("# json_delta raised an exception          #")
            print("# using fallback of difflib.unified()     #")
            print("#                                         #")
            print("###########################################")

            diff = difflib.unified_diff(
                json.dumps(j1, indent=2).split('\n'),
                json.dumps(j2, indent=2).split('\n'))
            return DIFFER, '\n'.join(diff)
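json_delta.udiff, used in the happy path above, renders the two structures and their diff as unified-diff-style lines; it yields lines one at a time, which is why the result is joined. A standalone sketch:

import json_delta

j1 = {'a': 1, 'b': 2}
j2 = {'a': 1, 'b': 3}
d = json_delta.diff(j1, j2, verbose=False)
print('\n'.join(json_delta.udiff(j1, j2, d, 2)))  # indent=2, as in the call above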
Example #21
def track_changes():
    '''Calculate the diff between the two schedules.

    This method is to be called on a regular (daily) basis and is used to
    notify users of any changes to their exams. It also updates the database,
    since it informs the system of any courses that have changed or are new.
    If a course is deleted from the schedule for some reason, the system will
    just send an update that the course has changed; anything beyond that is
    beyond the scope of this application.
    '''
    with open(c.CURR_ENG_SCHED,'r') as f:
        current = json.load(f)
    with open(c.PREV_ENG_SCHED,'r') as f:
        old = json.load(f)

    with open(c.CURR_ARTSCI_SCHED,'r') as f:
        current_artsci = json.load(f)
    with open(c.PREV_ARTSCI_SCHED,'r') as f:
        old_artsci = json.load(f)

    current.update(current_artsci)
    old.update(old_artsci)

    schedule_changes = []			    #List of courses to update

    delta = json_delta.diff(old, current, verbose=True, key=None )
    #print(delta)
    if delta and len(delta[0]) > 1 and not delta[0][0]:
        # [[[],<target>]] format generated by json_delta. prepare the cannons for mass updates!
        schedule_changes = list(delta[0][1].keys())
    else:
        for course in delta:
            schedule_changes.append(course[0][0])

    #print(schedule_changes)

    # remove duplicate entries of changed elements for each course
    schedule_changes = list(set(schedule_changes))
    with open(c.DELTA, 'w') as f:
        json.dump(schedule_changes, f)

    # generate a minimized list of courses for consumption by the frontend
    package_json(current)
Example #22
def compare_json(j1, j2):

    if j1 == j2:
        return EQUAL, None

    # If the contents are not equal, use json_delta.diff to calculate the exact
    # differences.  Some of our yaml files include sets, which json_delta
    # cannot handle, so in those cases, fall back to doing a diff of the
    # formatted json.
    try:
        diff = json_delta.diff(j1, j2, False, False)
        return DIFFER, '\n'.join(json_delta.udiff(j1, j2, diff, 2))
    except Exception:
        print("################ EXCEPTION ################")
        print("#                                         #")
        print("# json_delta raised an exception          #")
        print("# using fallback of difflib.unified()     #")
        print("#                                         #")
        print("###########################################")

        diff = difflib.unified_diff(
            json.dumps(j1, indent=2, cls=SetEncoder).split('\n'),
            json.dumps(j2, indent=2, cls=SetEncoder).split('\n'))
        return DIFFER, '\n'.join(diff)
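SetEncoder is not shown in this snippet; a plausible reconstruction, assuming it only needs to make the yaml-derived sets JSON-serializable, is a small json.JSONEncoder subclass:

import json

class SetEncoder(json.JSONEncoder):
    # hypothetical sketch: render sets as sorted lists so json.dumps succeeds
    def default(self, obj):
        if isinstance(obj, (set, frozenset)):
            return sorted(obj)
        return super().default(obj)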
Example #23
def update_real_pool(pools, user):
    """Update real pool in Load Balancer and DB."""

    load_balance = dict()
    keys = list()

    for pool in pools:

        pool_obj = facade_v3.get_pool_by_id(pool['id'])

        healthcheck_old = serializers.HealthcheckV3Serializer(
            pool_obj.healthcheck).data

        db_members = pool_obj.serverpoolmember_set.all()
        member_ids = [
            spm['id'] for spm in pool['server_pool_members'] if spm['id']
        ]
        db_members_remove = list(db_members.exclude(id__in=member_ids))
        db_members_id = [str(s.id) for s in db_members]

        pool_obj.update_v3(pool, user, permit_created=True)

        pools_members = list()
        for pool_member in pool['server_pool_members']:

            ip = pool_member['ip']['ip_formated'] if pool_member[
                'ip'] else pool_member['ipv6']['ip_formated']

            if pool_member.get('id', None) is not None:

                member = db_members[db_members_id.index(str(
                    pool_member['id']))]
                ip_db = member.ip.ip_formated \
                    if member.ip else member.ipv6.ip_formated

                if member.port_real == pool_member['port_real'] \
                        and ip_db == ip:
                    # update info of member
                    pools_members.append({
                        'id': pool_member['id'],
                        'identifier': member.identifier,
                        'ip': ip,
                        'port': pool_member['port_real'],
                        'limit': pool_member['limit'],
                        'priority': pool_member['priority'],
                        'member_status': pool_member['member_status'],
                        'weight': pool_member['weight'],
                    })
                else:
                    # delete member with old port and old ip
                    pools_members.append({
                        'id': None,
                        'identifier': member.identifier,
                        'ip': ip_db,
                        'port': member.port_real,
                        'remove': 1
                    })
                    # create member with new port and new ip
                    pools_members.append({
                        'id': pool_member['id'],
                        'identifier': ip,
                        'ip': ip,
                        'port': pool_member['port_real'],
                        'limit': pool_member['limit'],
                        'priority': pool_member['priority'],
                        'weight': pool_member['weight'],
                        'member_status': pool_member['member_status'],
                        'new': 1
                    })
            else:
                # create member
                pools_members.append({
                    'id': None,
                    'identifier': ip,
                    'ip': ip,
                    'port': pool_member['port_real'],
                    'limit': pool_member['limit'],
                    'priority': pool_member['priority'],
                    'weight': pool_member['weight'],
                    'member_status': pool_member['member_status'],
                    'new': 1
                })

        # delete members
        for member in db_members_remove:
            pools_members.append({
                'id': member.id,
                'identifier': member.identifier,
                'ip': member.ip.ip_formated
                if member.ip else member.ipv6.ip_formated,
                'port': member.port_real,
                'remove': 1
            })

        # get equipment associated with the pool
        equips = _validate_pool_to_apply(pool, update=True, user=user)

        keys.append(sorted([str(eqpt.id) for eqpt in equips]))

        healthcheck = copy.deepcopy(pool['healthcheck'])
        healthcheck['new'] = False
        if json_delta.diff(healthcheck_old, pool['healthcheck']):
            healthcheck['identifier'] = facade_v3.reserve_name_healthcheck(
                pool['identifier'])
            healthcheck['new'] = True

        for e in equips:
            eqpt_id = str(e.id)
            equipment_access = EquipamentoAcesso.search(equipamento=e.id)

            plugin = PluginFactory.factory(e)

            if not load_balance.get(eqpt_id):

                load_balance[eqpt_id] = {
                    'plugin': plugin,
                    'access': equipment_access,
                    'pools': [],
                }

            vips_requests = ServerPool().get_vips_related(pool['id'])

            serializer_vips = serializers_vip.VipRequestV3Serializer(
                vips_requests,
                many=True,
                include=(
                    'ipv4__details',
                    'ipv6__details',
                    'ports__identifier',
                    'ports__pools__server_pool__basic__lb_method',
                ))
            vips = serializer_vips.data

            load_balance[eqpt_id]['pools'].append({
                'id': pool['id'],
                'nome': pool['identifier'],
                'lb_method': pool['lb_method'],
                'healthcheck': healthcheck,
                'action': pool['servicedownaction']['name'],
                'vips': vips,
                'pools_members': pools_members
            })

    # ensure all pools live on the same load balancer
    keys = [','.join(key) for key in keys]
    if len(set(keys)) > 1:
        raise Exception('Pools are in different load balancers')

    for lb in load_balance:
        load_balance[lb]['plugin'].update_pool(load_balance[lb])

    return {}
Example #24
File: jdic.py  Project: kerfab/jdic
 def diff(self, obj):
     """ Returns a delta between this object and obj """
     if isinstance(obj, Jdic):
         obj = obj.raw()
     return json_delta.diff(self.raw(), obj, verbose=False)
Example #25
import json_delta
import json

with open('test1.json', 'r') as f:
    v1 = json.load(f)

with open('test2.json', 'r') as f2:
    v2 = json.load(f2)

diff = json_delta.diff(v1, v2, verbose=False)

for i in diff:
    print(i)
Example #26
def update_clusters(cluster_discoverer,
                    query_cluster: callable,
                    store,
                    query_interval: float = 5,
                    debug: bool = False):
    while True:
        lock = store.acquire_lock()
        if lock:
            try:
                clusters = cluster_discoverer.get_clusters()
                cluster_ids = set()
                for cluster in clusters:
                    cluster_ids.add(cluster.id)
                    status = store.get_cluster_status(cluster.id)
                    now = time.time()
                    if now < status.get('last_query_time', 0) + query_interval:
                        continue
                    backoff = status.get('backoff')
                    if backoff and now < backoff['next_try']:
                        # cluster is still in backoff, skip
                        continue
                    try:
                        logger.debug('Querying cluster {} ({})..'.format(
                            cluster.id, cluster.api_server_url))
                        data = query_cluster(cluster)
                    except Exception as e:
                        backoff = handle_query_failure(e, cluster, backoff)
                        status['backoff'] = backoff
                        store.publish('clusterstatus', {
                            'cluster_id': cluster.id,
                            'status': status
                        })
                    else:
                        status['last_query_time'] = now
                        if backoff:
                            logger.info(
                                'Cluster {} ({}) recovered after {} tries.'.
                                format(cluster.id, cluster.api_server_url,
                                       backoff['tries']))
                            del status['backoff']
                        old_data = store.get_cluster_data(data['id'])
                        if old_data:
                            # https://pikacode.com/phijaro/json_delta/ticket/11/
                            # diff is extremely slow without array_align=False
                            delta = json_delta.diff(old_data,
                                                    data,
                                                    verbose=debug,
                                                    array_align=False)
                            store.publish('clusterdelta', {
                                'cluster_id': cluster.id,
                                'delta': delta
                            })
                            if delta:
                                store.set_cluster_data(cluster.id, data)
                        else:
                            logger.info(
                                'Discovered new cluster {} ({}).'.format(
                                    cluster.id, cluster.api_server_url))
                            # first send status with last_query_time!
                            store.publish('clusterstatus', {
                                'cluster_id': cluster.id,
                                'status': status
                            })
                            store.publish('clusterupdate', data)
                            store.set_cluster_data(cluster.id, data)
                    store.set_cluster_status(cluster.id, status)
                store.set_cluster_ids(cluster_ids)
            except Exception:
                logger.exception('Failed to update')
            finally:
                store.release_lock(lock)
        # sleep 1-2 seconds
        gevent.sleep(min(random_jitter(1), query_interval))
Example #27
                        '-p',
                        dest='package_print',
                        help='Package name to test. Must be importable.')
    parser.add_argument("--debug",
                        dest="debug",
                        action="store_true",
                        help="Verbosity in DEBUG mode")

    args = parser.parse_args()

    main_logger = logging.getLogger()
    logging.basicConfig()
    main_logger.setLevel(logging.DEBUG if args.debug else logging.INFO)

    if args.package_name:
        raise NotImplementedError("FIXME")

    if args.package_print:
        with open(args.package_print) as fd:
            new_report = json.load(fd)

    with open(args.base_input) as fd:
        old_report = json.load(fd)

    result = diff(old_report, new_report)
    with open("result.json", "w") as fd:
        json.dump(result, fd)

    change_log = build_change_log(old_report, new_report)
    print(change_log.build_md())
Example #28
def update_real_pool(pools, user):
    """Update real pool in Load Balancer and DB."""

    load_balance = dict()
    keys = list()

    for pool in pools:

        pool_obj = facade_v3.get_pool_by_id(pool['id'])

        healthcheck_old = serializers.HealthcheckV3Serializer(
            pool_obj.healthcheck).data

        db_members = pool_obj.serverpoolmember_set.all()
        member_ids = [spm['id'] for spm in pool['server_pool_members']
                      if spm['id']]
        db_members_remove = list(db_members.exclude(id__in=member_ids))
        db_members_id = [str(s.id) for s in db_members]

        pool_obj.update_v3(pool, user, permit_created=True)

        pools_members = list()
        for pool_member in pool['server_pool_members']:

            ip = pool_member['ip']['ip_formated'] if pool_member[
                'ip'] else pool_member['ipv6']['ip_formated']

            if pool_member.get('id', None) is not None:

                member = db_members[
                    db_members_id.index(str(pool_member['id']))]
                ip_db = member.ip.ip_formated \
                    if member.ip else member.ipv6.ip_formated

                if member.port_real == pool_member['port_real'] \
                        and ip_db == ip:
                    # update info of member
                    pools_members.append({
                        'id': pool_member['id'],
                        'identifier': member.identifier,
                        'ip': ip,
                        'port': pool_member['port_real'],
                        'limit': pool_member['limit'],
                        'priority': pool_member['priority'],
                        'member_status': pool_member['member_status'],
                        'weight': pool_member['weight'],
                    })
                else:
                    # delete member with old port and old ip
                    pools_members.append({
                        'id': None,
                        'identifier': member.identifier,
                        'ip': ip_db,
                        'port': member.port_real,
                        'remove': 1
                    })
                    # create member with new port and new ip
                    pools_members.append({
                        'id': pool_member['id'],
                        'identifier': ip,
                        'ip': ip,
                        'port': pool_member['port_real'],
                        'limit': pool_member['limit'],
                        'priority': pool_member['priority'],
                        'weight': pool_member['weight'],
                        'member_status': pool_member['member_status'],
                        'new': 1
                    })
            else:
                # create member
                pools_members.append({
                    'id': None,
                    'identifier': ip,
                    'ip': ip,
                    'port': pool_member['port_real'],
                    'limit': pool_member['limit'],
                    'priority': pool_member['priority'],
                    'weight': pool_member['weight'],
                    'member_status': pool_member['member_status'],
                    'new': 1
                })

        # delete members
        for member in db_members_remove:
            pools_members.append({
                'id': member.id,
                'identifier': member.identifier,
                'ip': member.ip.ip_formated
                if member.ip else member.ipv6.ip_formated,
                'port': member.port_real,
                'remove': 1
            })

        # get equipment associated with the pool
        equips = _validate_pool_to_apply(pool, update=True, user=user)

        keys.append(sorted([str(eqpt.id) for eqpt in equips]))

        healthcheck = copy.deepcopy(pool['healthcheck'])
        healthcheck['new'] = False
        if json_delta.diff(healthcheck_old, pool['healthcheck']):
            healthcheck['identifier'] = facade_v3.reserve_name_healthcheck(
                pool['identifier'])
            healthcheck['new'] = True

        for e in equips:
            eqpt_id = str(e.id)
            equipment_access = EquipamentoAcesso.search(
                equipamento=e.id
            )

            plugin = PluginFactory.factory(e)

            if not load_balance.get(eqpt_id):

                load_balance[eqpt_id] = {
                    'plugin': plugin,
                    'access': equipment_access,
                    'pools': [],
                }

            vips_requests = ServerPool().get_vips_related(pool['id'])

            serializer_vips = serializers_vip.VipRequestV3Serializer(
                vips_requests,
                many=True,
                include=(
                    'ipv4__details',
                    'ipv6__details',
                    'ports__identifier',
                    'ports__pools__server_pool__basic__lb_method',
                )
            )
            vips = serializer_vips.data

            load_balance[eqpt_id]['pools'].append({
                'id': pool['id'],
                'nome': pool['identifier'],
                'lb_method': pool['lb_method'],
                'healthcheck': healthcheck,
                'action': pool['servicedownaction']['name'],
                'vips': vips,
                'pools_members': pools_members
            })

    # ensure all pools live on the same load balancer
    keys = [','.join(key) for key in keys]
    if len(set(keys)) > 1:
        raise Exception('Pools are in different load balancers')

    for lb in load_balance:
        load_balance[lb]['plugin'].update_pool(load_balance[lb])

    return {}
Example #29
def update_clusters(
    cluster_discoverer,
    query_cluster: Callable[[Cluster], dict],
    store,
    query_interval: float = 5,
    debug: bool = False,
):
    while True:
        lock = store.acquire_lock()
        if lock:
            try:
                clusters = cluster_discoverer.get_clusters()
                cluster_ids = set()
                for cluster in clusters:
                    cluster_ids.add(cluster.id)
                    status = store.get_cluster_status(cluster.id)
                    now = time.time()
                    if now < status.get("last_query_time", 0) + query_interval:
                        continue
                    backoff = status.get("backoff")
                    if backoff and now < backoff["next_try"]:
                        # cluster is still in backoff, skip
                        continue
                    try:
                        logger.debug("Querying cluster {} ({})..".format(
                            cluster.id, cluster.api_server_url))
                        data = query_cluster(cluster)
                    except Exception as e:
                        backoff = handle_query_failure(e, cluster, backoff)
                        status["backoff"] = backoff
                        store.publish(
                            "clusterstatus",
                            {
                                "cluster_id": cluster.id,
                                "status": status
                            },
                        )
                    else:
                        status["last_query_time"] = now
                        if backoff:
                            logger.info(
                                "Cluster {} ({}) recovered after {} tries.".
                                format(cluster.id, cluster.api_server_url,
                                       backoff["tries"]))
                            del status["backoff"]
                        old_data = store.get_cluster_data(data["id"])
                        if old_data:
                            # https://pikacode.com/phijaro/json_delta/ticket/11/
                            # diff is extremely slow without array_align=False
                            delta = json_delta.diff(old_data,
                                                    data,
                                                    verbose=debug,
                                                    array_align=False)
                            store.publish(
                                "clusterdelta",
                                {
                                    "cluster_id": cluster.id,
                                    "delta": delta
                                },
                            )
                            if delta:
                                store.set_cluster_data(cluster.id, data)
                        else:
                            logger.info(
                                "Discovered new cluster {} ({}).".format(
                                    cluster.id, cluster.api_server_url))
                            # first send status with last_query_time!
                            store.publish(
                                "clusterstatus",
                                {
                                    "cluster_id": cluster.id,
                                    "status": status
                                },
                            )
                            store.publish("clusterupdate", data)
                            store.set_cluster_data(cluster.id, data)
                    store.set_cluster_status(cluster.id, status)
                store.set_cluster_ids(cluster_ids)
            except Exception as e:
                logger.exception(f"Failed to update: {e}")
            finally:
                store.release_lock(lock)
        # sleep 1-2 seconds
        gevent.sleep(min(random_jitter(1), query_interval))
Example #30
import json
import sys
import tempfile
from subprocess import call

import json_delta as jd

# EDITOR, toActions and toDiff are assumed to be defined elsewhere in the script

fname = sys.argv[1]
with open(fname, 'r') as fin:
    jin = json.load(fin)

# write the json to a temp file (text mode, for json.dump) and open it in an editor
with tempfile.NamedTemporaryFile(mode='w+', suffix=".tmp") as tmp:
    json.dump(jin, tmp, indent=4, separators=(',', ':'), sort_keys=True)
    tmp.flush()
    call([EDITOR, tmp.name])
    tmp.seek(0)
    jnew = json.load(tmp)

print(json.dumps(jin, indent=4, separators=(',', ':'), sort_keys=True))
print(json.dumps(jnew, indent=4, separators=(',', ':'), sort_keys=True))

diff = jd.diff(jin, jnew)

print(json.dumps(diff, indent=4, separators=(',', ':'), sort_keys=True))

jout = jd.patch(jin, diff, False)   # patched copy
jd.patch(jin, diff, True)           # patch jin in place

#print(json.dumps(jin, indent=4, separators=(',', ':'), sort_keys=True))
#print(json.dumps(jout, indent=4, separators=(',', ':'), sort_keys=True))

actions = toActions(diff)
diff2 = toDiff(actions)

print(json.dumps(actions, indent=4, separators=(',', ':'), sort_keys=True))
#print(json.dumps(diff2, indent=4, separators=(',', ':'), sort_keys=True))