Ejemplo n.º 1
0
def assertModelsEqual(sm1, sm2, significant_digits=None, on_demand_keys=None, ignore_keys=None):
    """
    Assert that two spatial models are equal according to DeepDiff.

    :param sm1, sm2: Spatial models that should be compared.
    :param significant_digits: INT. Ignore changes in float values with more digits
                               than `significant_digits` after the decimal point.
    :param on_demand_keys: A list of strings. Added to ON_DEMAND_KEYS.
    :param ignore_keys: A list of strings. Added to IGNORE_KEYS.
    :var ON_DEMAND_KEYS: If this attribute evaluates to False for one of the two spatial models,
                         do not report any changes.
    :var IGNORE_KEYS: Do not report any changes in this attribute.
    :raises: AssertionError, if the models are not equal.
    """
    # Defaults are None (not []) to avoid the shared-mutable-default pitfall.
    ON_DEMAND_KEYS = ["build_order", "mst", "ang_types", "closed_bulges", "bulges",
                      "newly_added_stems", "stems", "_conf_stats"] + (on_demand_keys or [])
    IGNORE_KEYS = ["newly_added_stems"] + (ignore_keys or [])
    diff = DeepDiff(sm1, sm2, significant_digits=significant_digits)
    if diff == {}:
        return
    for mode in ["type_changes", "dic_item_removed", "dic_item_added", "set_item_added",
                 "set_item_removed", "iterable_item_removed", "values_changed"]:
        # Iterate over a snapshot because entries may be deleted while walking.
        for key in list(diff.get(mode, {})):
            attribute, remainder, results = value_from_diff(key, sm1, sm2)
            while remainder:
                if (attribute in IGNORE_KEYS or
                   (attribute in ON_DEMAND_KEYS and (not results[0] or not results[1]))):
                    try:
                        del diff[mode][key]  # A dictionary
                    except TypeError:  # A set
                        diff[mode].remove(key)
                    break
                attribute, remainder, results = value_from_diff(remainder, results[0], results[1])
        if mode in diff and not diff[mode]:
            del diff[mode]
    assert diff == {}, "Deep Diff is\n" + pformat(diff)
Ejemplo n.º 2
0
def validate_payload(payload, schema):
    """Compare *payload* against *schema* and collect structural mismatches.

    :param payload: The object being validated.
    :param schema: The reference object to validate against.
    :return: dict with an ``exceptions`` list containing one entry per key
             added to or removed from the payload relative to the schema.
    """
    diff = DeepDiff(payload, schema)
    result = {'exceptions': []}
    not_allowed = ['dic_item_added', 'dic_item_removed']
    # .iteritems() is Python 2 only; .items() works on both 2 and 3.
    for exception, details in diff.items():
        if exception in not_allowed:
            result['exceptions'].append({exception: str(details)})
    return result
def test_group_roles(appliance, setup_aws_auth_provider, group_name, role_access, context,
                     soft_assert):
    """Basic default AWS_IAM group role auth + RBAC test

    Validates expected menu and submenu names are present for default
    AWS IAM groups

    NOTE: Only tests vertical navigation tree at the moment, not accordions within the page

    Polarion:
        assignee: apagac
        caseimportance: medium
        initialEstimate: 1/4h
        tags: rbac
    """
    # Expected navigation items for this group, keyed by navigation area.
    group_access = role_access[group_name]

    try:
        # Credentials file keys follow the "<group>_aws_iam" naming convention.
        iam_group_name = group_name + '_aws_iam'
        username = credentials[iam_group_name]['username']
        password = credentials[iam_group_name]['password']
        fullname = credentials[iam_group_name]['fullname']
    except KeyError:
        pytest.fail('No match in credentials file for group "{}"'.format(iam_group_name))

    with appliance.context.use(context):
        # fullname overrides user.name attribute, but doesn't impact login with username credential
        user = appliance.collections.users.simple_user(username, password, fullname=fullname)
        with user:
            view = navigate_to(appliance.server, 'LoggedIn')
            assert appliance.server.current_full_name() == user.name
            assert group_name.lower() in [name.lower() for name in appliance.server.group_names()]
            # Actual navigation tree rendered for the logged-in user.
            nav_visible = view.navigation.nav_item_tree()

            # RFE BZ 1526495 shows up as an extra requests link in nav
            # TODO BZ remove assert skip when BZ is fixed in 59z
            bz = BZ(1526495,
                    forced_streams=['5.8', '5.9'],
                    unblock=lambda group_name: group_name not in
                    ['evmgroup-user', 'evmgroup-approver', 'evmgroup-desktop', 'evmgroup-vm_user',
                     'evmgroup-administrator', 'evmgroup-super_administrator'])
            for area in group_access.keys():
                # using .get() on nav_visibility because it may not have `area` key
                diff = DeepDiff(group_access[area], nav_visible.get(area, {}),
                                verbose_level=0,  # If any higher, will flag string vs unicode
                                ignore_order=True)
                # Items visible in the UI but absent from the expected access map.
                nav_extra = diff.get('iterable_item_added')

                if nav_extra and 'Requests' in nav_extra.values() and bz.blocks:
                    # Known extra "Requests" nav link caused by the BZ above; skip this area.
                    logger.warning('Skipping RBAC verification for group "%s" in "%s" due to %r',
                                   group_name, area, bz)
                    continue
                else:
                    soft_assert(diff == {}, '{g} RBAC mismatch (expected first) for {a}: {d}'
                                            .format(g=group_name, a=area, d=diff))

        # Log back in as admin and confirm the test user still exists afterwards.
        appliance.server.login_admin()
        assert user.exists
Ejemplo n.º 4
0
 def test_item_added_and_removed(self):
     """Tree view reports added, removed and changed dict keys together."""
     before = {'one': 1, 'two': 2, 'three': 3, 'four': 4}
     after = {'one': 1, 'two': 4, 'three': 3, 'five': 5, 'six': 6}
     diff = DeepDiff(before, after, view='tree')
     expected_reports = {'values_changed', 'dictionary_item_added',
                         'dictionary_item_removed'}
     assert expected_reports == set(diff.keys())
     assert 2 == len(diff['dictionary_item_added'])
     assert 1 == len(diff['dictionary_item_removed'])
Ejemplo n.º 5
0
 def test_item_added_and_removed2(self):
     """Key 5 is added, key 4 removed, and key 2 changes type (int -> str)."""
     t1 = {2: 2, 4: 4}
     t2 = {2: "b", 5: 5}
     ddiff = DeepDiff(t1, t2, view='tree')
     # The original used "assert x, {...}": the comma made the set a mere
     # failure message, so only truthiness was tested. Compare for real.
     assert set(ddiff.keys()) == {
         'dictionary_item_added', 'dictionary_item_removed',
         'type_changes'
     }
     assert len(ddiff['dictionary_item_added']) == 1
     assert len(ddiff['dictionary_item_removed']) == 1
Ejemplo n.º 6
0
    def test_serialize_custom_objects_throws_error(self):
        """to_json() on a diff of plain custom objects is not serializable."""

        class Left:
            pass

        class Right:
            pass

        diff = DeepDiff(Left(), Right())
        # No default mapping is supplied, so JSON encoding must fail.
        with pytest.raises(TypeError):
            diff.to_json()
Ejemplo n.º 7
0
    def test_serialize_custom_objects_with_default_mapping(self):
        """Custom converter functions make otherwise unserializable diffs JSON-able."""
        # Class names A/B are significant: they appear in the serialized output.
        class A:
            pass

        class B:
            pass

        mapping = {A: lambda x: 'obj A', B: lambda x: 'obj B'}
        serialized = DeepDiff(A(), B()).to_json(default_mapping=mapping)
        expected = {
            "type_changes": {
                "root": {
                    "old_type": "A",
                    "new_type": "B",
                    "old_value": "obj A",
                    "new_value": "obj B",
                }
            }
        }
        assert expected == json.loads(serialized)
 def test_deserialization_tree(self):
     """A tree-view diff survives a JSON round trip with its reports intact."""
     left = {1: 1, 2: 2, 3: 3, 4: {"a": "hello", "b": [1, 2, 3]}}
     right = {1: 1, 2: 2, 3: 3, 4: {"a": "hello", "b": "world\n\n\nEnd"}}
     serialized = DeepDiff(left, right, view='tree').json
     restored = DeepDiff.from_json(serialized)
     self.assertTrue('type_changes' in restored)
 def test_deserialization(self):
     """A text-view diff deserializes back into an equal diff object."""
     left = {1: 1, 2: 2, 3: 3, 4: {"a": "hello", "b": [1, 2, 3]}}
     right = {1: 1, 2: 2, 3: 3, 4: {"a": "hello", "b": "world\n\n\nEnd"}}
     original = DeepDiff(left, right)
     restored = DeepDiff.from_json(original.json)
     self.assertEqual(original, restored)
Ejemplo n.º 10
0
def do_the_diff(defaults, overrides):
    """Diff two yaml loaded objects, returning diffs in a namedtuple

    :param defaults: File you wish to compare against, the base.
    :param overrides: File you wish to compare, the mask.
    :return: what is added, taken away, changed(added) and changed(removed).
    :rtype: namedtuple
    """
    parsed_diff = namedtuple('parsed_yaml_diff',
                             ['added', 'removed',
                              'changed_added', 'changed_removed'])

    diff = DeepDiff(defaults, overrides, verbose_level=2)

    # Strip the "root" prefix from every reported path.
    added = {root_cleanup(key): value
             for key, value in diff.get('dictionary_item_added', {}).items()}
    removed = {root_cleanup(key): value
               for key, value in diff.get('dictionary_item_removed', {}).items()}

    # Value changes and type changes are handled identically downstream:
    # recast both to plain dicts and merge, type changes taking precedence.
    combined = {root_cleanup(key): dict(value)
                for key, value in diff.get('values_changed', {}).items()}
    combined.update({root_cleanup(key): dict(value)
                     for key, value in diff.get('type_changes', {}).items()})

    # Two yaml dump-able dictionaries: one for new values, one for the
    # old values that are being overridden.
    new_changed_dict = {key: entry['new_value'] for key, entry in combined.items()}
    old_changed_dict = {key: entry['old_value'] for key, entry in combined.items()}

    return parsed_diff(added=added, removed=removed,
                       changed_added=new_changed_dict,
                       changed_removed=old_changed_dict)
Ejemplo n.º 11
0
    def test_compare_db_toc_and_derived_toc(self):
        """The derived TOC and the serialized object TOC must agree."""
        derived_toc = s.update_table_of_contents()
        base_json = json.dumps(derived_toc, sort_keys=True)
        serialized_oo_toc = library.get_toc_tree().get_root().serialize()["contents"]

        # Deep test of toc lists
        result = DeepDiff(derived_toc, serialized_oo_toc)
        only_jps_changes = all("JPS" in entry["new_value"]
                               for report in result.values()
                               for entry in report.values())
        assert not result or only_jps_changes

        if result:
            # Irrelevant difference, but it makes the test below impossible.
            return

        # Check that the json is identical -
        # that the round-trip didn't change anything by reference that would poison the deep test
        new_json = json.dumps(serialized_oo_toc, sort_keys=True)
        assert len(base_json) == len(new_json)
Ejemplo n.º 12
0
    def test_non_subscriptable_iterable(self):
        """Generators (non-subscriptable iterables) are diffed by iteration."""
        source = (value for value in [42, 1337, 31337])
        target = (value for value in [42, 1337])
        ddiff = DeepDiff(source, target, view='tree')

        assert {'iterable_item_removed'} == set(ddiff.keys())
        removed = ddiff['iterable_item_removed']
        assert 1 == len(removed)
        (change,) = removed

        # The tree view keeps references to the original iterables.
        assert change.up.t1 == source
        assert change.up.t2 == target
        assert change.report_type == 'iterable_item_removed'
        assert change.t1 == 31337
        assert change.t2 is notpresent

        # Only the removed side has a child relationship object.
        assert isinstance(change.up.t1_child_rel,
                          NonSubscriptableIterableRelationship)
        assert change.up.t2_child_rel is None
Ejemplo n.º 13
0
    def compare_dicts(cls, generated, expected, log=None):
        """Compare two dictionaries and generate error if required

        :param generated: Dictionary produced by the code under test.
        :param expected: Reference dictionary to compare against.
        :param log: Optional logger; when given, inputs and differences are logged.
        :raises ValueError: if the diff contains any change type listed in
                            ``cls.ERROR_ITEMS``.
        """
        diff = DeepDiff(expected, generated)

        # Consolidated logging: the original emitted "Differences:" twice
        # (once empty at info level, once with content) from three guards.
        if log:
            log.debug(_("Generated: {0}").format(generated))
            log.debug(_("Expected: {0}").format(expected))
            log.debug(_("Keys in diff: {0}").format(diff.keys()))
            d = pprint.pformat(diff, indent=cls.INDENT)
            log.info("Differences:\n{0}".format(d))

        if set(cls.ERROR_ITEMS).intersection(diff.keys()):
            diff_str = pprint.pformat(diff)
            msg = _("Found item changes: {0}").format(diff_str)
            if log:
                log.error(msg)
            raise ValueError(msg)
Ejemplo n.º 14
0
    def test_non_subscriptable_iterable(self):
        """Generators (non-subscriptable iterables) are diffed by iteration."""
        source = (value for value in [42, 1337, 31337])
        target = (value for value in [42, 1337])
        ddiff = DeepDiff(source, target, view='tree')

        self.assertEqual({'iterable_item_removed'}, set(ddiff.keys()))
        removed = ddiff['iterable_item_removed']
        self.assertEqual(1, len(removed))
        (change,) = removed

        # The tree view keeps references to the original iterables.
        self.assertEqual(change.up.t1, source)
        self.assertEqual(change.up.t2, target)
        self.assertEqual(change.report_type, 'iterable_item_removed')
        self.assertEqual(change.t1, 31337)
        self.assertEqual(change.t2, notpresent)

        # Only the removed side has a child relationship object.
        self.assertIsInstance(change.up.t1_child_rel,
                              NonSubscriptableIterableRelationship)
        self.assertIsNone(change.up.t2_child_rel)
Ejemplo n.º 15
0
def main():
    """Run rtl_433 against every reference .data file and diff the JSON output.

    For each expected-output JSON file found, decode the matching ``.data``
    file with rtl_433, strip ignored fields from both sides and report any
    differences via DeepDiff.
    """
    parser = argparse.ArgumentParser(description='Test rtl_433')
    parser.add_argument('-c', '--rtl433-cmd', default="rtl_433",
                        help='rtl_433 command')
    # default=None (not []) avoids argparse appending into a shared default list.
    parser.add_argument('-I', '--ignore-field', default=None, action="append",
                        help='Field to ignore in JSON data')
    args = parser.parse_args()

    rtl_433_cmd = args.rtl433_cmd
    ignore_fields = args.ignore_field or []

    expected_json = find_json()
    for output_fn in expected_json:
        input_fn = os.path.splitext(output_fn)[0] + ".data"
        if not os.path.isfile(input_fn):
            print("WARNING: Missing '%s'" % input_fn)
            continue

        # Run rtl_433
        rtl433out, err = run_rtl433(input_fn, rtl_433_cmd)

        # get JSON results, keep only first line for now
        rtl433out = rtl433out.strip().split("\n")[0]
        results = remove_fields(json.loads(rtl433out), ignore_fields)

        # Open expected data
        with open(output_fn, "r") as output_file:
            expected_data = remove_fields(json.load(output_file), ignore_fields)

        # Compute the diff and report each change type with its details
        diff = DeepDiff(expected_data, results)
        if diff:
            print("## Fail with '%s':" % input_fn)
            for error, details in diff.items():
                print(" %s" % error)
                for detail in details:
                    print("  * %s" % detail)
Ejemplo n.º 16
0
def printdiff(obj1, obj2):
    """Print the DeepDiff of the two objects to stdout."""
    diff = DeepDiff(obj1, obj2)
    print(diff)
def test_group_roles(appliance, setup_aws_auth_provider, group_name,
                     role_access, context, soft_assert):
    """Basic default AWS_IAM group role auth + RBAC test

    Validates expected menu and submenu names are present for default
    AWS IAM groups

    NOTE: Only tests vertical navigation tree at the moment, not accordions within the page
    """
    # Expected navigation items for this group, keyed by navigation area.
    group_access = role_access[group_name]

    try:
        # Credentials file keys follow the "<group>_aws_iam" naming convention.
        iam_group_name = group_name + '_aws_iam'
        username = credentials[iam_group_name]['username']
        password = credentials[iam_group_name]['password']
        fullname = credentials[iam_group_name]['fullname']
    except KeyError:
        pytest.fail('No match in credentials file for group "{}"'.format(
            iam_group_name))

    with appliance.context.use(context):
        # fullname overrides user.name attribute, but doesn't impact login with username credential
        user = appliance.collections.users.simple_user(username,
                                                       password,
                                                       fullname=fullname)
        with user:
            view = navigate_to(appliance.server,
                               'LoggedIn',
                               wait_for_view=True)
            assert appliance.server.current_full_name() == user.name
            assert group_name.lower() in [
                name.lower() for name in appliance.server.group_names()
            ]
            # Actual navigation tree rendered for the logged-in user.
            nav_visible = view.navigation.nav_item_tree()

            # RFE BZ 1526495 shows up as an extra requests link in nav
            # TODO BZ remove assert skip when BZ is fixed in 59z
            bz = BZ(
                1526495,
                forced_streams=['5.8', '5.9'],
                unblock=lambda group_name: group_name not in [
                    'evmgroup-user', 'evmgroup-approver', 'evmgroup-desktop',
                    'evmgroup-vm_user', 'evmgroup-administrator',
                    'evmgroup-super_administrator'
                ])
            for area in group_access.keys():
                # using .get() on nav_visibility because it may not have `area` key
                diff = DeepDiff(
                    group_access[area],
                    nav_visible.get(area, {}),
                    verbose_level=
                    0,  # If any higher, will flag string vs unicode
                    ignore_order=True)
                # Items visible in the UI but absent from the expected access map.
                nav_extra = diff.get('iterable_item_added')

                if nav_extra and 'Requests' in nav_extra.values(
                ) and bz.blocks:
                    # Known extra "Requests" nav link caused by the BZ above; skip area.
                    logger.warning(
                        'Skipping RBAC verification for group "%s" in "%s" due to %r',
                        group_name, area, bz)
                    continue
                else:
                    soft_assert(
                        diff == {},
                        '{g} RBAC mismatch (expected first) for {a}: {d}'.
                        format(g=group_name, a=area, d=diff))

        # Log back in as admin and confirm the test user still exists afterwards.
        appliance.server.login_admin()
        assert user.exists
Ejemplo n.º 18
0
 def test_deserialization(self):
     """A pickled-JSON diff restores to an equal DeepDiff object."""
     original = DeepDiff(t1, t2)
     restored = DeepDiff.from_json(original.to_json_pickle())
     assert original == restored
Ejemplo n.º 19
0
 def test_deserialization_tree(self):
     """A pickled-JSON tree diff restores with its report types intact."""
     tree_diff = DeepDiff(t1, t2, view='tree')
     restored = DeepDiff.from_json(tree_diff.to_json_pickle())
     assert 'type_changes' in restored
Ejemplo n.º 20
0
 def test_skip_dictionary_path(self):
     """An excluded dictionary path is not reported even when it differs."""
     left = {1: {2: "a"}}
     right = {1: {}}
     diff = DeepDiff(left, right, exclude_paths=['root[1][2]'])
     self.assertEqual(diff, {})
Ejemplo n.º 21
0
    def add_or_update_tess_in_lastet_measures(self, tess_object):
        """Create or update the "latest measures" Grafana dashboard for a TESS device.

        Clones panels from the template dashboard, substitutes its tokens with
        this device's values, merges them into the existing dashboard (if one
        exists), re-sorts panels by the stars id in their titles and saves the
        result through the Grafana API.

        :param tess_object: dict of template tokens, including "[token_tess_id]".
        :return: Grafana API response dict, or an error dict when the template
                 dashboard is missing.
        """
        DASH_TEMPLATE_UID = "template_tess_latest_measures"  # uid of the dashboard template
        DASH_FINAL_UID = "tess_latest_measures"
        STATIC_PANELS = 0  # Number of static panels at the beginning of the dashboard

        # Load template
        dash_info_template = self.__find_template(DASH_TEMPLATE_UID)

        if not "dashboard" in dash_info_template:
            return ({"status": "error", "error": "No exist template " + DASH_TEMPLATE_UID + " in organisation " + str(self.grafana_template_org_id)})

        # Update the folder id (creating the folder if necessary)
        dash_info_template["folderId"] = self.__create_folder(dash_info_template["meta"]["folderTitle"])

        # Replace All Tokens
        dash_info_template = self.__replace_template_tokens(dash_info_template, tess_object)
        if "status" in dash_info_template and dash_info_template["status"] == "error":
            return dash_info_template

        # Search dashboards with this title
        results = self.__find_dash_by_title(dash_info_template["dashboard"]["title"])

        response = None

        if results:
            # A dashboard with this title already exists: update it in place.
            dash_info = self.grafana_api.get_dashboard_by_uid(results[0]['uid'])

            # Remove static panels (those whose JSON has no "starsNNN" marker)
            p = re.compile('stars\d+', re.IGNORECASE)
            in_use_ids = []
            for panel_info in dash_info["dashboard"]["panels"][:]:
                panel_info_string = json.dumps(panel_info)
                if not p.findall(panel_info_string):
                    dash_info["dashboard"]["panels"].remove(panel_info)
                else:
                    in_use_ids.append(panel_info["id"])

            # Set unused id to static panels
            for idx in range(STATIC_PANELS):
                dash_info_template["dashboard"]["panels"][idx]["id"] = self.__get_next_id(in_use_ids)
                in_use_ids.append(self.__get_next_id(in_use_ids))

            # Append static panels
            dash_info["dashboard"]["panels"] = dash_info_template["dashboard"]["panels"][:STATIC_PANELS] + dash_info["dashboard"]["panels"]

            # Fix gridPos
            dash_info["dashboard"]["panels"] = self.__fix_gridPos(dash_info["dashboard"]["panels"], allowGridPanel=True)

            # Check whether panels for this device already exist in the dash
            panels_ids = []
            # Collect indexes of dash panels that already contain this tess id
            p = re.compile('stars\d+', re.IGNORECASE)
            for idx, panel_info in enumerate(dash_info["dashboard"]["panels"]):
                panel_info_string = json.dumps(panel_info).replace('stars4all', '')
                if tess_object["[token_tess_id]"] in p.findall(panel_info_string):
                    panels_ids.append(idx)

            # Decide depending on whether the device's panels already exist
            if len(panels_ids) == 0:
                # Append the new panels with freshly allocated ids
                for idx in range(len(dash_info_template["dashboard"]["panels"][STATIC_PANELS:])):
                    dash_info_template["dashboard"]["panels"][idx + STATIC_PANELS]["id"] = self.__get_next_id(in_use_ids)
                    in_use_ids.append(self.__get_next_id(in_use_ids))
                    dash_info["dashboard"]["panels"].append(dash_info_template["dashboard"]["panels"][idx + STATIC_PANELS])

            else:
                # Reuse the id/position of each panel that already exists;
                # DeepDiff skips template panels whose structure differs.
                for idx in range(len(panels_ids)):
                    for idx_template, panel_template in enumerate(dash_info_template["dashboard"]["panels"]):
                        diff = DeepDiff(dash_info["dashboard"]["panels"][panels_ids[idx]], panel_template)
                        if any(elem in ["dictionary_item_added", "dictionary_item_removed", "iterable_item_removed", "type_changes", "iterable_item_added"] for elem in diff.keys()):
                            continue
                        panel_template["id"] = dash_info["dashboard"]["panels"][panels_ids[idx]]["id"]
                        panel_template["gridPos"] = dash_info["dashboard"]["panels"][panels_ids[idx]]["gridPos"]
                        dash_info["dashboard"]["panels"][panels_ids[idx]] = panel_template

            # Sort panel groups by the numeric stars id embedded in their titles
            try:
                arr_no_sort = []
                for i in range(STATIC_PANELS, len(dash_info["dashboard"]["panels"]), len(dash_info_template["dashboard"]["panels"]) - STATIC_PANELS):
                    stars_ids = re.findall('\d+', dash_info["dashboard"]["panels"][i]["title"])
                    arr_no_sort.append({"idx": i, "starsid": int(stars_ids[0])})

                arr_sort = sorted(arr_no_sort, key=lambda k: k['starsid'])

                if arr_no_sort != arr_sort:
                    temp_panesl = []
                    for panel in dash_info_template["dashboard"]["panels"][:STATIC_PANELS]:
                        temp_panesl.append(panel)

                    for item in arr_sort:
                        for idx in range(len(dash_info_template["dashboard"]["panels"]) - STATIC_PANELS):
                            temp_panesl.append(dash_info["dashboard"]["panels"][item["idx"] + idx])

                    dash_info["dashboard"]["panels"] = temp_panesl

            # NOTE(review): bare except silently hides any sorting failure —
            # consider narrowing to (IndexError, ValueError, KeyError).
            except:
                pass

            # Fix gridPos
            dash_info["dashboard"]["panels"] = self.__fix_gridPos(dash_info["dashboard"]["panels"], allowGridPanel=True)

            # Set uid
            dash_info["dashboard"]["uid"] = DASH_FINAL_UID

            # Save
            response = self.__update_dash(dash_info, message="Update Tess: " + tess_object["[token_tess_id]"])

        else:
            # No existing dashboard: create one from the template.
            response = self.__create_dash_from_template(
                dash_info_template, message="Create Dash and added Tess: " + tess_object["[token_tess_id]"], uid=DASH_FINAL_UID)

        self.__set_starred_configuration(response, dash_info_template)
        return(response)
Ejemplo n.º 22
0
 def test_list_difference_ignore_order(self):
     """Reordered (and duplicated) nested list items are ignored."""
     left = {1: 1, 4: {"a": "hello", "b": [1, 2, 3]}}
     right = {1: 1, 4: {"a": "hello", "b": [1, 3, 2, 3]}}
     self.assertEqual(DeepDiff(left, right, ignore_order=True), {})
Ejemplo n.º 23
0
 def test_nested_list_difference_ignore_order(self):
     """ignore_order applies recursively to nested lists."""
     left = [1, 2, [3, 4]]
     right = [[4, 3], 2, 1]
     self.assertEqual(DeepDiff(left, right, ignore_order=True), {})
Ejemplo n.º 24
0
 def test_list_of_sets_difference_ignore_order(self):
     """An extra set in the list is still reported with ignore_order."""
     left = [{1}, {2}, {3}]
     right = [{4}, {1}, {2}, {3}]
     diff = DeepDiff(left, right, ignore_order=True)
     self.assertEqual(diff, {'iterable_item_added': {'root[0]': {4}}})
Ejemplo n.º 25
0
    def test_ignore_order_when_objects_similar(self):
        """
        The current design can't recognize that the item at index 1 moved to
        index 0 with a single changed value (key5: 'val5' -> 'CHANGE').

        Further thought needs to go into designing an algorithm that can
        identify the modified objects when ignoring order.

        The current algorithm computes the hash of the objects and since the
        hashes are different, it assumes an object is removed and another
        one is added.
        """
        t1 = {
            'key1': 'val1',
            'key2': [
                {'key3': 'val3', 'key4': 'val4'},
                {'key5': 'val5', 'key6': 'val6'},
            ],
        }
        t2 = {
            'key1': 'val1',
            'key2': [
                {'key5': 'CHANGE', 'key6': 'val6'},
                {'key3': 'val3', 'key4': 'val4'},
            ],
        }

        ddiff = DeepDiff(t1, t2, ignore_order=True)
        expected = {
            'iterable_item_removed': {
                "root['key2'][1]": {'key5': 'val5', 'key6': 'val6'}
            },
            'iterable_item_added': {
                "root['key2'][0]": {'key5': 'CHANGE', 'key6': 'val6'}
            },
        }
        self.assertEqual(ddiff, expected)
Ejemplo n.º 26
0
 def test_list_of_unhashable_difference_ignore_order4(self):
     """Duplicate unhashable items collapse to no difference with ignore_order."""
     left = [{"a": 2}, {"a": 2}]
     right = [{"a": 2}]
     self.assertEqual(DeepDiff(left, right, ignore_order=True), {})
Ejemplo n.º 27
0
 def test_same_objects(self):
     """Diffing an object against itself yields an empty diff."""
     data = {1: 1, 2: 2, 3: 3}
     alias = data
     self.assertEqual(DeepDiff(data, alias), {})
Ejemplo n.º 28
0
 def test_list_of_unhashable_difference_ignore_order(self):
     """Reordered unhashable (dict) items are ignored with ignore_order."""
     left = [{"a": 2}, {"b": [3, 4, {1: 1}]}]
     right = [{"b": [3, 4, {1: 1}]}, {"a": 2}]
     self.assertEqual(DeepDiff(left, right, ignore_order=True), {})
Ejemplo n.º 29
0
 def test_list_difference_add(self):
     """Items appended to a list are reported with their indexes."""
     left = [1, 2]
     right = [1, 2, 3, 5]
     diff = DeepDiff(left, right)
     self.assertEqual(diff, {'iterable_item_added': {'root[2]': 3, 'root[3]': 5}})
Ejemplo n.º 30
0
 def test_unknown_parameters(self):
     """Unsupported keyword arguments raise ValueError."""
     with self.assertRaises(ValueError):
         DeepDiff(1, 1, wrong_param=2)
Ejemplo n.º 31
0
 def test_set_strings(self):
     """A string added to a set is reported as a set_item_added path."""
     smaller = {"veggies", "tofu"}
     bigger = {"veggies", "tofu", "seitan"}
     diff = DeepDiff(smaller, bigger)
     self.assertEqual(diff, {'set_item_added': {"root['seitan']"}})
Ejemplo n.º 32
0
 def test_dictionary_difference_ignore_order(self):
     """Reordered dicts inside nested lists are ignored with ignore_order."""
     left = {"a": [[{"b": 2, "c": 4}, {"b": 2, "c": 3}]]}
     right = {"a": [[{"b": 2, "c": 3}, {"b": 2, "c": 4}]]}
     self.assertEqual(DeepDiff(left, right, ignore_order=True), {})
Ejemplo n.º 33
0
    def test_custom_objects_add_and_remove_method(self):
        """Methods added to an object show up as attribute_added paths."""
        t1, t2 = self.get_custom_object_with_added_removed_methods()
        ddiff = DeepDiff(t1, t2)
        expected = {'attribute_added': {'root.method_a', 'root.method_b'}}
        self.assertEqual(ddiff, expected)
Ejemplo n.º 34
0
 def test_list_none_item_removed(self):
     """Removing a trailing None is reported as iterable_item_removed."""
     left = [1, 2, None]
     right = [1, 2]
     diff = DeepDiff(left, right)
     self.assertEqual(diff, {'iterable_item_removed': {'root[2]': None}})
Ejemplo n.º 35
0
 def test_skip_str_type_in_dictionary(self):
     """Excluding str removes string-valued entries from the comparison."""
     left = {1: {2: "a"}}
     right = {1: {}}
     diff = DeepDiff(left, right, exclude_types=[str])
     self.assertEqual(diff, {})
Ejemplo n.º 36
0
 def test_decimal_ignore_order(self):
     """Reordered Decimal-valued dicts are ignored with ignore_order."""
     left = [{1: Decimal('10.1')}, {2: Decimal('10.2')}]
     right = [{2: Decimal('10.2')}, {1: Decimal('10.1')}]
     self.assertEqual(DeepDiff(left, right, ignore_order=True), {})
Ejemplo n.º 37
0
 def test_skip_list_path(self):
     """An excluded list index is not reported even when removed."""
     left = ['a', 'b']
     right = ['a']
     diff = DeepDiff(left, right, exclude_paths=['root[1]'])
     self.assertEqual(diff, {})
Ejemplo n.º 38
0
    async def evaluate_started(self,
                               first_time,
                               plugin_booting,
                               event=None):  # noqa: C901
        """Evaluate the configured startup conditions and, once they are all
        met, mark the plugin as reading messages and notify AppDaemon.

        :param first_time: True on the first call; resets the one-shot log
                           latches and applies any configured "delay".
        :param plugin_booting: when True use the plugin startup conditions,
                               otherwise the appdaemon startup conditions.
        :param event: optional event dict evaluated against an "event"
                      startup condition.
        """

        if first_time is True:
            # One-shot latches so each met condition is only logged once.
            self.hass_ready = False
            self.state_matched = False

        if plugin_booting is True:
            startup_conditions = self.plugin_startup_conditions
        else:
            startup_conditions = self.appdaemon_startup_conditions

        start_ok = True

        # A hass state is always required; default to RUNNING if unspecified.
        if "hass_state" not in startup_conditions:
            startup_conditions["hass_state"] = "RUNNING"

        if "delay" in startup_conditions:
            if first_time is True:
                self.logger.info("Delaying startup for %s seconds",
                                 startup_conditions["delay"])
                await asyncio.sleep(int(startup_conditions["delay"]))

        if "hass_state" in startup_conditions:
            self.metadata = await self.get_hass_config()
            if "state" in self.metadata:
                if self.metadata["state"] == startup_conditions["hass_state"]:
                    if self.hass_ready is False:
                        self.logger.info(
                            "Startup condition met: hass state=RUNNING")
                        self.hass_ready = True
                else:
                    start_ok = False

        if "state" in startup_conditions:
            state = await self.get_complete_state()
            entry = startup_conditions["state"]
            if "value" in entry:
                # print(entry["value"], state[entry["entity"]])
                # print(DeepDiff(state[entry["entity"]], entry["value"]))
                # No "values_changed" report means the entity's current state
                # matches the required value.
                if entry[
                        "entity"] in state and "values_changed" not in DeepDiff(
                            entry["value"], state[entry["entity"]]):
                    if self.state_matched is False:
                        self.logger.info(
                            "Startup condition met: %s=%s",
                            entry["entity"],
                            entry["value"],
                        )
                        self.state_matched = True
                else:
                    start_ok = False
            elif entry["entity"] in state:
                if self.state_matched is False:
                    self.logger.info("Startup condition met: %s exists",
                                     entry["entity"])
                    self.state_matched = True
                else:
                    # NOTE(review): this clears start_ok when the state was
                    # already matched on a previous pass — looks inverted
                    # compared to the "value" branch above; confirm intent.
                    start_ok = False

        if "event" in startup_conditions:
            if event is not None:
                entry = startup_conditions["event"]
                if "data" not in entry:
                    if entry["event_type"] == event["event_type"]:
                        self.logger.info(
                            "Startup condition met: event type %s fired",
                            event["event_type"],
                        )
                    else:
                        start_ok = False
                else:
                    if entry["event_type"] == event["event_type"]:
                        # Event data must match exactly (no values_changed).
                        if "values_changed" not in DeepDiff(
                                event["data"], entry["data"]):
                            self.logger.info(
                                "Startup condition met: event type %s, data = %s fired",
                                event["event_type"],
                                entry["data"],
                            )
                    else:
                        start_ok = False
            else:
                start_ok = False

        if start_ok is True:
            # We are good to go
            self.logger.info("All startup conditions met")
            self.reading_messages = True
            state = await self.get_complete_state()
            await self.AD.plugins.notify_plugin_started(
                self.name, self.namespace, self.metadata, state,
                self.first_time)
            self.first_time = False
            self.already_notified = False
Ejemplo n.º 39
0
 def test_serialization_text(self):
     """The text-view diff must round-trip through both serializers."""
     diff = DeepDiff(t1, t2)
     # json-pickle keeps type information; plain JSON keeps the values.
     assert "builtins.list" in diff.to_json_pickle()
     assert "world" in diff.to_json()
Ejemplo n.º 40
0
 def _is_mock_equal(data, mock_name):
     """Return True when *data* deep-equals the stored mock, ignoring order."""
     expected = open_mock(mock_name)
     # An empty DeepDiff mapping is falsy, so "no diff" means equal.
     return not DeepDiff(data, expected, ignore_order=True)
Ejemplo n.º 41
0
 def test_serialization_tree(self):
     """Tree-view diffs must serialize via json-pickle and plain JSON."""
     diff = DeepDiff(t1, t2, view='tree')
     # Both serialized forms must still contain the changed value.
     for serialized in (diff.to_json_pickle(), diff.to_json()):
         assert "world" in serialized
Ejemplo n.º 42
0
 def test_significant_digits_for_complex_real_part(self):
     """Real-part changes beyond 5 significant digits are ignored."""
     left = 1.23446879 + 1.22225j
     right = 1.23446764 + 1.22225j
     self.assertEqual(DeepDiff(left, right, significant_digits=5), {})
Ejemplo n.º 43
0
 def test_skip_custom_object_path(self):
     """Excluding 'root.a' hides the only differing attribute."""
     first, second = CustomClass(1), CustomClass(2)
     diff = DeepDiff(first, second, exclude_paths=['root.a'])
     self.assertEqual(diff, {})
Ejemplo n.º 44
0
 def test_negative_significant_digits(self):
     """A negative significant_digits value must raise ValueError."""
     self.assertRaises(ValueError, DeepDiff, 1, 1, significant_digits=-1)
Ejemplo n.º 45
0
                                       'ACL': "public-read",
                                       'StorageClass': "REDUCED_REDUNDANCY"
                                   })
    except botocore.exceptions.ClientError as e:
        log('There was an error uploading cards.json.')
        log(('The error was "{}"').format(e))
    log('Finished.')


if __name__ == '__main__':
    # Card-sync job: convert both JSON dumps to CSV, diff them, and publish.
    log('Starting..')
    json2csv('cards.json')
    json2csv('old-cards.json')
    log('Finished converting and saving files in csv format.')
    # presumably the count of newly added cards — verify differences()
    thereAreNewCards = differences()
    if thereAreNewCards > 0:
        downloadCards()
        uploadToAWS()
    else:
        # No new cards: look for changed (nerfed/buffed) cards by scanning
        # the stringified DeepDiff of the two dumps for card names.
        newFile = readFile('cards.json')
        oldFile = readFile('old-cards.json')
        difr = findTheIndex((str(DeepDiff(newFile, oldFile))))
        nerfed = findCardName(difr)
        if len(nerfed) == 0:
            log('No nerfed/buffed cards found')
        else:
            saveCardsToCsv(nerfed)
            downloadCards()
            uploadToAWS()
    # NOTE(review): sleeps 24h once but does not loop — presumably an
    # external supervisor restarts the script; confirm.
    time.sleep(86400)
Ejemplo n.º 46
0
 def test_dict_none_item_removed(self):
     """Removing a key whose value is None is reported as an item removal."""
     before = {1: None, 2: 2}
     after = {2: 2}
     expected = {'dictionary_item_removed': {'root[1]'}}
     self.assertEqual(DeepDiff(before, after), expected)
Ejemplo n.º 47
0
 def test_custom_objects_add_and_remove_method_verbose(self):
     """verbose_level=2 reports added methods under 'attribute_added'."""
     t1, t2 = self.get_custom_object_with_added_removed_methods()
     added = DeepDiff(t1, t2, verbose_level=2)['attribute_added']
     self.assertTrue('root.method_a' in added)
     self.assertTrue('root.method_b' in added)
Ejemplo n.º 48
0
 def test_diff_when_hash_fails(self, mock_DeepHash, mock_logger):
     """If DeepHash raises, DeepDiff logs a warning instead of crashing."""
     mock_DeepHash.side_effect = Exception('Boom!')
     left = {"blah": {4}, 2: 1337}
     right = {"blah": {4}, 2: 1337}
     DeepDiff(left, right, ignore_order=True)
     assert mock_logger.warning.called
Ejemplo n.º 49
0
 def test_significant_digits_for_decimals(self):
     """With significant_digits=0, Decimal 2.5 and 1.5 compare as equal."""
     diff = DeepDiff(Decimal('2.5'), Decimal('1.5'), significant_digits=0)
     self.assertEqual(diff, {})
Ejemplo n.º 50
0
    def test_most_active_authors_stats(self):
        """
        Test query: most_active_authors_stats

        Runs the named query twice — once unfiltered and once filtered to a
        single author — and DeepDiffs each result against a literal fixture.
        """
        # First run: no filter, stats aggregated over every author.
        params = set_params({})
        ret = self.eldb.run_named_query('most_active_authors_stats', '.*',
                                        params)
        # Expected per-event-type aggregates; item order matters to DeepDiff.
        expected = {
            'ChangeCommentedEvent': {
                'count_avg':
                1,
                'count_median':
                1.0,
                'items': [
                    {
                        'doc_count': 1,
                        'key': 'jane'
                    },
                    {
                        'doc_count': 1,
                        'key': 'steve'
                    },
                ],
                'total':
                2,
                'total_hits':
                2,
            },
            'ChangeCreatedEvent': {
                'count_avg':
                1.3333333333333333,
                'count_median':
                1,
                'items': [
                    {
                        'doc_count': 2,
                        'key': 'jane'
                    },
                    {
                        'doc_count': 1,
                        'key': 'john'
                    },
                    {
                        'doc_count': 1,
                        'key': 'steve'
                    },
                ],
                'total':
                3,
                'total_hits':
                4,
            },
            'ChangeMergedEvent': {
                'count_avg':
                1,
                'count_median':
                1,
                'items': [
                    {
                        'doc_count': 1,
                        'key': 'jane'
                    },
                    {
                        'doc_count': 1,
                        'key': 'john'
                    },
                    {
                        'doc_count': 1,
                        'key': 'steve'
                    },
                ],
                'total':
                3,
                'total_hits':
                3,
            },
            'ChangeReviewedEvent': {
                'count_avg':
                1.3333333333333333,
                'count_median':
                1,
                'items': [
                    {
                        'doc_count': 2,
                        'key': 'john'
                    },
                    {
                        'doc_count': 1,
                        'key': 'jane'
                    },
                    {
                        'doc_count': 1,
                        'key': 'steve'
                    },
                ],
                'total':
                3,
                'total_hits':
                4,
            },
        }

        # DeepDiff is falsy when actual == expected; any diff is a failure.
        ddiff = DeepDiff(ret, expected)
        if ddiff:
            raise DiffException(ddiff)

        # Second run: restrict the stats to the author 'jane'.
        params = set_params({'authors': 'jane'})
        ret = self.eldb.run_named_query('most_active_authors_stats', '.*',
                                        params)
        expected = {
            'ChangeCommentedEvent': {
                'count_avg': 1,
                'count_median': 1,
                'items': [{
                    'doc_count': 1,
                    'key': 'jane'
                }],
                'total': 1,
                'total_hits': 1,
            },
            'ChangeCreatedEvent': {
                'count_avg': 2,
                'count_median': 2,
                'items': [{
                    'doc_count': 2,
                    'key': 'jane'
                }],
                'total': 1,
                'total_hits': 2,
            },
            'ChangeMergedEvent': {
                'count_avg': 1,
                'count_median': 1,
                'items': [{
                    'doc_count': 1,
                    'key': 'jane'
                }],
                'total': 1,
                'total_hits': 1,
            },
            'ChangeReviewedEvent': {
                'count_avg': 1,
                'count_median': 1,
                'items': [{
                    'doc_count': 1,
                    'key': 'jane'
                }],
                'total': 1,
                'total_hits': 1,
            },
        }

        ddiff = DeepDiff(ret, expected)
        if ddiff:
            raise DiffException(ddiff)
Ejemplo n.º 51
0
def main():
    """Check all reference json files vs actual output.

    For every reference JSON file found, decode the matching .cu8 sample
    with rtl_433 and DeepDiff the decoded JSON against the reference.

    Returns the number of failed samples (0 means full success).
    """
    parser = argparse.ArgumentParser(description='Test rtl_433')
    parser.add_argument('-c', '--rtl433-cmd', default="rtl_433",
                        help='rtl_433 command')
    parser.add_argument('-I', '--ignore-field', default=[], action="append",
                        help='Field to ignore in JSON data')
    parser.add_argument('--first-line', default=False, action="store_true",
                        help='Only compare the first outputed line of rtl433'
                             ' with first line of reference json')
    args = parser.parse_args()

    rtl_433_cmd = args.rtl433_cmd
    ignore_fields = args.ignore_field
    first_line = args.first_line

    expected_json = find_json()
    nb_ok = 0
    nb_fail = 0
    for output_fn in expected_json:
        # Each reference json must have a matching .cu8 sample next to it.
        input_fn = os.path.splitext(output_fn)[0] + ".cu8"
        if not os.path.isfile(input_fn):
            print("WARNING: Missing '%s'" % input_fn)
            continue

        # An "ignore" marker file in the directory skips the whole sample.
        ignore_fn = os.path.join(os.path.dirname(output_fn), "ignore")
        if os.path.isfile(ignore_fn):
            print("WARNING: Ignoring '%s'" % input_fn)
            continue

        # Optional per-directory overrides for sample rate and protocol.
        samplerate = 250000
        samplerate_fn = os.path.join(os.path.dirname(output_fn), "samplerate")
        if os.path.isfile(samplerate_fn):
            with open(samplerate_fn, "r") as samplerate_file:
                samplerate = int(samplerate_file.readline())

        protocol = None
        protocol_fn = os.path.join(os.path.dirname(output_fn), "protocol")
        if os.path.isfile(protocol_fn):
            with open(protocol_fn, "r") as protocol_file:
                protocol = int(protocol_file.readline())

        # Load the expected data: one JSON document per non-empty line.
        expected_data = []
        with open(output_fn, "r") as output_file:
            try:
                for json_line in output_file.readlines():
                    if not json_line.strip():
                        continue
                    expected_data.append(json.loads(json_line))
            except ValueError:  # fix: `as err` was never used (F841)
                print("ERROR: invalid json: '%s'" % output_fn)
                continue
            expected_data = remove_fields(expected_data, ignore_fields)

        # Run rtl_433; the second element of the tuple (stderr) is unused.
        rtl433out, _ = run_rtl433(input_fn, rtl_433_cmd,
                                  samplerate, protocol)

        # Parse rtl_433's JSON output lines.
        rtl433out = rtl433out.decode('utf8').strip()
        results = []
        for json_line in rtl433out.split("\n"):
            if not json_line.strip():
                continue
            try:
                results.append(json.loads(json_line))
            except ValueError:
                nb_fail += 1
                # TODO: factorise error print
                print("## Fail with '%s': invalid json output" % input_fn)
                print("%s" % json_line)
                continue
        results = remove_fields(results, ignore_fields)

        if first_line:
            # Compare only the first decoded line against the first
            # reference line; substitute an empty doc when a side is empty.
            if not results:
                results.append({})
            if not expected_data:
                expected_data.append({})
            expected_data, results = expected_data[0], results[0]

        # Compute the diff; an empty DeepDiff mapping means a pass.
        diff = DeepDiff(expected_data, results)
        if diff:
            nb_fail += 1
            print("## Fail with '%s':" % input_fn)
            for error, details in diff.items():
                print(" %s" % error)
                for detail in details:
                    print("  * %s" % detail)
        else:
            nb_ok += 1

    # Print a summary: total tested equals ok + fail.
    print("%d records tested, %d have failed" % (nb_ok+nb_fail, nb_fail))
    return nb_fail
Ejemplo n.º 52
0
def docs_match(old_doc, new_doc):
    """
    Return True if the the docs match, minus the ignorable fields

    Args:
        old_doc: a dict of an elasticsearch doc from the old cluster
        new_doc: a dict of an elasticsearch doc from the new cluster
    """
    # NOTE: Python 2 code (print statement below).
    """
    example doc:
    {'dictionary_item_added': {
        "root['_source']['_id']",
        "root['_source']['abuse_flaggers']",
        "root['_source']['anonymous']",
        "root['_source']['anonymous_to_peers']",
        "root['_source']['at_position_list']",
        "root['_source']['author_username']",
        "root['_source']['closed']",
        "root['_source']['comment_count']",
        "root['_source']['historical_abuse_flaggers']",
        "root['_source']['pinned']",
        "root['_source']['thread_type']",
        "root['_source']['visible']",
        "root['_source']['votes']",
        "root['found']"},
     'dictionary_item_removed': {
        "root['_source']['id']",
        "root['_source']['thread_id']",
        "root['_source']['votes_point']",
        "root['exists']"},
     'values_changed': {
        "root['_index']": {
            'new_value': u'content_20170324145539907',
            'old_value': u'content_20151207225034'},
        "root['_source']['body']": {
            'new_value': u'encryption neglect hypothesize polluters wining pitiably prophetess apostrophe foretelling assignments diaphragms trustees scroll scruffs shrivels characterizes digraph lasted sharked rewind chamoix charier protoplasm rapports isolated upbraid mortgaged cuddled indefinitely sinful insaner slenderized cemetery deject soundly preventable',
            'old_value': u'embellishing orbitals complying alternation welching sepulchered grate blench placenta landslide dependance hurdle predicted chaplet earsplitting assess awol necrosis freeways skipper delicatessen sponsorship bellboys antiseptics gabardines admittedly screechier professional roughness educations nutting valences iridescence deductions'},
        "root['_source']['title']": {
            'new_value': u'southpaw afterward playgoers roughed requites arrived byplay ninetieth textural rental foreclosing',
            'old_value': u'guttersnipes corduroys ghostly discourtesies'},
        "root['_source']['updated_at']": {
            'new_value': u'2017-03-29T18:51:19Z',
            'old_value': u'2017-03-28T12:58:02Z'},
        "root['_version']": {
            'new_value': 20,
            'old_value': 1}}}
"""
    # Diff paths whose changes should not count as a mismatch.
    # NOTE(review): "root['updated_at']" does not match the nested path
    # "root['_source']['updated_at']" shown in the example above — confirm
    # whether updated_at changes are actually being ignored as intended.
    ignorable_fields = [
        "root['exists']",
        "root['found']",
        "root['_index']",
        "root['updated_at']",
        "root['_version']",
        "root['_score']",
    ]
    diff_types = ['dictionary_item_added', 'dictionary_item_removed', 'values_changed']
    diff_doc = DeepDiff(old_doc, new_doc)

    # Normalize: make values_changed present so the key-set check and the
    # final all() can treat every diff type uniformly.
    if 'values_changed' not in diff_doc:
        diff_doc['values_changed'] = set()

    #if this fails something is horribly wrong
    if set(diff_doc.keys()) != set(diff_types):
        print 'ERROR: expected to be diffing dictionaries, got something else! id: {}'.format(
            new_doc['_id'])

    # Strip every ignorable field from every diff category before deciding.
    for diff_type in diff_types:
        for field in ignorable_fields:
            if diff_type in diff_doc:
                #values_changed is a set, the other two are dicts
                # (set vs dict depends on the deepdiff version in use)
                if isinstance(diff_doc[diff_type], set):
                    diff_doc[diff_type].discard(field)
                else:
                    diff_doc[diff_type].pop(field, None)

    # Docs match only when nothing remains in any diff category.
    return all(len(diff_doc[diff_type]) == 0 for diff_type in diff_types)
Ejemplo n.º 53
-1
    def main(self, log, args):
        """Compare a generated JSON file against an expected one.

        Loads both files, logs the DeepDiff between them, and raises
        ValueError when the diff contains any of the change categories
        listed in ``self.ERROR_ITEMS``. Exits with status 1 when
        ``args.type`` is not one of ``self.SUPPORTED_TYPES``.
        """
        self.log = log
        # fix: dropped the leftover debug print() that duplicated this line.
        self.log.debug(_("Args: {0}").format(args))
        if args.type not in self.SUPPORTED_TYPES:
            self.log.error(_("Unsupported file type {0}").
                           format(args.type))
            exit(1)

        with open(args.generated_file) as g:
            json_gen = json.loads(g.read())
            self.log.debug(_("Generated: {0}").format(json_gen))

        with open(args.expected_file) as e:
            json_exp = json.loads(e.read())
            self.log.debug(_("Expected: {0}").format(json_exp))

        diff = DeepDiff(json_exp, json_gen)
        self.log.debug(_("Keys in diff: {0}").format(diff.keys()))

        # fix: the differences were previously logged twice (a bare
        # "Differences:" header and then the full report) — log once.
        d = pprint.pformat(diff, indent=self.INDENT)
        self.log.info("Differences:\n{0}".format(d))

        # Any structural change category counts as an error.
        if len(set(self.ERROR_ITEMS).intersection(diff.keys())):
            diff_str = pprint.pformat(diff)
            msg = _("Found item changes: {0}").format(diff_str)
            self.log.error(msg)
            raise ValueError(msg)
Ejemplo n.º 54
-1
    def add_or_update_tess_in_country_list(self, tess_object):
        """Create or refresh the per-country Grafana dashboard for a TESS
        photometer.

        Instantiates the "template_tess_by_regions" dashboard template for
        the photometer described by ``tess_object`` (a dict of ``[token_*]``
        replacement values) and either appends/updates its panels in the
        existing country dashboard or creates the dashboard from scratch.

        Returns the Grafana API response, or a ``{"status": "error", ...}``
        dict when the template is missing or token replacement fails.
        """
        DASH_TEMPLATE_UID = "template_tess_by_regions"  # uid dash template
        DASH_FINAL_UID = "tess_country_" + tess_object["[token_tess_location_country]"].replace(" ", "_")

        # Load template
        dash_info_template = self.__find_template(DASH_TEMPLATE_UID)

        if not "dashboard" in dash_info_template:
            return ({"status": "error", "error": "No exist template " + DASH_TEMPLATE_UID + " in organisation " + str(self.grafana_template_org_id)})

        # Update folder ID (presumably __create_folder also creates the
        # folder when missing — confirm)
        dash_info_template["folderId"] = self.__create_folder(dash_info_template["meta"]["folderTitle"])

        # Replace all [token_*] placeholders with the photometer's values
        dash_info_template = self.__replace_template_tokens(dash_info_template, tess_object)
        if "status" in dash_info_template and dash_info_template["status"] == "error":
            return dash_info_template

        # Search for dashboards that already use this title
        results = self.__find_dash_by_title(dash_info_template["dashboard"]["title"])

        # Generate Sun and Moon datasources
        self.generate_sunmoon_datasource(tess_object)

        response = None

        if results:
            message = "Update Dash and"
            # The dashboard already exists
            dash_info = self.__find_dash_by_uid(results[0]['uid'])

            # Collect the panel ids already in use.
            # NOTE(review): this regex compile is unused here and repeated
            # below — presumably leftover code; confirm.
            p = re.compile('stars\d+', re.IGNORECASE)
            in_use_ids = []
            for panel_info in dash_info["dashboard"]["panels"][:]:
                in_use_ids.append(panel_info["id"])

            # Check whether panels for this photometer already exist
            panels_ids = []
            # Collect indexes of panels that already mention this tess_id
            p = re.compile('stars\d+', re.IGNORECASE)
            for idx, panel_info in enumerate(dash_info["dashboard"]["panels"]):
                panel_info_string = json.dumps(panel_info).replace('stars4all', '')
                if tess_object["[token_tess_id]"] in p.findall(panel_info_string):
                    panels_ids.append(idx)

            # Decide between appending new panels or refreshing existing ones
            if len(panels_ids) == 0:
                message += " added Tess: " + tess_object["[token_tess_id]"]
                # Append the template's panels with fresh ids.
                # NOTE(review): __get_next_id is called twice per panel —
                # the appended id may differ from the assigned one; confirm.
                for idx in range(len(dash_info_template["dashboard"]["panels"])):
                    dash_info_template["dashboard"]["panels"][idx]["id"] = self.__get_next_id(in_use_ids)
                    in_use_ids.append(self.__get_next_id(in_use_ids))
                    dash_info["dashboard"]["panels"].append(dash_info_template["dashboard"]["panels"][idx])

            else:
                message += " update Tess: " + tess_object["[token_tess_id]"]
                # Update panels that already exist and add the missing ones
                template_id_change = []
                for idx in range(len(panels_ids)):
                    for idx_template, panel_template in enumerate(dash_info_template["dashboard"]["panels"]):
                        diff = DeepDiff(dash_info["dashboard"]["panels"][panels_ids[idx]], panel_template)
                        # Skip template panels that differ structurally from
                        # the existing one; only value-level diffs reuse ids
                        if any(elem in ["dictionary_item_added", "dictionary_item_removed", "iterable_item_removed", "type_changes", "iterable_item_added"] for elem in diff.keys()):
                            continue
                        panel_template["id"] = dash_info["dashboard"]["panels"][panels_ids[idx]]["id"]
                        template_id_change.append(idx_template)

                # Panels not matched above get brand-new ids
                for idx_template, panel_template in enumerate(dash_info_template["dashboard"]["panels"]):
                    if idx_template not in template_id_change:
                        panel_template["id"] = self.__get_next_id(in_use_ids)
                        in_use_ids.append(self.__get_next_id(in_use_ids))

                # Delete the old panels (highest index first so the
                # remaining indexes stay valid)
                panels_ids.sort(reverse=True)
                for idx in panels_ids:
                    dash_info["dashboard"]["panels"].pop(idx)
                panels_ids.sort(reverse=False)

                # Insert the new panels, preserving the previous position
                # (panels are grouped in blocks of len_template per tess)
                len_template = len(dash_info_template["dashboard"]["panels"])
                div_result = divmod(panels_ids[0], len_template)
                if div_result[1] == 0:
                    start_pos = div_result[0] * (len_template)
                else:
                    start_pos = (div_result[0] + 1) * (len_template)

                for idx in range(len_template):
                    dash_info["dashboard"]["panels"].insert(start_pos + idx, dash_info_template["dashboard"]["panels"][idx])

            # Sort panel groups by photometer number (starsNNN in the title)
            try:
                arr_no_sort = []
                for i in range(0, len(dash_info["dashboard"]["panels"]), len(dash_info_template["dashboard"]["panels"])):
                    stars_ids = re.findall('\d+', dash_info["dashboard"]["panels"][i]["title"])
                    arr_no_sort.append({"idx": i, "starsid": int(stars_ids[0])})

                arr_sort = sorted(arr_no_sort, key=lambda k: k['starsid'])

                if arr_no_sort != arr_sort:
                    temp_panesl = []

                    for item in arr_sort:
                        for idx in range(len(dash_info_template["dashboard"]["panels"])):
                            temp_panesl.append(dash_info["dashboard"]["panels"][item["idx"] + idx])

                    dash_info["dashboard"]["panels"] = temp_panesl

            # NOTE(review): bare except silently swallows any sorting error;
            # consider narrowing to (IndexError, ValueError, KeyError).
            except:
                pass

            # Copy annotations and variables from the template
            dash_info["dashboard"]["templating"] = dash_info_template["dashboard"]["templating"]
            dash_info["dashboard"]["annotations"] = dash_info_template["dashboard"]["annotations"]

            # Fix gridPos
            dash_info["dashboard"]["panels"] = self.__fix_gridPos(dash_info["dashboard"]["panels"])

            # Set uid
            dash_info["dashboard"]["uid"] = DASH_FINAL_UID

            # Save
            response = self.__update_dash(dash_info, message=message + " " + str(panels_ids))

        else:
            # Create the dashboard from the template
            response = self.__create_dash_from_template(
                dash_info_template, message="Create Dash and added Tess: " + tess_object["[token_tess_id]"], uid=DASH_FINAL_UID)

        self.__set_starred_configuration(response, dash_info_template)
        return(response)