Example #1
 def test_sample_data4(self):
     result = get_all_keys(self.sample4)
     self.assertEqual(3, len(result))
     keys_to_verify = [
         "values",
         "checks",
         "monitoring_zones"
     ]
     for key in keys_to_verify:
         self.assertIn(key, result)
Example #2
    def set_node_dependencies(self):
        """
        Builds the node edges by mapping the other nodes that each node depends
        on. This is done by searching for the keywords DependsOn, InstanceId,
        and Ref. These keywords show that a particular resource is linked in
        some way to another, so the link should appear on the graph.

        :return:
        """
        def map_dependencies(dependency_tag):
            dependencies_list = nested_lookup(dependency_tag, sub_dict)
            flat_list = self.remove_nested_list_dependencies(dependencies_list)
            self.node_dependencies[resources_list[counter]] = flat_list
            return True

        # Pull the full JSON, but only the 'Resources' key and below.
        current_json = self.get_file_to_dict()['Resources']
        counter = 0
        resources_list = list(current_json)

        for _, sub_dict in current_json.items():
            try:
                if 'Ref' in get_all_keys(sub_dict):
                    map_dependencies('Ref')
                elif 'InstanceId' in get_all_keys(sub_dict):
                    map_dependencies('InstanceId')
                elif 'DependsOn' in get_all_keys(sub_dict):
                    # This branch could be replicated in the other statements
                    # if they present the nested-list issue in the future.
                    map_dependencies('DependsOn')
                counter += 1
            except KeyError:
                continue
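A minimal sketch of the lookup this method relies on, using a toy CloudFormation-style Resources dict (the template content here is hypothetical):

from nested_lookup import nested_lookup, get_all_keys

resources = {
    "WebServer": {
        "Type": "AWS::EC2::Instance",
        "Properties": {"SecurityGroups": [{"Ref": "WebSecurityGroup"}]},
    }
}

sub_dict = resources["WebServer"]
# 'Ref' appears somewhere in the nested structure...
assert "Ref" in get_all_keys(sub_dict)
# ...so nested_lookup can pull out every referenced name, however deep.
print(nested_lookup("Ref", sub_dict))  # ['WebSecurityGroup']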
Example #3
 def test_sample_data3(self):
     result = get_all_keys(self.sample3)
     self.assertEqual(9, len(result))
     keys_to_verify = [
         "processor_details",
         "processor_name",
         "l2_cache(per_core)",
         "total_number_of_cores",
     ]
     for key in keys_to_verify:
         self.assertIn(key, result)
Example #4
 def test_sample_data2(self):
     result = get_all_keys(self.sample2)
     self.assertEqual(10, len(result))
     keys_to_verify = [
         "hardware_details",
         "processor_speed",
         "total_numberof_cores",
         "memory",
     ]
     for key in keys_to_verify:
         self.assertIn(key, result)
Example #5
 def test_sample_data1(self):
     result = get_all_keys(self.sample1)
     self.assertEqual(15, len(result))
     keys_to_verify = [
         "model_name",
         "core_details",
         "l2_cache(per_core)",
         "build_version",
         "date",
     ]
     for key in keys_to_verify:
         self.assertIn(key, result)
Example #6
 def test_sample_data5(self):
     result = get_all_keys(self.sample5)
     self.assertEqual(7, len(result))
     keys_to_verify = [
         "listings",
         "name",
         "postcode",
         "full_address",
         "city",
         "lat",
         "lng",
     ]
     for key in keys_to_verify:
         self.assertIn(key, result)
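These tests all follow the same pattern: get_all_keys walks every level of a nested dictionary and returns a flat list of the key names it finds, which is what the length assertions count. A minimal sketch (the sample data here is hypothetical, not the suite's actual fixtures):

from nested_lookup import get_all_keys

sample = {
    "hardware_details": {
        "memory": "16 GB",
        "processor_details": {"processor_name": "Intel i7"},
    }
}

# Four keys in total, collected from every nesting level (order may vary):
# 'hardware_details', 'memory', 'processor_details', 'processor_name'
print(get_all_keys(sample))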
Example #7
print("New updated dictionary dict3:", dict3)


# # The following lines of code show how one can split a string on a delimiter and build a dictionary from the pieces.

# In[3]:


s1 = "Name1=Value1;Name2=Value2;Name3=Value3"

dict(map(lambda x: x.split('='), s1.split(';')))


# In[5]:


multi_attr_dictionary = "ID=gene:SNSG00000223972;Name=SSX11L1;biotype=transcribe;description=DEAD/H-box helicase 11 like 1 [Source:HGNC Symbol%3BAcc:HGNC:37102];sample_id=SNSG00000223972;logic_name=javana_hemo_saiens;version=6"

dict(map(lambda x: x.split('='), multi_attr_dictionary.split(';')))
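# # Note: a plain split('=') breaks if a value itself contains '='. A safer variant (assuming the first '=' separates the name from the value) limits the split:

# In[ ]:


dict(map(lambda x: x.split('=', 1), multi_attr_dictionary.split(';')))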


# In[ ]:


from nested_lookup import get_all_keys
keys = get_all_keys(dict2)
print(keys)

Example #8
def yamlGenerator(bookTitle, data):
    bks = get_books(onlybooks=True)
    tstbk = bookTitle.strip()
    bkinfo = get_books(onlybooks=False, filename=bks[tstbk])

    # All Data
    mainData = bkinfo[tstbk]['data']
    links = bkinfo[tstbk]['links']
    metadata = bkinfo[tstbk]['metadata']
    filenm = metadata['filename']

    final = {}
    for d in data:
        if d['parent'] == '#':
            final[d['id']] = d['children']

    temp = {}
    temp2 = {}

    for key, values in final.items():
        lst = rec(data, values)
        temp[key] = lst

    keyList = get_all_keys(temp)
    idList = unpack(temp)
    temp = str(temp)
    for item in idList:
        for i in range(len(mainData)):
            t = mainData[i]
            if item == t['id']:
                temp = temp.replace(item, links[i])

    for item in keyList:
        if '/' in item:
            tempItem = item.split("/")
            temp = temp.replace(item, tempItem[-1])

    # Round-trip: parse the stringified dict back into a real dict.
    # ast.literal_eval would be a safer alternative to eval here.
    temp = eval(temp)

    temp2['metadata'] = metadata
    temp2['BOOK'] = [temp]

    mystr = yaml.dump(yaml.load(json.dumps(temp2), Loader=yaml.SafeLoader),
                      default_flow_style=False)
    mystr = re.sub(r'([\n])  ([A-Z])', r'\n- \2', mystr)

    hash_object = hashlib.md5(mystr.encode())
    hex_dig = hash_object.hexdigest()

    path = path_expand(f'./dest/booksgenerated/{hex_dig}.yaml')
    with open(path, 'w+') as f:
        f.write(mystr)
    return filenm, hex_dig
Example #9
import json
import io
import nested_lookup
from nested_lookup import get_all_keys
from nested_lookup import get_occurrence_of_key

with io.open('petitionz.json', encoding='ISO-8859-1') as json_file:
    content = json_file.read()
    json_data = json.loads(content)

print(type(json_data))

print(get_all_keys(json_data))
print(get_occurrence_of_key(json_data, key='signature_count'))
def find(key, dictionary):
    # Recursively yield every value stored under `key`, descending into
    # nested dicts and lists.
    for k, v in dictionary.items():
        if k == key:
            yield v
        elif isinstance(v, dict):
            for result in find(key, v):
                yield result
        elif isinstance(v, list):
            for d in v:
                for result in find(key, d):
                    yield result

results = list(find('signatures_by_country', json_data))
print(results)
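The hand-rolled find generator above does the same job as the library's own lookup. A minimal sketch using nested_lookup directly (assuming the same petitionz.json structure):

from nested_lookup import nested_lookup

# nested_lookup returns every value stored under the given key,
# however deeply it is nested inside dicts or lists.
print(nested_lookup('signatures_by_country', json_data))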
Example #10
def colenFunc(data):
    # Use a parameter name that does not shadow the dict builtin.
    all_keys = sorted(get_all_keys(data))
    sorted_dupe_keys = getDuplicatesWithCount(all_keys)
    # Emit the duplicate counts as JSON instead of discarding the dump.
    sys.stdout.write(json.dumps(sorted_dupe_keys) + '\n')
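getDuplicatesWithCount is not defined in this example; a minimal sketch of what it might do, using collections.Counter (the name and exact behavior are assumptions):

from collections import Counter

def getDuplicatesWithCount(keys):
    # Map each key that appears more than once to its occurrence count.
    counts = Counter(keys)
    return {key: count for key, count in counts.items() if count > 1}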
Example #11
def recursiveFilterNodes(dictIn):
    dictOut = {}
    for key, value in dictIn.items():
        if not isinstance(value, dict):
            # Leaf values (e.g. the -1, -2 markers) become key : {} pairs.
            dictOut[key] = {}
        else:
            # Recurse so the key maps to its filtered sub-dict.
            dictOut[key] = recursiveFilterNodes(value)
    return dictOut


electricBoogalu = recursiveCallback(outputStructure)
# every key is filtered for uniqueness ( a set )
uniqueKeys = list(set(nested_lookup.get_all_keys(electricBoogalu)))

# We only want to keep the list of unique header keys;
# it will also include header files that aren't found.
sF = []
for x in sourceFiles:
    sF.append(x[28:])
queryKeys = uniquify(sF, uniqueKeys)
# --> want to get the total number of header nodes to look at

# get_occurrences_and_values returns a data type like:
# {"occurrences":# of times,"values":{}} --> everything inside is values

# terminal node, just like {} nodes (empty nodes)
notFound = nested_lookup.get_occurrences_and_values(
    [electricBoogalu], value=ERROR_ENUM["ERROR: FILE NOT FOUND"])
for nodes in rtr_list:
    tb = load('Operation_List_Master.yaml')
    dev = tb.devices[nodes]
    d = Dialog([
        Statement(pattern=r'Permission denied',
                  action=None,
                  args=None,
                  loop_continue=True,
                  continue_timer=False)
    ])
    dev.connect(connect_reply=d)
    p1 = dev.parse('show running-config')
    p2 = dev.parse('dir bootflash:')

    SHOW_RUN = get_all_keys(p1)
    r = re.compile("boot ")
    SHOW_RUN_1 = list(filter(r.match, SHOW_RUN))

    DIR_BOOTFLASH = list(p2['dir']['bootflash:/']['files'].keys())
    rr = re.compile(r"asr\S+\.16\.12\.04\.SPA\.bin")
    DIR_BOOTFLASH_1 = list(filter(rr.match, DIR_BOOTFLASH))

    CONFIGURE = []
    for IOS in SHOW_RUN_1:
        CONFIGURE.append('no ' + IOS)

    if not DIR_BOOTFLASH_1:
        #print("\nPLEASE UPLOAD NEW IOS TO THE BOOTFLASH: OF " + nodes + "\n")
        FINAL_STATUS.append('Please upload new IOS to the bootflash: of ' +
                            nodes)