def linksets(self, id=None):
    """Render the portal-wide linksets in the requested RDF serialization.

    :param id: output selector: 'rdf' -> RDF/XML, 'nt' -> N-Triples,
        anything else (including None) -> Notation3.
    :returns: the rendered linksets template for the chosen format.
    """
    # Controller that processes datasets (by tag, group, organization,
    # random sample or all) and derives their relationships.
    pan = PANController()
    tempDatasets = toolkit.get_action('package_list')(data_dict={})
    pan_rezultati = pan.process_datasets(tempDatasets)
    # Only datasets that participate in at least one relation.
    c.linksets = pan_rezultati['dR']
    tagURL = helpers.url_for(controller="package", action='read',
                             qualified=True)
    # BUG FIX: str.rstrip('packages') strips any trailing run of the
    # characters {p,a,c,k,g,e,s}, not the literal suffix 'packages'.
    # Remove exactly the route suffix instead, then append 'dataset'.
    for suffix in ('packages', 'package'):
        if tagURL.endswith(suffix):
            tagURL = tagURL[:-len(suffix)]
            break
    c.tagURL = tagURL + 'dataset'
    if id == 'rdf':
        response.headers['Content-Type'] = 'application/rdf+xml; charset=utf-8'
        return render('linksets/linksets.rdf')
    elif id == 'nt':
        response.headers['Content-Type'] = 'text/n3; charset=utf-8'
        return render('linksets/linksets.nt')
    else:
        response.headers['Content-Type'] = 'text/n3; charset=utf-8'
        return render('linksets/linksets.n3')
def linksets(self, id=None):
    """Render linksets over all portal datasets as RDF/XML, N-Triples or N3.

    :param id: 'rdf' selects RDF/XML, 'nt' selects N-Triples; any other
        value (or None) falls back to N3.
    :returns: the rendered template string.
    """
    # Process every dataset on the portal and extract its relations.
    pan = PANController()
    all_names = toolkit.get_action('package_list')(data_dict={})
    results = pan.process_datasets(all_names)
    # Datasets that have at least one relation.
    c.linksets = results['dR']
    base = helpers.url_for(controller="package", action='read',
                           qualified=True)
    # BUG FIX: the original used base.rstrip('packages'), which removes a
    # trailing run of the characters p/a/c/k/g/e/s rather than the exact
    # suffix. Strip the literal route suffix before appending 'dataset'.
    for route in ('packages', 'package'):
        if base.endswith(route):
            base = base[:-len(route)]
            break
    c.tagURL = base + 'dataset'
    if id == 'rdf':
        response.headers['Content-Type'] = 'application/rdf+xml; charset=utf-8'
        return render('linksets/linksets.rdf')
    elif id == 'nt':
        response.headers['Content-Type'] = 'text/n3; charset=utf-8'
        return render('linksets/linksets.nt')
    else:
        response.headers['Content-Type'] = 'text/n3; charset=utf-8'
        return render('linksets/linksets.n3')
def machineProcessable(self, subjectInfo, objectInfo):
    """Return 'true' when both datasets offer a machine-processable format.

    :param subjectInfo: dict with at least a 'name' key (subject dataset).
    :param objectInfo: dict with at least a 'name' key (object dataset).
    :returns: the string 'true' or 'false' (callers/templates expect
        strings, not booleans).
    """
    # Formats considered machine processable; a set gives O(1) membership
    # tests instead of the original O(n*m) nested loops.
    formats = {
        'rdf', 'ttl', 'rdfa', 'rdf+xml', 'n3', 'n-triples', 'nq',
        'sparql', 'csv', 'json', 'tsv', 'xml', 'open xml',
    }
    pan = PANController()
    # Resource formats declared by each dataset.
    sDF = pan._get_formats(subjectInfo['name'])
    oDF = pan._get_formats(objectInfo['name'])
    # Both datasets must expose at least one listed format (case-insensitive),
    # which is exactly what the original pair of counters tested for.
    subject_ok = any(fmt.lower() in formats for fmt in sDF)
    object_ok = any(fmt.lower() in formats for fmt in oDF)
    return 'true' if (subject_ok and object_ok) else 'false'
def machineProcessable(self, subjectInfo, objectInfo):
    """Check whether subject and object datasets are both machine processable.

    A dataset qualifies when at least one of its resource formats
    (compared case-insensitively) appears in the list below.

    :param subjectInfo: subject dataset dict; must contain 'name'.
    :param objectInfo: object dataset dict; must contain 'name'.
    :returns: 'true' if both qualify, otherwise 'false' (string result
        is part of the existing template contract).
    """
    # Machine-processable formats as a set: replaces the original
    # enumerate-based nested loops with O(1) lookups.
    mp_formats = {
        'rdf', 'ttl', 'rdfa', 'rdf+xml', 'n3', 'n-triples', 'nq',
        'sparql', 'csv', 'json', 'tsv', 'xml', 'open xml',
    }
    pan = PANController()
    subject_formats = pan._get_formats(subjectInfo['name'])
    object_formats = pan._get_formats(objectInfo['name'])
    if (any(f.lower() in mp_formats for f in subject_formats)
            and any(f.lower() in mp_formats for f in object_formats)):
        return 'true'
    return 'false'
def linkedFormat(self, subjectInfo, objectInfo):
    """Return 'true' when both datasets offer a linked-data format.

    :param subjectInfo: dict with at least a 'name' key (subject dataset).
    :param objectInfo: dict with at least a 'name' key (object dataset).
    :returns: the string 'true' or 'false' (template contract).
    """
    # Linked-data formats; set membership replaces the original O(n*m)
    # nested enumerate loops.
    formats = {
        'rdf', 'rdfa', 'ttl', 'n3', 'nq', 'rdf+xml', 'turtle', 'n-triples',
    }
    pan = PANController()
    # Resource formats declared by each dataset.
    sDF = pan._get_formats(subjectInfo['name'])
    oDF = pan._get_formats(objectInfo['name'])
    # Same predicate as the original counters: each side needs at least
    # one matching format (case-insensitive).
    subject_ok = any(fmt.lower() in formats for fmt in sDF)
    object_ok = any(fmt.lower() in formats for fmt in oDF)
    return 'true' if (subject_ok and object_ok) else 'false'
def linkedFormat(self, subjectInfo, objectInfo):
    """Check whether both datasets publish at least one linked-data format.

    :param subjectInfo: subject dataset dict; must contain 'name'.
    :param objectInfo: object dataset dict; must contain 'name'.
    :returns: 'true' if both have a linked-data format, else 'false'.
    """
    # Formats that indicate linked data, kept as a set so each format
    # check is a single O(1) membership test.
    ld_formats = {
        'rdf', 'rdfa', 'ttl', 'n3', 'nq', 'rdf+xml', 'turtle', 'n-triples',
    }
    pan = PANController()
    subject_formats = pan._get_formats(subjectInfo['name'])
    object_formats = pan._get_formats(objectInfo['name'])
    if (any(f.lower() in ld_formats for f in subject_formats)
            and any(f.lower() in ld_formats for f in object_formats)):
        return 'true'
    return 'false'
def compareFormats(self, subjectInfo, objectInfo):
    """Compute how similar two datasets' resource-format lists are.

    :param subjectInfo: subject dataset dict; must contain 'name'.
    :param objectInfo: object dataset dict; must contain 'name'.
    :returns: [SO, OS] where SO is the count of equal
        (subject-format, object-format) pairs relative to the object's
        format count and OS the same count relative to the subject's,
        each expressed as an int percentage.
    """
    pan = PANController()
    # Resource formats declared by each dataset.
    sDF = pan._get_formats(subjectInfo['name'])
    oDF = pan._get_formats(objectInfo['name'])
    # BUG FIX: a dataset with no formats made the divisions below raise
    # ZeroDivisionError; report zero similarity instead.
    if not sDF or not oDF:
        return [0, 0]
    # Both original nested loops counted the exact same quantity -- the
    # number of equal (subject, object) format pairs -- so count it once.
    pairs = sum(1 for s in sDF for o in oDF if s == o)
    # float() guards against silent integer truncation on Python 2
    # (the round(..., 2) * 100 below clearly expects a float mean).
    formatsSO = int(round(float(pairs) / len(oDF), 2) * 100)
    formatsOS = int(round(float(pairs) / len(sDF), 2) * 100)
    return [formatsSO, formatsOS]
def manager(self):
    """Select datasets per the submitted form, compute their relations
    and render the relation-visualization page.

    Recognized form fields (all optional): form[random] (sample size),
    form[tag], form[group], form[organization]. With none supplied,
    every dataset on the portal is processed.

    :returns: the rendered 'lire/manager.html' template.
    """
    pan = PANController()         # dataset selection / relationship processing
    relin = RELINController()     # prepares relations for visual display
    func = FUNCTIONSController()  # helper lookups (dataset keys, relation types)
    tempDatasets = []
    # Read form fields; missing or empty values fall back to neutral defaults.
    raw_random = request.params.get('form[random]', '')
    randomNum = int(raw_random) if raw_random != '' else 0
    tag = request.params.get('form[tag]', '')
    group = request.params.get('form[group]', '')
    organization = request.params.get('form[organization]', '')
    # Dispatch on whichever selection the user made (first match wins).
    if randomNum > 0:
        allNames = toolkit.get_action('package_list')(data_dict={})
        sample = random.sample(allNames, randomNum)
        pan_rezultati = pan.process_datasets(sample)
    elif tag != '':
        tempTags = toolkit.get_action('tag_show')(data_dict={'id': tag})
        # NOTE: substring key match ('name' in key) preserved from the
        # original -- it collects every value whose key contains 'name'.
        for pkg in tempTags['packages']:
            for key, value in pkg.items():
                if 'name' in key:
                    tempDatasets.append(value)
        pan_rezultati = pan.process_datasets(tempDatasets)
    elif group != '':
        tempGroup = toolkit.get_action('group_show')(data_dict={'id': group})
        for pkg in tempGroup['packages']:
            for key, value in pkg.items():
                if 'name' in key:
                    tempDatasets.append(value)
        pan_rezultati = pan.process_datasets(tempDatasets)
    elif organization != '':
        tempOrganization = toolkit.get_action('organization_show')(
            data_dict={'id': organization})
        for pkg in tempOrganization['packages']:
            for key, value in pkg.items():
                if 'name' in key:
                    tempDatasets.append(value)
        pan_rezultati = pan.process_datasets(tempDatasets)
    else:
        tempDatasets = toolkit.get_action('package_list')(data_dict={})
        pan_rezultati = pan.process_datasets(tempDatasets)
    relationships = pan_rezultati['relationships']  # names with relations
    dR = pan_rezultati['dR']                        # datasets with relations
    datasets = pan_rezultati['datasets']            # data to display
    # All relations per dataset, with commutative duplicates removed.
    eKey = relin.oneToAllRelationships(relationships, datasets)
    eKey = relin.removeCommutative(eKey)
    datasetsPortal = toolkit.get_action('package_list')(data_dict={})
    # Replace dataset names with their keys (the jQuery drawing library
    # addresses nodes as e1, e2, ...).
    # BUG FIX: the original did `del eKey[k]` while enumerating eKey,
    # which shifts indices and skips elements; build a filtered list.
    kept = []
    for rel in eKey:
        if rel['subject'] in datasetsPortal:
            # Resolve the type first -- it needs the original names.
            rel['type'] = func.getType(rel['subject'], rel['object'], dR)
            rel['subject'] = func.getKey(rel['subject'], datasets)
            rel['object'] = func.getKey(rel['object'], datasets)
            kept.append(rel)
    eKey = kept
    # Formats that describe linked data (used by the template).
    LDF = ['rdf', 'rdfa', 'sparql', 'n-triples', 'turtle', 'n3', 'nq']
    c.eKey = eKey
    c.datasets = datasets
    c.ldf = LDF
    tagURL = helpers.url_for(controller="package", action='read',
                             qualified=True)
    # BUG FIX: rstrip('packages') strips a character set, not the literal
    # suffix; remove the exact route suffix before appending 'dataset'.
    for suffix in ('packages', 'package'):
        if tagURL.endswith(suffix):
            tagURL = tagURL[:-len(suffix)]
            break
    c.tagURL = tagURL + 'dataset'
    c.relin_info_test = ''
    return p.toolkit.render('lire/manager.html')
def manager(self):
    """Process datasets chosen on the index form and render the manager view.

    Supported selections (checked in this order): a random sample of
    form[random] datasets, a tag (form[tag]), a group (form[group]), an
    organization (form[organization]); otherwise all portal datasets.

    :returns: the rendered 'lire/manager.html' template.
    """
    pan = PANController()         # dataset/relationship processing
    relin = RELINController()     # shapes relations for display
    func = FUNCTIONSController()  # key/type lookup helpers
    selected = []
    # Extract and normalize the form parameters.
    raw = request.params.get('form[random]', '')
    sample_size = int(raw) if raw != '' else 0
    tag = request.params.get('form[tag]', '')
    group = request.params.get('form[group]', '')
    organization = request.params.get('form[organization]', '')

    def _names(packages):
        # Collect every value whose key contains 'name' -- substring
        # match deliberately preserved from the original code.
        out = []
        for pkg in packages:
            for key, value in pkg.items():
                if 'name' in key:
                    out.append(value)
        return out

    # First non-empty selection wins.
    if sample_size > 0:
        everything = toolkit.get_action('package_list')(data_dict={})
        results = pan.process_datasets(random.sample(everything, sample_size))
    elif tag != '':
        shown = toolkit.get_action('tag_show')(data_dict={'id': tag})
        selected = _names(shown['packages'])
        results = pan.process_datasets(selected)
    elif group != '':
        shown = toolkit.get_action('group_show')(data_dict={'id': group})
        selected = _names(shown['packages'])
        results = pan.process_datasets(selected)
    elif organization != '':
        shown = toolkit.get_action('organization_show')(
            data_dict={'id': organization})
        selected = _names(shown['packages'])
        results = pan.process_datasets(selected)
    else:
        selected = toolkit.get_action('package_list')(data_dict={})
        results = pan.process_datasets(selected)
    relationships = results['relationships']  # dataset names with relations
    dR = results['dR']                        # datasets with relations
    datasets = results['datasets']            # display data
    # Expand to one-to-all relations, then drop commutative duplicates.
    eKey = relin.oneToAllRelationships(relationships, datasets)
    eKey = relin.removeCommutative(eKey)
    datasetsPortal = toolkit.get_action('package_list')(data_dict={})
    # Map names to keys (the jQuery library draws nodes named e1, e2, ...).
    # BUG FIX: replaces the original delete-while-enumerate over eKey,
    # which skipped elements after every deletion.
    resolved = []
    for rel in eKey:
        if rel['subject'] in datasetsPortal:
            # getType must run before the names are replaced with keys.
            rel['type'] = func.getType(rel['subject'], rel['object'], dR)
            rel['subject'] = func.getKey(rel['subject'], datasets)
            rel['object'] = func.getKey(rel['object'], datasets)
            resolved.append(rel)
    eKey = resolved
    # Linked-data formats exposed to the template.
    LDF = ['rdf', 'rdfa', 'sparql', 'n-triples', 'turtle', 'n3', 'nq', 'nt']
    c.eKey = eKey
    c.datasets = datasets
    c.ldf = LDF
    base = helpers.url_for(controller="package", action='read',
                           qualified=True)
    # BUG FIX: rstrip('packages') removes trailing characters from the set
    # {p,a,c,k,g,e,s}, not the suffix; strip the literal route suffix.
    for suffix in ('packages', 'package'):
        if base.endswith(suffix):
            base = base[:-len(suffix)]
            break
    c.tagURL = base + 'dataset'
    c.relin_info_test = ''
    return p.toolkit.render('lire/manager.html')