def q(args, sql):
    con = sqlite3.connect(args.dbfile)
    cur = con.cursor()
    shlog.verbose(sql)
    result = cur.execute(sql)
    con.commit()
    return result

def post_resource_assign(serv, usr, passw, activity, name):
    resource = resource_id_from_name(serv, usr, passw, name)
    if resource:
        # check if the resource has already been assigned
        request_data = {'Field': ['ResourceObjectId'],
                        'Filter': "ActivityObjectId = '%s' and ResourceObjectId = '%s'" % (str(activity), str(resource))}
        resp = m.soap_request(request_data, serv, 'ResourceAssignmentService', 'ReadResourceAssignments', usr, passw)
        if len(resp) > 0:
            # already assigned
            synched = 'already assigned to activity #' + str(activity)
            shlog.verbose(name + ' ' + synched)
        else:
            # actual post
            request_data = {'ResourceAssignment': {'ActivityObjectId': activity,
                                                   'ResourceObjectId': resource}}
            synched = m.soap_request(request_data, serv, 'ResourceAssignmentService', 'CreateResourceAssignments', usr, passw)
    else:
        return name + ' not found in resources'
    return synched

def catenate_workspace(self, workspace_to_catenate):
    # append the rows of another workspace to this one
    for n in range(1, workspace_to_catenate.row):
        self.row += 1
        shlog.verbose("catenating row %s" % self.row)
        columns_from_other = workspace_to_catenate.content[n]
        self.content[self.row] = columns_from_other
    self.col_max = max(self.col_max, workspace_to_catenate.col_max)

def get_activity_tickets(server, user, passw, serv):
    # Get all activity -> ticket field records
    if 'ncsa' in serv.lower():
        request_data = {'Field': ['ForeignObjectId', 'Text'],
                        'Filter': "UDFTypeTitle = 'NCSA Jira Mapping'"}
    if 'lsst' in serv.lower():
        request_data = {'Field': ['ForeignObjectId', 'Text'],
                        'Filter': "UDFTypeTitle = 'LSST Jira Mapping'"}
    shlog_list = ''
    for field in request_data['Field']:
        shlog_list += field + ', '
    shlog.verbose('Making Primavera request to get all activities with recorded Epics, fields: ' + shlog_list[:-2])
    tickets_api = soap_request(request_data, server, 'UDFValueService', 'ReadUDFValues', user, passw)
    # create a dict with activity -> ticket relations
    tickets = {}
    for tkt in tickets_api:
        try:
            if re.match('[A-Z]+-[0-9]+', tkt.Text):
                tickets.update({tkt.ForeignObjectId: tkt.Text})
        except TypeError:
            # caused by "None" values
            pass
    return tickets

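# A minimal, self-contained sketch of the ticket-key matching used in
# get_activity_tickets above; the sample values here are hypothetical, not
# taken from a real Primavera response.
import re

for text in ('DM-12345', 'not a ticket key', None):
    try:
        if re.match('[A-Z]+-[0-9]+', text):
            print(text + ' looks like a Jira issue key')
    except TypeError:
        # None values raise TypeError, mirroring the guard in the function above
        pass
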
def ingest_connections(args, sqldbfile):
    shlog.normal("about to open %s", sqldbfile)
    con = sqlite3.connect(args.dbfile)
    con_temp = sqlite3.connect(sqldbfile)
    c_temp = con_temp.cursor()
    # the ingest_connections query retrieves connections from the archidump database
    sql = """/*Retrieve id and the most recent version of a model matched by name*/
             /*desired_model should return one single row*/
             WITH desired_model(id, version, created_on) AS
                 (SELECT id, version, max(created_on)
                  FROM models m
                  WHERE m.name='%s'
                  GROUP BY id),
             /*Model stores all views that ever existed in all the models, regardless of whether they exist in the recent versions*/
             /*That's why we retrieve the views that match the model id+model version from desired_model*/
             desired_views(view_id, view_version, parent_folder_id) AS
                 (SELECT view_id, view_version, parent_folder_id
                  FROM views_in_model vim
                  INNER JOIN desired_model dm on dm.version=vim.model_version AND dm.id=vim.model_id),
             /*Now that we have the most recent view versions from the most recent model, we can get the most recent connections*/
             desired_connections(connection_id, connection_version, view_version, view_id) AS
                 (SELECT DISTINCT vciv.connection_id, vciv.connection_version, vciv.view_version, vciv.view_id
                  FROM desired_views dv
                  INNER JOIN views_connections_in_view vciv on vciv.view_id = dv.view_id AND vciv.view_version = dv.view_version)
             SELECT vc.id as connection_id, dc.view_id, vc.relationship_id
             FROM views_connections vc
             INNER JOIN desired_connections dc on vc.id=dc.connection_id AND vc.version = dc.connection_version""" % args.prefix
    shlog.verbose(sql)
    c_temp.execute(sql)
    rows = c_temp.fetchall()
    connectionsTable.insert(con, rows)
    ingestTable.insert(con, [[iso_now(), sqldbfile, 'CONNECTIONS']])

def ingest_relations(args, sqldbfile):
    shlog.normal("about to open %s", sqldbfile)
    con = sqlite3.connect(args.dbfile)
    con_temp = sqlite3.connect(sqldbfile)
    c_temp = con_temp.cursor()
    # the ingest_relations query retrieves relevant relations from the archidump database
    sql = """/*Retrieve id and the most recent version of a model matched by name*/
             /*desired_model should return one single row*/
             WITH desired_model(id, version, created_on) AS
                 (SELECT id, version, max(created_on)
                  FROM models m
                  WHERE m.name='%s'
                  GROUP BY id),
             /*Model stores all relations that ever existed in all the models, regardless of whether they exist in the recent versions*/
             /*That's why we retrieve relations that match the model id+model version from desired_model*/
             desired_model_relations(relationship_id, relationship_version) AS
                 (SELECT relationship_id, relationship_version
                  FROM relationships_in_model rim
                  INNER JOIN desired_model dm on dm.version=rim.model_version AND dm.id=rim.model_id
                  /* Hotfix: remove relationship_id not found in views */
                  WHERE relationship_id in (SELECT DISTINCT relationship_id FROM views_connections))
             /*With the correct relation ids+versions identified, we can retrieve the matches from the relationships table that has all the properties*/
             SELECT r.id, r.class as Type, r.name, REPLACE(r.documentation,"'","''"), r.source_id as Source, r.target_id as Target
             FROM relationships r
             INNER JOIN desired_model_relations dmr on dmr.relationship_id=r.id AND dmr.relationship_version=r.version""" % args.prefix
    shlog.verbose(sql)
    c_temp.execute(sql)
    rows = c_temp.fetchall()
    relationsTable.insert(con, rows)
    ingestTable.insert(con, [[iso_now(), sqldbfile, 'RELATIONS']])

def ingest_elements(args, sqldbfile):
    shlog.normal("about to open %s", sqldbfile)
    con = sqlite3.connect(args.dbfile)
    con_temp = sqlite3.connect(sqldbfile)
    c_temp = con_temp.cursor()
    # the ingest_elements query retrieves relevant elements from the archidump database
    sql = """/*Retrieve id and the most recent version of a model matched by name*/
             /*desired_model should return one single row*/
             WITH desired_model(id, version, created_on) AS
                 (SELECT id, version, max(created_on)
                  FROM models m
                  WHERE m.name='%s'
                  GROUP BY id),
             /*Model stores all elements that ever existed in all the models, regardless of whether they exist in the recent versions*/
             /*That's why we retrieve elements that match the model id+model version from desired_model*/
             desired_model_elements(element_id, element_version, parent_folder_id) AS
                 (SELECT element_id, element_version, parent_folder_id
                  FROM elements_in_model eim
                  INNER JOIN desired_model dm on dm.version=eim.model_version AND dm.id=eim.model_id)
             /*With the correct element ids+versions identified, we can retrieve the matches from the elements table that has all the properties*/
             SELECT e.id, e.class as Type, e.name, REPLACE(e.documentation,"'","''"), dme.parent_folder_id as ParentFolder
             FROM elements e
             INNER JOIN desired_model_elements dme on dme.element_id=e.id AND dme.element_version=e.version""" % args.prefix
    shlog.verbose(sql)
    c_temp.execute(sql)
    rows = c_temp.fetchall()
    elementsTable.insert(con, rows)
    ingestTable.insert(con, [[iso_now(), sqldbfile, 'ELEMENTS']])
    con_temp.close()

def get_step_tickets(server, user, passw, serv):
    # Get all step -> ticket field records
    if 'ncsa' in serv.lower():
        request_data = {'Field': ['ForeignObjectId', 'Text'],
                        'Filter': "UDFTypeTitle = 'NCSA JIRA ID'"}
    if 'lsst' in serv.lower():
        request_data = {'Field': ['ForeignObjectId', 'Text'],
                        'Filter': "UDFTypeTitle = 'LSST JIRA ID'"}
    shlog_list = ''
    for field in request_data['Field']:
        shlog_list += field + ', '
    shlog.verbose('Making Primavera request to get all steps with recorded Stories, fields: ' + shlog_list[:-2])
    step_tickets_api = soap_request(request_data, server, 'UDFValueService', 'ReadUDFValues', user, passw)
    # create a dict with step -> ticket relations
    step_tickets = {}
    for tkt in step_tickets_api:
        try:
            if re.match('[A-Z]+-[0-9]+', tkt.Text):
                step_tickets.update({tkt.ForeignObjectId: tkt.Text})
        except TypeError:
            # type error might be caused by None values. ignore error.
            pass
    return step_tickets

def get_trigger(args):
    # return a list of trigger element ids
    shlog.normal("about to open %s", args.dbfile)
    con = sqlite3.connect(args.dbfile)
    curs = con.cursor()
    # this query returns all BusinessEvent->ApplicationProcess triggers; when a search term
    # is given, only those appearing in views whose folder paths match args.search_term
    if args.search_term is None:
        sql = """SELECT Source as Id
                 FROM RELATIONS r
                 INNER JOIN ELEMENTS e1 on e1.ID = Source
                 INNER JOIN ELEMENTS e2 on e2.ID = Target
                 WHERE e1.Type = 'BusinessEvent'
                 AND e2.Type = 'ApplicationProcess'
                 AND r.Type = 'TriggeringRelationship'"""
    else:
        sql = """WITH triggers(Id) AS
                     (SELECT Source as Id
                      FROM RELATIONS r
                      INNER JOIN ELEMENTS e1 on e1.ID = Source
                      INNER JOIN ELEMENTS e2 on e2.ID = Target
                      WHERE e1.Type = 'BusinessEvent'
                      AND e2.Type = 'ApplicationProcess'
                      AND r.Type = 'TriggeringRelationship')
                 SELECT DISTINCT Object_id as Id
                 FROM FOLDER f
                 INNER JOIN VIEWS v on v.Parent_folder_id = f.Id
                 INNER JOIN VIEW_OBJECTS vo on v.Id = vo.View_id
                 INNER JOIN triggers t on t.Id = vo.Object_id
                 WHERE f.Depth like '%F1LL3R%'""".replace('F1LL3R', str(args.search_term))
    shlog.verbose(sql)
    curs.execute(sql)
    rows = curs.fetchall()
    trigger_list = [x[0] for x in rows]
    return trigger_list

def add_element(self, content_element):
    # populate the current cell on the current row, then advance the column
    self.content[self.row][self.col] = content_element
    shlog.verbose("content: (%s,%s):%s" % (self.row, self.col, self.content[self.row][self.col]))
    self.col_max = max(self.col_max, self.col)
    self.col += 1

def search_for_parent(self, project, summary):
    summary = summary.replace('"', '')  # checked - search still works the same
    jql = '''project = "%s" and summary ~ "%s"''' % (project, summary)
    shlog.verbose('Issuing JQL query: ' + jql)
    issue = self.jira.search_issues(jql)
    count = len(issue)
    return (issue, count)

def qtranspose(args, sql):
    # run a query and return its results transposed (columns as rows)
    shlog.verbose(sql)
    results = []
    rets = q(args, sql).fetchall()
    if not rets:
        return [[], []]
    for result in rets:
        results.append(result)
    return zip(*results)

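# A usage sketch for q()/qtranspose(), assuming shlog is configured as in this
# module; the args namespace and the demo table are hypothetical stand-ins.
import argparse
import os
import sqlite3
import tempfile

tmp = tempfile.NamedTemporaryFile(suffix='.db', delete=False)
args = argparse.Namespace(dbfile=tmp.name)
setup = sqlite3.connect(args.dbfile)
setup.execute("CREATE TABLE demo (id INTEGER, name TEXT)")
setup.executemany("INSERT INTO demo VALUES (?, ?)", [(1, 'a'), (2, 'b')])
setup.commit()
setup.close()
# row-major [(1, 'a'), (2, 'b')] comes back column-major as [(1, 2), ('a', 'b')]
print([list(col) for col in qtranspose(args, "SELECT id, name FROM demo")])
os.unlink(tmp.name)
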
def q(args, sql):
    # a funnel routine for report queries, main benefit is query printing
    con = sqlite3.connect(args.dbfile)
    con.text_factory = lambda x: x.decode("utf-8")
    cur = con.cursor()
    shlog.verbose(sql)
    result = cur.execute(sql)
    con.commit()
    return result

def qdescription(args, sql):
    con = sqlite3.connect(args.dbfile)
    con.row_factory = sqlite3.Row
    cur = con.cursor()
    shlog.verbose("description query %s" % sql)
    results = cur.execute(sql)
    description = [d[0] for d in cur.description]
    shlog.verbose("description obtained %s" % (description))
    con.close()
    return description

def qd(args, sql):
    # return the results of the query as dict-like sqlite3.Row objects,
    # one for each row.
    con = sqlite3.connect(args.dbfile)
    con.row_factory = sqlite3.Row
    cur = con.cursor()
    shlog.verbose(sql)
    results = cur.execute(sql)
    shlog.normal(results)
    return results

def ingest_view_objects(args, sqldbfile):
    shlog.normal("about to open %s", sqldbfile)
    con = sqlite3.connect(args.dbfile)
    con_temp = sqlite3.connect(sqldbfile)
    c_temp = con_temp.cursor()
    # the ingest_view_objects query retrieves view objects from the archidump database
    sql = """/*Retrieve id and the most recent version of a model matched by name*/
             /*desired_model should return one single row*/
             WITH desired_model(id, version, created_on) AS
                 (SELECT id, version, max(created_on)
                  FROM models m
                  WHERE m.name='%s'
                  GROUP BY id),
             /*Model stores all objects that ever existed in all the models, regardless of whether they exist in the recent versions*/
             /*That's why we retrieve the view objects that match the model id+model version from desired_model*/
             desired_views(view_id, view_version) AS
                 (SELECT view_id, view_version
                  FROM views_in_model vim
                  INNER JOIN desired_model dm on dm.version=vim.model_version AND dm.id=vim.model_id),
             /*objects are versioned as well, so we get their ids too*/
             objects_in_view(view_id, view_version, object_id, object_version) AS
                 (SELECT DISTINCT dv.view_id, dv.view_version, voiv.object_id, voiv.object_version
                  FROM desired_views dv
                  INNER JOIN views_objects_in_view voiv on voiv.view_id=dv.view_id AND voiv.view_version=dv.view_version),
             /*With the correct view object ids+versions identified, we can retrieve the matches from the views_objects table that have all the properties*/
             /*the first round is to get objects that are not inside other objects ("root objects")*/
             null_view_objects(id, version, class, container_id, element_id, name, content) AS
                 (SELECT vo.id, vo.version, vo.class, vo.container_id, vo.element_id, name, content
                  FROM objects_in_view oiv
                  INNER JOIN views_objects vo on vo.container_id=oiv.view_id AND vo.id=oiv.object_id AND vo.version=oiv.object_version),
             /*With root elements now identified, we can loop join view_objects where object_id is the container id*/
             depths(id, version, class, container_id, root_view_id, element_id, object_id, name, content) AS
                 (SELECT id, version, class, container_id, container_id as root_view_id, element_id, id as object_id, name, content
                  FROM null_view_objects
                  UNION ALL
                  SELECT vo.id, vo.version, vo.class, vo.container_id, depths.root_view_id, vo.element_id, vo.id as object_id, vo.name, vo.content
                  FROM views_objects vo
                  /*Below is the most important line in the entire query*/
                  JOIN depths on vo.container_id = depths.id),
             /*After this looped JOIN, filter the relevant objects with the help of objects_in_view that has the proper object=>view relation*/
             desired_objects(view_id, element_id, object_id, class, name, content, container_id) AS
                 (SELECT DISTINCT oiv.view_id, depths.element_id, depths.object_id, depths.class, depths.name, depths.content, depths.container_id
                  FROM depths
                  INNER JOIN objects_in_view oiv on oiv.object_id = depths.id AND oiv.object_version = depths.version AND oiv.view_id = depths.root_view_id)
             /*container_ids that are enclaves are still using their object_ids instead of element_ids. Time to fix that*/
             SELECT DISTINCT do.view_id, do.element_id as Object_id, do.class, do.name, do.content,
                             IFNULL(do2.element_id, do.container_id) as container_id
             FROM desired_objects do
             LEFT JOIN desired_objects do2 on do.container_id = do2.object_id""" % args.prefix
    shlog.verbose(sql)
    c_temp.execute(sql)
    rows = c_temp.fetchall()
    viewobjectsTable.insert(con, rows)
    ingestTable.insert(con, [[iso_now(), sqldbfile, 'VIEW_OBJECTS']])

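# A self-contained sketch of the recursive-CTE technique the depths(...) table
# above relies on: start from root objects, then repeatedly join children whose
# container_id points at an already-visited row. The toy table and its values
# are hypothetical, not the archidump schema.
import sqlite3

demo = sqlite3.connect(':memory:')
demo.executescript("""CREATE TABLE toy (id TEXT, container_id TEXT);
                      INSERT INTO toy VALUES ('root', NULL), ('child', 'root'), ('grandchild', 'child');""")
walk = """WITH RECURSIVE depths(id, root_id) AS
              (SELECT id, id FROM toy WHERE container_id IS NULL
               UNION ALL
               SELECT t.id, depths.root_id
               FROM toy t JOIN depths ON t.container_id = depths.id)
          SELECT id, root_id FROM depths"""
print(demo.execute(walk).fetchall())
# expected: [('root', 'root'), ('child', 'root'), ('grandchild', 'root')]
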
def insert(self, con, rows):
    # insert rows of Ascii into the database table
    # after applying conversion functions.
    insert_statement = ",".join(["?" for name in self.columns])
    insert_statement = "insert into " + self.tableName + " values (" + insert_statement + ")"
    shlog.verbose(insert_statement)
    cur = con.cursor()
    for row in rows:
        shlog.debug(row)
        # apply conversion functions
        r = [f(item) for (item, f) in zip(row, self.hfm)]
        cur.execute(insert_statement, r)
    con.commit()

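# A sketch of how a table helper with insert() might be driven; DemoTable and
# its attributes are hypothetical stand-ins for the real table classes, and
# shlog is assumed to be configured as in this module.
import sqlite3

class DemoTable:
    tableName = 'DEMO'
    columns = ['Id', 'Name']
    hfm = [str, str]      # one conversion function per column
    insert = insert       # bind the module-level method above

con = sqlite3.connect(':memory:')
con.execute("CREATE TABLE DEMO (Id TEXT, Name TEXT)")
DemoTable().insert(con, [(1, 'alpha'), (2, 'beta')])
print(con.execute("SELECT * FROM DEMO").fetchall())  # [('1', 'alpha'), ('2', 'beta')]
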
def ticket_wipe(server, user, pw, ForeignObjectId, UDFTypeObjectId):
    request_data = {'ObjectId': {'UDFTypeObjectId': str(UDFTypeObjectId),
                                 'ForeignObjectId': str(ForeignObjectId)}}
    shlog.verbose('Making DeleteUDFValues request for UDFTypeObjectId ' + str(UDFTypeObjectId) +
                  ', ForeignObjectId ' + str(ForeignObjectId))
    response = soap_request(request_data, server, 'UDFValueService', 'DeleteUDFValues', user, pw)
    shlog.verbose('Server response: ' + str(response))
    return response

def search_for_issue(self, summary, parent=None, name_only_search=False):
    summary = summary.replace('"', '')  # checked - search still works the same
    if parent:
        jql = '''summary ~ "\\"%s\\"" and "Epic Link" = "%s"''' % (summary, parent)
    else:
        jql = '''summary ~ "\\"%s\\"" and issuetype = Epic''' % (summary)
    if name_only_search:
        jql = '''summary ~ "\\"%s\\""''' % (summary)
    shlog.verbose('Issuing JQL query: ' + jql)
    issue = self.jira.search_issues(jql)
    count = len(issue)
    return (issue, count)

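# A quick illustration of the JQL the branches above emit; the summary and
# epic key are hypothetical. The escaped inner quotes ask Jira's text search
# to match the summary as one exact phrase rather than as separate terms.
summary, parent = 'Data Backbone Services', 'DM-100'
print('''summary ~ "\\"%s\\"" and "Epic Link" = "%s"''' % (summary, parent))
# summary ~ "\"Data Backbone Services\"" and "Epic Link" = "DM-100"
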
def ingest_enclave_content(args, sqldbfile):
    shlog.normal("about to open %s", args.dbfile)
    con = sqlite3.connect(args.dbfile)
    # note: ingest_enclave_content connects to the same DB for source and output data
    con_temp = sqlite3.connect(args.dbfile)
    c_temp = con_temp.cursor()
    # this query maps every object to the enclave whose grouping points at it
    sql = """SELECT DISTINCT r.Target as Object_id, e.Id as Enclave_id
             FROM RELATIONS r
             INNER JOIN ENCLAVES e on e.Id = r.Source"""
    shlog.verbose(sql)
    c_temp.execute(sql)
    rows = c_temp.fetchall()
    enclavecontentTable.insert(con, rows)
    ingestTable.insert(con, [[iso_now(), args.dbfile, 'ENCLAVE_CONTENT']])

def qd(args, sql, passed_stanza):
    # return the results of the query as dict-like sqlite3.Row objects,
    # one for each row.
    con = sqlite3.connect(args.dbfile)
    con.row_factory = sqlite3.Row
    cur = con.cursor()
    shlog.verbose(sql)
    results = cur.execute(sql)
    shlog.debug(results)
    # header collection handling:
    # the 0th call to qd() is from contexts;
    # the one that follows it directly is the one we can snatch column names from
    if passed_stanza.left_column_collections == 1:
        passed_stanza.left_columns_collector = list(map(lambda x: x[0], cur.description))
    passed_stanza.left_column_collections += 1
    return results

def delete_list(args):
    # return a list of html files to nuke
    shlog.normal("about to open %s", args.dbfile)
    con = sqlite3.connect(args.dbfile)
    curs = con.cursor()
    # this query returns all views that have their paths matched with args.searchterm
    sql = """SELECT v.Id
             FROM FOLDER f
             INNER JOIN VIEWS v on v.Parent_folder_id = f.Id
             WHERE f.Depth like '%PL4C3H0LD%'""".replace('PL4C3H0LD', args.searchterm)
    shlog.verbose(sql)
    curs.execute(sql)
    rows = curs.fetchall()
    view_list = [x[0] for x in rows]
    return view_list

def __init__(self, args, sql):
    self.context_list = []
    if sql is None:
        # shim to support no context... Fix after getting the chain to work.
        self.context_list = [{}]
        return
    con = sqlite3.connect(args.dbfile)
    cur = con.cursor()
    shlog.verbose(sql)
    self.results = [r for r in cur.execute(sql)]
    self.names = [d[0] for d in cur.description]
    shlog.debug("context list generated with names %s" % self.names)
    for row in self.results:
        d = {}
        for (key, item) in zip(self.names, row):
            d[key] = item
        self.context_list.append(d)
    shlog.verbose("new query context: %s", self.context_list)

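# A minimal sketch of the row -> dict conversion this constructor performs;
# the in-memory table and its values are hypothetical.
import sqlite3

con = sqlite3.connect(':memory:')
con.executescript("""CREATE TABLE t (Name TEXT, Type TEXT);
                     INSERT INTO t VALUES ('NCSA', 'Enclave');""")
cur = con.cursor()
rows = [r for r in cur.execute("SELECT Name, Type FROM t")]
names = [d[0] for d in cur.description]
print([dict(zip(names, row)) for row in rows])  # [{'Name': 'NCSA', 'Type': 'Enclave'}]
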
def post_step_resource_assign(serv, usr, passw, step, name):
    resource = resource_id_from_name(serv, usr, passw, name)
    if resource:
        # check if the resource has already been assigned
        name = name.replace(',', '')
        name_first = name.split(' ')[0]
        name_last = name.split(' ')[-1]
        request_data = {'Field': ['Text'],
                        'Filter': "UDFTypeObjectId = '158' and ForeignObjectId = '%s' and Text like '%%%s%%' and "
                                  "Text like '%%%s%%'" % (str(step), str(name_first), str(name_last))}
        resp = m.soap_request(request_data, serv, 'UDFValueService', 'ReadUDFValues', usr, passw)
        if len(resp) > 0:
            # already assigned
            synched = 'already assigned to step #' + str(step)
            shlog.verbose(name + ' ' + synched)
        else:
            # actual post
            try:
                request_data = {'Field': ['Name'],
                                'Filter': "ObjectId = '%s'" % str(resource)}
                synched = m.soap_request(request_data, serv, 'ResourceService', 'ReadResources', usr, passw)
                if len(synched) > 0:
                    # if it's not None
                    p6_name = synched[0]['Name']
                else:
                    p6_name = None
                baseline = m.actual_baseline(serv, usr, passw)
                resp = m.ticket_wipe(serv, usr, passw, step, 158)  # just in case
                shlog.verbose('Posting name ' + str(p6_name) + ' to step #' + str(step))
                m.ticket_post(serv, usr, passw, step, baseline, p6_name, 158)
            except IndexError:
                return 'IndexError thrown, user not found'
    else:
        return name + ' not found in resources'
    return synched

def check_if_complete(self, project, issue):
    # statuses equivalent to "Completed"
    if 'ncsa' in self.server.lower():
        jql = 'project = "%s" and id = "%s" and (status = "Closed" or status = "Resolved")' % (project, issue)
    if 'lsst' in self.server.lower():
        jql = """project = "%s" and id = "%s" and (status = "Done" or status = "Won't Fix")""" % (project, issue)
    shlog.verbose('JQL: ' + jql)
    try:
        count = len(self.jira.search_issues(jql))
    except jira.exceptions.JIRAError:
        # see:
        # https://jira.atlassian.com/browse/JRASERVER-23287?focusedCommentId=220596&page=com.atlassian.jira.plugin.system.issuetabpanels%3Acomment-tabpanel#comment-220596
        count = 0
    return count > 0

def post_status(self, ticket, status):
    # check if we're already in the right status
    target_issue = self.get_issue(ticket)
    if target_issue:
        target_status = str(target_issue.fields.status)
        if target_status == status or target_status == "Won't Fix" or target_status == "Invalid":
            shlog.verbose(ticket + ' status ' + target_status + ' matches desired status ' + status +
                          ', skipping...')
            return
        else:
            # retrieve transitions needed to get to desired status
            transitions = self.transitionX(target_status, status)
            for stat in transitions[1:]:
                shlog.verbose('Posting status ' + stat + ' to ticket ' + ticket)
                self.jira.transition_issue(ticket, transition=stat)
    else:
        return

def get_activity_scope(act_id, primaserver, user, pw):
    # attempt to retrieve the scope notebook record of the activity
    request_data = {'Field': ['RawTextNote'],
                    'Filter': "ActivityObjectId = '%s' and NotebookTopicName = 'Scope'" % str(act_id)}
    shlog_list = ''
    for field in request_data['Field']:
        shlog_list += field + ', '
    shlog.verbose('Making Primavera ActivityNoteService request for the description of activity ActivityId #' +
                  str(act_id) + ', fields: ' + shlog_list[:-2])
    act_note_api = soap_request(request_data, primaserver, 'ActivityNoteService', 'ReadActivityNotes', user, pw)
    if len(act_note_api) == 0:
        return None
    else:
        return h.html2text(act_note_api[0]['RawTextNote'])

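# A small illustration of the final html2text step, assuming h is the
# html2text package imported as in this module; the markup is hypothetical.
import html2text as h

print(h.html2text('<p>Scope: build the <b>archive</b> service</p>'))
# Scope: build the **archive** service
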
def ingest_enclaves(args, sqldbfile):
    shlog.normal("about to open %s", args.dbfile)
    con = sqlite3.connect(args.dbfile)
    # note: ingest_enclaves connects to the same DB for source and output data
    con_temp = sqlite3.connect(args.dbfile)
    c_temp = con_temp.cursor()
    # this query collects every enclave grouping together with its location
    sql = """SELECT DISTINCT e.Id, e.Name, REPLACE(e.Documentation,"'","''") as Documentation, e1.Name as Location
             /* Get all groupings from the Enclave FOLDER*/
             FROM FOLDER f
             INNER JOIN ELEMENTS e on e.ParentFolder = f.Id and e.Type = 'Grouping'
             /* Get all locations by linking relations and then elements again*/
             INNER JOIN RELATIONS r on r.Target = e.Id
             INNER JOIN ELEMENTS e1 on e1.ID = r.Source
             WHERE f.Name = 'Enclaves'
             AND e1.Type = 'Location'"""
    shlog.verbose(sql)
    c_temp.execute(sql)
    rows = c_temp.fetchall()
    enclaveTable.insert(con, rows)
    ingestTable.insert(con, [[iso_now(), args.dbfile, 'ENCLAVES']])

def check_if_open(self, project, issue):
    # statuses equivalent to "In Progress"
    if 'ncsa' in self.server.lower():
        jql = 'project = "%s" and id = "%s" and (status = "Reopened" or status = "System Change Control" ' \
              'or status = "In Progress" or status = "Blocked" or status = "Waiting on User" ' \
              'or status = "Sleeping")' % (project, issue)
    if 'lsst' in self.server.lower():
        jql = 'project = "%s" and id = "%s" and (status = "In Progress" or status = "In Review" ' \
              'or status = "Reviewed")' % (project, issue)
    shlog.verbose('JQL: ' + jql)
    try:
        count = len(self.jira.search_issues(jql))
    except jira.exceptions.JIRAError:
        # see:
        # https://jira.atlassian.com/browse/JRASERVER-23287?focusedCommentId=220596&page=com.atlassian.jira.plugin.system.issuetabpanels%3Acomment-tabpanel#comment-220596
        count = 0
    return count > 0

def index_cleaner(args):
    # delete spans matching the search term from the html index
    # load the file
    shlog.verbose('Parsing ' + args.apache + 'index.html')
    soup = bs(open(args.apache + 'index.html'), "html.parser")
    # find all spans
    spans = soup.findAll('span')
    shlog.verbose('Found ' + str(len(spans)) + ' spans')
    # decompose spans containing any mention of the search term
    for match in spans:
        if args.searchterm in match.text:
            shlog.normal('Deleting ' + str(match))
            match.decompose()
    shlog.normal('Writing censored html to ' + args.apache + 'index.html')
    with open(args.apache + 'index.html', "w") as file:
        file.write(str(soup))
    return

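# A tiny standalone illustration of the span-removal technique above; the
# HTML snippet and search term are hypothetical.
from bs4 import BeautifulSoup as bs

soup = bs('<ul><span>keep me</span><span>nuke this</span></ul>', 'html.parser')
for span in soup.findAll('span'):
    if 'nuke' in span.text:
        span.decompose()  # drop the tag and its contents from the tree
print(str(soup))  # <ul><span>keep me</span></ul>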