Example #1
0
def ingest_elements(args, sqldbfile):
    shlog.normal("about to open %s", sqldbfile)
    con = sqlite3.connect(args.dbfile)
    con_temp = sqlite3.connect(sqldbfile)
    c_temp = con_temp.cursor()
    # the ingest_elements query retrieves relevant elements from the archidump database
    sql = """/*Retrieve id and the most recent version of a model matched by name*/
                 /*desired_model should return one single row*/
                 WITH desired_model(id, version, created_on) AS (SELECT id, version, max(created_on) FROM models m WHERE m.name='%s' GROUP BY id),
                 /*Models store all elements that ever existed, regardless of whether they exist in the most recent version*/
                 /*That's why we retrieve only the elements that match the model id+version from desired_model*/
                 desired_model_elements(element_id, element_version, parent_folder_id) AS (SELECT element_id, element_version, parent_folder_id
                 FROM elements_in_model eim
                 INNER JOIN desired_model dm on dm.version=eim.model_version AND dm.id=eim.model_id)
                 /*With the correct element ids+versions identified, we can retrieve the matches from the elements table that has all the properties*/
				 SELECT e.id, e.class as Type, e.name, REPLACE(e.documentation,"'","''"), dme.parent_folder_id as ParentFolder
                 FROM elements e
                 INNER JOIN desired_model_elements dme on dme.element_id=e.id AND dme.element_version=e.version
                 """ % args.prefix
    shlog.verbose(sql)
    c_temp.execute(sql)
    rows = c_temp.fetchall()
    elementsTable.insert(con, rows)
    ingestTable.insert(con, [[iso_now(), sqldbfile, 'ELEMENTS']])
    con_temp.close()
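# A minimal sketch (not part of the original module) of the same "latest model
# version" lookup using sqlite3 parameter binding instead of % interpolation;
# binding avoids the manual quote-escaping seen above. Table and column names
# are taken from the query; connection handling is assumed.
def latest_model_version(con_temp, model_name):
    sql = """WITH desired_model(id, version, created_on) AS
                 (SELECT id, version, max(created_on)
                  FROM models m
                  WHERE m.name = ?
                  GROUP BY id)
             SELECT id, version FROM desired_model"""
    cur = con_temp.cursor()
    cur.execute(sql, (model_name,))  # the model name is bound, never interpolated
    return cur.fetchone()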
Example #2
0
def ingest_relations(args, sqldbfile):
    shlog.normal("about to open %s", sqldbfile)
    con = sqlite3.connect(args.dbfile)
    con_temp = sqlite3.connect(sqldbfile)
    c_temp = con_temp.cursor()
    # the ingest_relations query retrieves relevant relations from the archidump database
    sql = """/*Retrieve id and the most recent version of a model matched by name*/
             /*desired_model should return one single row*/
             WITH desired_model(id, version, created_on) AS (SELECT id, version, max(created_on) FROM models m WHERE m.name='%s' GROUP BY id),
             /*Models store all relations that ever existed, regardless of whether they exist in the most recent version*/
             /*That's why we retrieve only the relations that match the model id+version from desired_model*/
             desired_model_relations(relationship_id, relationship_version) AS (SELECT relationship_id, relationship_version
             FROM relationships_in_model rim
             INNER JOIN desired_model dm on dm.version=rim.model_version AND dm.id=rim.model_id
             /* Hotfix: remove relationship_id not found in views */
			 WHERE relationship_id in (SELECT DISTINCT relationship_id FROM views_connections))
             /*With the correct relations ids+versions identified, we can retrieve the matches from the relations table that has all the properties*/
             SELECT r.id, r.class as Type, r.name, REPLACE(r.documentation,"'","''"), r.source_id as source, r.target_id as Target
             FROM relationships r
             INNER JOIN desired_model_relations dmr on dmr.relationship_id=r.id AND dmr.relationship_version=r.version
             """ % args.prefix
    shlog.verbose(sql)
    c_temp.execute(sql)
    rows = c_temp.fetchall()
    relationsTable.insert(con, rows)
    ingestTable.insert(con, [[iso_now(), sqldbfile, 'RELATIONS']])
Example #3
0
def query(args):
    for r in qd(args, args.sql):
        shlog.normal("row: %s" % r)
        shlog.normal("type: %s" % type(r))
        shlog.normal("keys: %s" % r.keys())
        shlog.normal("contents: %s" %",".join([r[k] for k in r.keys()]))
    shlog.normal ("description: %s" % qdescription(args, args.sql))
Example #4
0
def multi_ticket_post(server, user, pw, code, Id, jira_id, activity=True):
    if activity:
        # request all activities with the same ID
        shlog.verbose('Making a request to find duplicates for activity Id ' +
                      Id)
        request_data = {
            'Field': ['ObjectId', 'ProjectObjectId'],
            'Filter': "Id = '%s'" % Id
        }
        dupes = soap_request(request_data, server, 'ActivityService',
                             'ReadActivities', user, pw)
    else:
        # the step request goes off name, because there's no id
        shlog.verbose('Making a request to find duplicates for step named ' +
                      Id)
        request_data = {
            'Field': ['ObjectId', 'ProjectObjectId'],
            'Filter': "Name = '%s'" % Id
        }
        dupes = soap_request(request_data, server, 'ActivityStepService',
                             'ReadActivitySteps', user, pw)
        if len(dupes) == 0:
            shlog.normal(
                'Critical error: Primavera returned no matches! This is caused by the activity name containing'
                ' single quotes. Please enter the Story ticket ID manually.')
            return None
    shlog.verbose('Primavera returned ' + str(len(dupes)) + ' duplicates')
    # loop through all of the object ids
    resp = None
    for entry in dupes:
        # delete call
        resp = ticket_wipe(server, user, pw, entry.ObjectId, code)
        # post call
        resp = ticket_post(server, user, pw, entry.ObjectId,
                           entry.ProjectObjectId, jira_id, code)
    return resp
Example #5
0
def get_trigger(args):
    # return a list of BusinessEvent element ids that trigger an ApplicationProcess
    shlog.normal("about to open %s", args.dbfile)
    con = sqlite3.connect(args.dbfile)
    curs = con.cursor()
    # without a search term, return all triggers; with one, restrict to triggers appearing in views under folders whose depth matches args.search_term
    if args.search_term is None:
        sql = """SELECT Source as Id
                 FROM RELATIONS r
                 INNER JOIN ELEMENTS e1 on e1.ID = Source
                 INNER JOIN ELEMENTS e2 on e2.ID = Target
                 WHERE e1.Type = 'BusinessEvent' AND e2.Type = 'ApplicationProcess' AND r.Type = 'TriggeringRelationship'"""
    else:
        sql = """WITH triggers(Id) AS (SELECT Source as Id
                 FROM RELATIONS r
                 INNER JOIN ELEMENTS e1 on e1.ID = Source
                 INNER JOIN ELEMENTS e2 on e2.ID = Target
                 WHERE e1.Type = 'BusinessEvent' AND e2.Type = 'ApplicationProcess' AND r.Type = 'TriggeringRelationship')
                 SELECT DISTINCT Object_id as Id
                 FROM FOLDER f
                 INNER JOIN VIEWS v on v.Parent_folder_id = f.Id
                 INNER JOIN VIEW_OBJECTS vo on v.Id = vo.View_id
				 INNER JOIN triggers t on t.Id = vo.Object_id
				 WHERE f.Depth like '%F1LL3R%'""".replace('F1LL3R', str(args.search_term))
    shlog.verbose(sql)
    curs.execute(sql)
    rows = curs.fetchall()
    trigger_list = [x[0] for x in rows]
    return trigger_list
Example #6
0
def mk_node_plateau(args):
    """
    Make a table linking plateaus to nodes.
    plateau is the source,  e.g. 62afd6a9-1915-4220-ac50-21bc39cb69a5
    node is the target,     e.g. e9903810-361f-42fb-a1bd-f196f2ff25f3
    """

    sql = """
           CREATE TABLE NODE_PLATEAU as 
           SELECT
             relations.source   Pla_id,
             relations.target   Node_id,
             relations.type     Rel_type,
             e1.name            Pla_name,
             e2.name            Node_name

           FROM
              relations
          JOIN  elements e1
            ON (e1.id = relations.source)
          JOIN  elements e2
          ON (e2.id = relations.target)
          WHERE
             e1.type = 'Plateau'
             AND
             relations.type = 'CompositionRelationship'
             AND
             (e2.type = 'Node' OR e2.type = 'Equipment' OR e2.type = 'Path')

    """
    shlog.normal("Making  plateau node table ")
    q(args, sql)
Example #7
0
def post_step_complete(server, user, pw, ObjectId, complete=True):
    url = server + '/p6ws/services/ActivityStepService?wsdl'
    body = """<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:v1="http://xmlns.oracle.com/Primavera/P6/WS/UDFValue/V1">
       <soapenv:Header>
        <wsse:Security xmlns:wsse="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd">
          <wsse:UsernameToken>
            <wsse:Username>%s</wsse:Username>
            <wsse:Password Type="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-username-token-profile-1.0#PasswordText">%s</wsse:Password>
          </wsse:UsernameToken>
        </wsse:Security>
      </soapenv:Header>
       <soapenv:Body>
          <v1:UpdateActivitySteps>
             <v1:ActivityStep>
                <v1:ObjectId>%s</v1:ObjectId>
                <v1:IsCompleted>%s</v1:IsCompleted>
             </v1:ActivityStep>
          </v1:UpdateActivitySteps>
       </soapenv:Body>
    </soapenv:Envelope>""" % (user, pw, ObjectId, str(complete))

    shlog.normal('Post Step ID ' + str(ObjectId) + ' Complete as ' +
                 str(complete) + ' to Primavera server ' + server)
    response = requests.post(url, verify=False, data=body)
    return response.content
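# Hypothetical usage sketch for post_step_complete: the server URL, credentials
# and ObjectId below are placeholders, not values from the original code.
xml = post_step_complete('https://p6.example.com', 'p6user', 'p6password',
                         118234, complete=True)
print(xml)  # raw SOAP response returned by Primavera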
Example #8
0
def ingest_folders(args, sqldbfile):
    shlog.normal("about to open %s", sqldbfile)
    con = sqlite3.connect(args.dbfile)
    con_temp = sqlite3.connect(sqldbfile)
    c_temp = con_temp.cursor()
    # select folders belonging to the most recent model version and compute each
    # folder's depth path with a recursive CTE over parent_folder_id
    c_temp.execute(
        """WITH desired_model(id, version, created_on) AS (SELECT id, version, max(created_on) FROM models m WHERE m.name='%s' GROUP BY id),

                      allfolders(id, parent_id, type, Name, Documentation) AS (SELECT folder_id as id, parent_folder_id as parent_id, type, Name, Documentation
                      FROM folders_in_model fim
                      INNER JOIN desired_model dm on dm.version=fim.model_version AND dm.id=fim.model_id
					  INNER JOIN folders f on f.id=fim.folder_id AND f.version=fim.folder_version),
					  
                      depths(id, name, depth) AS (
                      SELECT id, Name, type as depth
                      FROM allfolders
                      WHERE parent_id IS NULL
                    
                      UNION ALL
                    
                      SELECT allfolders.id, allfolders.Name, cast(depths.depth as text)|| '.' || cast(allfolders.name as text) as depth
                      FROM allfolders
                      JOIN depths ON allfolders.parent_id = depths.id
                      ) 
                      SELECT af.id, af.parent_id, af.type, af.Name, af.Documentation, d.depth
                      FROM allfolders as af
                              INNER JOIN depths as d on d.id=af.id
                      """ % args.prefix)
    rows = c_temp.fetchall()
    folderTable.insert(con, rows)
    ingestTable.insert(con, [[iso_now(), sqldbfile, 'FOLDER']])
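# Standalone sketch (toy data, not the archidump schema) of the recursive CTE
# pattern used above: start from folders with no parent, then repeatedly join
# children on parent_id while accumulating a path-like depth string.
import sqlite3

demo = sqlite3.connect(':memory:')
demo.executescript("""
    CREATE TABLE allfolders(id INTEGER, parent_id INTEGER, name TEXT);
    INSERT INTO allfolders VALUES (1, NULL, 'Views'),
                                  (2, 1, 'Infrastructure'),
                                  (3, 2, 'Diagrams');
""")
for row in demo.execute("""
        WITH RECURSIVE depths(id, path) AS (
            SELECT id, name FROM allfolders WHERE parent_id IS NULL
            UNION ALL
            SELECT f.id, depths.path || '.' || f.name
            FROM allfolders f JOIN depths ON f.parent_id = depths.id)
        SELECT id, path FROM depths"""):
    print(row)   # (1, 'Views'), (2, 'Views.Infrastructure'), (3, 'Views.Infrastructure.Diagrams')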
Example #9
0
def post_act_complete(server, user, pw, ObjectId, status):
    # possible status values: "Completed", "In Progress", "Not Started"
    request_data = {'Activity': {'ObjectId': ObjectId, 'Status': status}}
    shlog.normal('Making request to change status to ' + status)
    synched = m.soap_request(request_data, server, 'ActivityService',
                             'UpdateActivities', user, pw)
    return synched
Example #10
0
def mk_policy_to_file(args):
    """
    Make a convenience table linking data management policies to the files managed by those policies
    """
    sql = """
      CREATE TABLE
           POLICY_TO_FILE AS  
       SELECT
          contract.name policy_name ,
          file.name file_name,
          contract.id policy_id,
          association.target file_id,
          access.name access_type
      FROM
               elements contract
          JOIN relations association
          JOIN elements applicationprocess
          JOIN relations access
          JOIN elements file 
       WHERE
          (
                contract.type='Contract'
            AND association.type='AssociationRelationship'
            AND contract.id=association.source
            AND association.target=applicationprocess.id
            AND access.source=applicationprocess.id
            AND access.target=file.id

        )

 """
    shlog.normal("Making table linking DM policy to data files  ")
    q(args, sql)
Example #11
0
def ingest_connections(args, sqldbfile):
    shlog.normal("about to open %s", sqldbfile)
    con = sqlite3.connect(args.dbfile)
    con_temp = sqlite3.connect(sqldbfile)
    c_temp = con_temp.cursor()
    # the ingest_connections query retrieves view connections from the archidump database
    sql = """/*Retrieve id and the most recent version of a model matched by name*/
             /*desired_model should return one single row*/
             WITH desired_model(id, version, created_on) AS (SELECT id, version, max(created_on) FROM models m WHERE m.name='%s' GROUP BY id),
             /*Models store all views that ever existed, regardless of whether they exist in the most recent version*/
             /*That's why we retrieve the views that match the model id+model version from desired_model*/
             desired_views(view_id, view_version, parent_folder_id) AS (SELECT view_id, view_version, parent_folder_id
             FROM views_in_model vim
             INNER JOIN desired_model dm on dm.version=vim.model_version AND dm.id=vim.model_id),
			 /* Now that we have the most recent view versions from the most recent model, we can get the most recent connections */
			 desired_connections(connection_id, connection_version, view_version, view_id) AS (SELECT DISTINCT vciv.connection_id, vciv.connection_version, vciv.view_version, vciv.view_id 
			 FROM desired_views dv
			 INNER JOIN views_connections_in_view vciv on vciv.view_id = dv.view_id AND vciv.view_version = dv.view_version)
			 SELECT vc.id as connection_id, dc.view_id, vc.relationship_id
			 FROM views_connections vc
			 INNER JOIN desired_connections dc on vc.id=dc.connection_id AND vc.version = dc.connection_version""" % args.prefix
    shlog.verbose(sql)
    c_temp.execute(sql)
    rows = c_temp.fetchall()
    connectionsTable.insert(con, rows)
    ingestTable.insert(con, [[iso_now(), sqldbfile, 'CONNECTIONS']])
Example #12
0
    def transitionX(self, start, desired):
        wf_url = self.workflow.replace(' ', '%20')
        url = self.server + "/rest/projectconfig/1/workflow?workflowName=" + wf_url + "&projectKey=" + self.project
        auth = HTTPBasicAuth(self.user, self.pw)
        headers = {"Accept": "application/json"}
        response = requests.request("GET", url, headers=headers, auth=auth)

        # networkx starts here
        nodes = []
        edges = []
        workflow = json.loads(response.text)
        for source in workflow['sources']:
            source_name = source['fromStatus']['name']
            # print(source_name)
            # add node to node list
            nodes.append(source_name)
            for target in source['targets']:
                target_name = target['toStatus']['name']
                # print('     ' + target_name)
                # add edge to edge list
                edges.append((source_name, target_name))
            # print('______')

        # now that we have the nodes and edges, we can construct the graph
        g = nx.DiGraph()
        g.add_nodes_from(nodes)
        g.add_edges_from(edges)
        try:
            return nx.shortest_path(g, start, desired)
        except nx.exception.NetworkXNoPath:
            shlog.normal('No path found between ' + start + ' and ' + desired)
            return None
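# Toy sketch of the graph step above with hard-coded transitions instead of the
# Jira workflow REST payload: build a directed graph of statuses and ask
# networkx for the shortest chain of transitions between two of them.
import networkx as nx

edges = [('Open', 'In Progress'), ('In Progress', 'In Review'),
         ('In Review', 'Done'), ('Open', 'Done')]
g = nx.DiGraph()
g.add_edges_from(edges)
print(nx.shortest_path(g, 'In Progress', 'Done'))  # ['In Progress', 'In Review', 'Done']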
Example #13
0
 def get_issue(self, key):
     try:
         issue_info = self.jira.issue(key)
     except jira.exceptions.JIRAError:
         # triggered by issue not existing
         shlog.normal(key + " not found!")
         return None
     return issue_info
Example #14
0
def era_ingest(args):
    """ Ingest eras from the supplied path
        unlike ingest, this uses the prefix, such as LSST """
    vault_files = os.path.join(archi_interface.cachepath(args),
                               args.prefix + "_*.csv")
    shlog.normal("looking into vault for %s", vault_files)
    for v in glob.glob(vault_files):
        ingest_era_content(args, v)
Example #15
0
def list(args):
    """get ELEMENTS content"""
    con = sqlite3.connect(args.dbfile)
    cur = con.cursor()
    q = "select * from ELEMENTS"
    shlog.normal(q)
    rows = []
    for c in cur.execute(q):
        rows.append(c)
    print(tabulate.tabulate(rows))
Example #16
0
def forget(args):
    """Clean the file that remembers all the archimate files you ever opened"""
    file = os.path.join(os.environ["HOME"],
                        "Library/Application Support/Archi4/models.xml")

    if os.path.isfile(file):
        shlog.normal("deleting: %s" % file)
        os.remove(file)
    else:
        shlog.normal("%s did not exist" %  file)
Example #17
0
def ingest_era_content(args, csvfile):
    shlog.normal("about to open %s", args.dbfile)
    con = sqlite3.connect(args.dbfile)
    with open(csvfile) as f:
        rows = [tuple(line) for line in csv.reader(f)]
    # drop the CSV header row
    del rows[:1]
    eraTable.insert(con, rows)
    ingestTable.insert(con, [[iso_now(), args.dbfile, 'ERAS']])
Example #18
0
def qd(args, sql):
    # return the results of the query as an iterable of sqlite3.Row objects
    # (dict-like access by column name), one per row
    
    con = sqlite3.connect(args.dbfile)
    con.row_factory = sqlite3.Row
    cur = con.cursor()
    shlog.verbose(sql)
    results = cur.execute(sql)
    shlog.normal(results)
    return results
Example #19
0
def ingest_view_objects(args, sqldbfile):
    shlog.normal("about to open %s", sqldbfile)
    con = sqlite3.connect(args.dbfile)
    con_temp = sqlite3.connect(sqldbfile)
    c_temp = con_temp.cursor()
    # the ingest_view_objects query retrieves view objects from the archidump database
    sql = """/*Retrieve id and the most recent version of a model matched by name*/
            /*desired_model should return one single row*/
            WITH desired_model(id, version, created_on) AS (SELECT id, version, max(created_on) FROM models m WHERE m.name='%s' GROUP BY id),
            /*Models store all view objects that ever existed, regardless of whether they exist in the most recent version*/
            /*That's why we retrieve the view objects that match the model id+model version from desired_model*/
            desired_views(view_id, view_version) AS (SELECT view_id, view_version
            FROM views_in_model vim
            INNER JOIN desired_model dm on dm.version=vim.model_version AND dm.id=vim.model_id),
            /*objects are versioned as well, so we get their ids too*/
            objects_in_view(view_id, view_version, object_id, object_version) AS (SELECT DISTINCT dv.view_id, dv.view_version, voiv.object_id, voiv.object_version
            FROM desired_views dv
            INNER JOIN views_objects_in_view voiv on voiv.view_id=dv.view_id AND voiv.view_version=dv.view_version
            ),
            /*With the correct view object ids+versions identified, we can retrieve the matches from the views_objects table that have all the properties*/
            /* the first round is to get objects that are not inside other objects ("root objects") */
            null_view_objects(id, version, class, container_id, element_id, name, content) AS (SELECT vo.id, vo.version, vo.class, vo.container_id, vo.element_id, name, content
            FROM objects_in_view oiv
            INNER JOIN views_objects vo on vo.container_id=oiv.view_id AND vo.id=oiv.object_id AND vo.version=oiv.object_version),
            /* With root objects now identified, we can recursively join views_objects, treating each object's id as the next container_id */
            depths(id, version, class, container_id, root_view_id, element_id, object_id, name, content) AS (
            SELECT id, version, class, container_id, container_id as root_view_id, element_id, id as object_id, name, content
            FROM null_view_objects 
            
            UNION ALL
            
            SELECT vo.id, vo.version, vo.class, vo.container_id, depths.root_view_id, vo.element_id, vo.id as object_id, vo.name, vo.content
            FROM views_objects vo
            /* Below is the most important line in the entire query*/
            JOIN depths on vo.container_id = depths.id
            ),
            /* After this looped JOIN, filter the relevant objects with the help of  objects_in_view that has proper object=>view relation */
            desired_objects(view_id, element_id, object_id, class, name, content, container_id) AS (SELECT DISTINCT oiv.view_id, depths.element_id, depths.object_id, depths.class, depths.name, depths.content, depths.container_id
            FROM depths
            INNER JOIN objects_in_view oiv on oiv.object_id = depths.id
            AND oiv.object_version = depths.version
            AND oiv.view_id = depths.root_view_id)
            /* container_ids that are enclaves still use their object_ids instead of element_ids; map them back via a self-join */
            SELECT DISTINCT do.view_id, do.element_id as Object_id, do.class, do.name, do.content, IFNULL(do2.element_id, do.container_id) as container_id
            FROM desired_objects do
            LEFT JOIN desired_objects do2 on do.container_id = do2.object_id
             """ % args.prefix
    shlog.verbose(sql)
    c_temp.execute(sql)
    rows = c_temp.fetchall()
    viewobjectsTable.insert(con, rows)
    ingestTable.insert(con, [[iso_now(), sqldbfile, 'VIEW_OBJECTS']])
Example #20
0
def vpn_toggle(switch):
    """issue an OS command to toggle AnyConnect VPN

    :param switch: bool
    :return: None
    """
    import kalm as k
    if switch:
        shlog.normal('Engaging VPN...')
        k.engage()
    else:
        shlog.normal('Disabling VPN...')
        k.disengage()
Example #21
0
def acquire(args):
      """Copy CSV files from the export area to the local cache"""
      for file in ["elements.csv","relations.csv","properties.csv"]: 
            ffrom = os.path.join(os.environ["HOME"],'export',args.prefix + file)
            fto = os.path.join(cachepath(args),args.prefix + file)
            shutil.copyfile(ffrom, fto)
            shlog.normal("cached: %s to %s" % (ffrom, fto))
            """
            if abs(os.path.getmtime(ffrom) - os.path.getmtime(args.archifile)) > 5*60:
                shlog.warning("CSV file and archimate files differ by more than five minutes")
                shlog.warning("************  DID YOU EXPORT PROPERLY????? ")
            """
      acquire_archimate(args)
Example #22
0
def cachepath(args):
    """return a path to cache dir appropriate for export prefix

       e.g for prefix DES_ make a cache/DES_ directory if needed
       and return that path to the caller.
    """
    directory = os.path.join(VAULT_ROOT)
    try:
        os.stat(directory)
    except OSError:
        os.mkdir(directory)
        shlog.normal("made directory %s" % directory)
    return directory
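# Equivalent sketch using os.makedirs, which only creates the directory when it
# is missing and avoids catching a broad exception; VAULT_ROOT is assumed to be
# defined as in the module above.
import os

def cachepath_makedirs():
    directory = os.path.join(VAULT_ROOT)
    os.makedirs(directory, exist_ok=True)
    return directory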
Example #23
0
def ingest_folder_elements(args, sqldbfile):
    shlog.normal("about to open %s", sqldbfile)
    con = sqlite3.connect(args.dbfile)
    con_temp = sqlite3.connect(sqldbfile)
    c_temp = con_temp.cursor()
    # select folder-to-element assignments for the most recent model version
    c_temp.execute(
        """WITH desired_model(id, version, created_on) AS (SELECT id, version, max(created_on) FROM models m WHERE m.name='%s' GROUP BY id)
                      SELECT parent_folder_id, element_id
                      FROM elements_in_model eim
                      INNER JOIN desired_model dm on dm.version=eim.model_version AND dm.id=eim.model_id"""
        % args.prefix)
    rows = c_temp.fetchall()
    folder_elementsTable.insert(con, rows)
    ingestTable.insert(con, [[iso_now(), sqldbfile, 'FOLDER_ELEMENTS']])
Example #24
0
def mkserving(args):
    """
    Build a table, SERVING, that compiles the relationship of one application component
    providing service to another application component. This table hides the fact
    that in the modeling tool this relationship is expressed through an application
    interface component. The SERVING table will allow a recursive query discovering
    the chain of service dependencies.
    """

    # OK, I know this seems baroque. It took some time for a newbie to figure this
    # large join out, since it is a join of joins. Composing the query from other
    # query strings lets me debug it join-by-join.

    #Get the Application components that are served by an interface
    served = """
    select R1.target, R1.source, E1.name,  E1.id  
               from relations R1
            JOIN 
                Elements E1 on R1.Target = E1.ID
            where E1.type = 'ApplicationComponent'
              and R1.type = 'ServingRelationship'

    """
    #get the application components that provide the interface
    providing_service = """
    select R2.Target, R2.Source, E2.name,  E2.id 
               from relations R2
            JOIN 
                Elements E2 on R2.Source = E2.ID
            where E2.type = 'ApplicationComponent'
              and R2.type = 'CompositionRelationship'
     """

    sql = """  CREATE TABLE
                  Serving AS
               SELECT
                   R1.name  Served_Name,
                   R1.id    Served_ID,
                   R2.name  Serving_name,
                   R2.id    Serving_ID
                 FROM
                   (%s) R1
                 JOIN
                   (%s) R2
                 ON R2.target = R1.source 

    """ % (served, providing_service)
    shlog.normal("Making service associative table ")
    q(args, sql)
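# Debugging sketch in the spirit of the comment in mkserving: run one of the
# component subqueries on its own (here via the same q() helper and the
# RELATIONS/ELEMENTS tables the module assumes) to inspect the intermediate
# join before composing the final CREATE TABLE.
def show_served(args):
    served = """
        SELECT R1.target, R1.source, E1.name, E1.id
        FROM relations R1
        JOIN elements E1 ON R1.Target = E1.ID
        WHERE E1.type = 'ApplicationComponent'
          AND R1.type = 'ServingRelationship'
    """
    for row in q(args, served):
        print(row)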
Example #25
0
 def mkTable(self, con):
     # make the schemas for the main database tables
     # the schema can be loaded with data from subsequent calls of this program.
     cur = con.cursor()
     columns = [
         "%s %s" % (name, dbtype)
         for (name, dbtype) in zip(self.columns, self.hdt)
     ]
     columns = ','.join(columns)
     create_statement = "create table " + self.tableName + " (" + columns + ')'
     shlog.normal(create_statement)
     cur.execute(create_statement)
     con.commit()
     return
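# Hypothetical usage sketch: the real table objects (elementsTable, relationsTable,
# etc.) are defined elsewhere in the module; this stand-in only shows the three
# attributes mkTable reads before issuing its CREATE TABLE statement.
import sqlite3

class DemoTable:
    def __init__(self):
        self.tableName = "DEMO"
        self.columns = ["Id", "Name", "Documentation"]
        self.hdt = ["text", "text", "text"]

DemoTable.mkTable = mkTable          # borrow the method shown above
con = sqlite3.connect(":memory:")
DemoTable().mkTable(con)             # runs: create table DEMO (Id text,Name text,Documentation text)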
Example #26
0
def ingest_enclave_content(args, sqldbfile):
    shlog.normal("about to open %s", args.dbfile)
    con = sqlite3.connect(args.dbfile)
    # note: ingest_enclave_content connects to the same DB for source and output data
    con_temp = sqlite3.connect(args.dbfile)
    c_temp = con_temp.cursor()
    # this query maps each enclave to the objects it contains: relation targets whose source is an ENCLAVES entry
    sql = """SELECT DISTINCT r.Target as Object_id, e.Id as Enclave_id
             FROM RELATIONS r
             INNER JOIN ENCLAVES e on e.Id = r.Source"""
    shlog.verbose(sql)
    c_temp.execute(sql)
    rows = c_temp.fetchall()
    enclavecontentTable.insert(con, rows)
    ingestTable.insert(con, [[iso_now(), args.dbfile, 'ENCLAVE_CONTENT']])
Example #27
0
def ingest_properties(args, sqldbfile):
    shlog.normal("about to open %s", sqldbfile)
    con = sqlite3.connect(args.dbfile)
    con_temp = sqlite3.connect(sqldbfile)
    c_temp = con_temp.cursor()
    # keep one row per (parent_ID, name, value) property, independent of parent version
    c_temp.execute(
        """WITH properties_prep(parent_ID, name, value, max_parent_version) as (SELECT parent_ID, name, value, max(parent_version) as max_parent_version
                      FROM properties
                      GROUP BY parent_ID, name, value)
                      SELECT parent_ID, name, value
                      FROM properties_prep
                      """)
    rows = c_temp.fetchall()
    propertiesTable.insert(con, rows)
    ingestTable.insert(con, [[iso_now(), sqldbfile, 'PROPERTIES']])
Example #28
0
def delete_list(args):
    # return a list of html files to nuke
    shlog.normal("about to open %s", args.dbfile)
    con = sqlite3.connect(args.dbfile)
    curs = con.cursor()
    # this query returns all views that have their paths matched with args.searchterm
    sql = """SELECT v.Id
             FROM FOLDER f
             INNER JOIN VIEWS v on v.Parent_folder_id = f.Id
             WHERE f.Depth like '%PL4C3H0LD%'""".replace(
        'PL4C3H0LD', args.searchterm)
    shlog.verbose(sql)
    curs.execute(sql)
    rows = curs.fetchall()
    view_list = [x[0] for x in rows]
    return view_list
Example #29
0
def ingest_relations(args, sqldbfile):
    shlog.normal("about to open %s", sqldbfile)
    con = sqlite3.connect(args.dbfile)
    con_temp = sqlite3.connect(sqldbfile)
    c_temp = con_temp.cursor()
    c_temp.execute(
        """WITH desired_model(id, version, created_on) AS (SELECT id, version, max(created_on) FROM models m WHERE m.name='%s' GROUP BY id),
                      desired_model_relations(relationship_id, relationship_version) AS (SELECT relationship_id, relationship_version
                      FROM relationships_in_model rim
                      INNER JOIN desired_model dm on dm.version=rim.model_version AND dm.id=rim.model_id)
                      SELECT r.id, null as Type, r.name, r.documentation, r.source_id as source, r.target_id as Target
                      FROM relationships r
                      INNER JOIN desired_model_relations dmr on dmr.relationship_id=r.id AND dmr.relationship_version=r.version
                      """ % args.prefix)
    rows = c_temp.fetchall()
    relationsTable.insert(con, rows)
    ingestTable.insert(con, [[iso_now(), sqldbfile, 'RELATIONS']])
Example #30
0
def dbinfo(args):
    """Print summary information about database content"""
    shlog.normal("about to open %s", args.dbfile)
    l = []
    sqls = [
        ["Number of Elements", "Select count(*) from ELEMENTS"],
        ["Number of Relations", "Select count(*) from RELATIONS"],
        ["Number of Properties", "Select count(*) from PROPERTIES"],
    ]
    for item, sql in sqls:
        l.append([item, q(args, sql).fetchone()[0]])

    # now add the ingest records from the INGESTS table
    sql = "Select * from INGESTS"
    for result in q(args, sql):
        l.append(["sqlite", result])
    print(tabulate.tabulate(l, ["Item", "Value"]))