Example #1
def analyse(**kwargs):
    requestData = kwargs['requestData']
    app_name = kwargs['app_name']
    query_groups, query_results = get_queries(requestData, app_name)
    
    # establish a database connection
    cursor = connection.cursor()
    
    # execute query_groups to get the logical groups of the data
    cursor.execute(query_groups)
    db_result = cursor.fetchall()

    results = []
    for r in db_result:
        job_id = r[0]
        app_version = r[1]

        result = {"app_version": app_version}
        job = Job.objects.get(pk=job_id)
        dbfiles = ResultFile.objects.filter(job=job)
        result["files"] = [f.file for f in dbfiles]
        results.append(result)

    return {"results": results} 
def describeQueries(model):
    '''Returns a string describing the test queries used in testing a data model'''

    out = "-" * 80 + "\n"

    for q in query_builder.get_queries(model):
        out += describeQuery(q) + "\n" + "-" * 80 + "\n"
        
    return out
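
describeQuery() is called above but not shown in this listing; a plausible sketch, assuming each query is a dict with the "type", "description", and "query" keys that testRDF() below relies on:

def describeQuery(query):
    '''Hypothetical sketch of the missing helper: formats a single query
    dict; the key names mirror those consumed by testRDF() below.'''
    out = "Type: " + query["type"] + "\n"
    out += "Description: " + query["description"] + "\n"
    out += "Query:\n" + query["query"]
    return out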
Example #4
def analyse(**kwargs):
    requestData = kwargs['requestData']
    app_name = kwargs['app_name']

    axis = "unknown unit"
    
    # fetch the right queries depending on the user's choices on the request
    query_results = get_queries(requestData, app_name)

    # establish a database connection
    cursor = connection.cursor()
                
    # then execute query_results to fetch the results
    cursor.execute(query_results)
    cursor_description = cursor.description
    
    groups = GroupDict()

    result = cursor.fetchone()
    while result is not None:
        group = tuple(result[:-4])
        if result[-3] is not None and axis == "unknown unit":
            axis = result[2]
        groups[group].append(tuple(result[-4:]))
        
        result = cursor.fetchone()
    
    trends = []
    for g, values in groups.items():
        dataDict = {}
        datatable_temp = []
        for res in values:
            # res fields: -4 = version, -3 = average value, -2 = std dev, -1 = entries
            # standard error of the mean: std / sqrt(entries)
            error = float(res[-2]) / math.sqrt(float(res[-1]))
            version = res[-4]
            down_error = float(res[-3]) - error
            up_error = float(res[-3]) + error
            down_value = float(res[-3]) - float(res[-2])
            up_value = float(res[-3]) + float(res[-2])
            
            datatable_temp.append([ '{0}({1})'.format(version,res[-1]), down_value, down_error, up_error, up_value, 'Average: {0}, -+{1}'.format(res[-3],res[-2]) ])
        datatable_temp2 = sorted(datatable_temp, key=lambda t: getSplitted(t[0]))
        
        saved_index = None
        only_heads = True
        for index, dat in enumerate(datatable_temp2): 
            saved_index = index
            if dat[0].startswith('v'):
                only_heads = False
                break
        
        datatable = []
        if not only_heads:
            datatable.extend(datatable_temp2[saved_index:])
            datatable.extend(datatable_temp2[:saved_index])
        else:
            datatable = datatable_temp2
        
        dataDict['description']  = dict(zip([col[0] for col in cursor.description[:-4]], g))
        dataDict['platform']     = dataDict['description']['PLATFORM']
        dataDict['datatable']    = datatable
        dataDict['axis']         = axis
        
        trends.append(dataDict)
        
    return { 'trends': json.dumps(trends) }
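
GroupDict and getSplitted are used above but never defined in this listing; a minimal sketch of plausible implementations, inferred purely from how they are used (auto-creating list values, and sorting version labels such as "v1.2.3(42)" numerically):

from collections import defaultdict

class GroupDict(defaultdict):
    '''Plausible stand-in for the undefined GroupDict: a dict that creates
    an empty list on first access, so groups[group].append(...) always works.'''
    def __init__(self):
        super(GroupDict, self).__init__(list)

def getSplitted(label):
    '''Hypothetical sort key for labels like "v1.2.3(42)": orders by the
    numeric version components before the parenthesised entry count.'''
    version = label.split('(')[0].lstrip('v')
    return [int(part) for part in version.split('.') if part.isdigit()]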
def testRDF(graph, model):
    '''Service method testing a graph against a data model from the ontology
    
    Returns a string containing the applicable error messages or an empty
    string when no problems have been found.'''

    # Vars for tracking of the test state
    message = ""

    # Generate the queries
    queries = query_builder.get_queries(model)
    
    # Iterate over the queries
    for query in queries:
    
        # Get the query type and the query string
        type = query["type"]
        q = query["query"]
        desc = query["description"]
        
        # Negative queries should not return any results
        if type == "negative":
            # Run the query and report any failures
            try:
                results = graph.query(q)
            except Exception as e:
                # skip queries that fail to parse or execute
                print("problem with QUERY!")
                print(q)
                continue

            # Stringify the results (limit to first 3)
            # This is needed to work around a bug in rdflib where non-matched
            # results are still returned as "None" objects
            myres = []
            for r in results:
                if len (myres) < 3 and r:
                    myres.append(str(r))
            
            # If we get a result (the queries are assumed to be negative),
            # then we should build a failure message about the event
            if len(myres) > 0 :
                if len(message) == 0:
                    message = "RDF structure check failed\n"
                message += "\nUnexpected results (first 3 shown):\n"
                for m in myres:
                    message += "   " + m + "\n"
                message += describeQuery(query) + "\n"
        
        # The results of match queries should match one of the constraints       
        elif type == "match":       
            # Run the query and report any failures
            results = graph.query(q)
            
            # Lists the first 3 unmatched results
            unmatched_results = []

            # With each result
            for r in results:
            
                # Assume unmatched until a match is found
                matched = False
                
                # Build a tuple from the fields in the result
                res = (str(r[0]).lower(),str(r[1]).lower(),str(r[2]).lower(),str(r[3]).lower(),str(r[4]).lower())
                
                # With each constraint
                for c in query["constraints"]:
                
                    # Match the result against the constraint via case-insensitive matching
                    con = (c['uri'].lower(), c['code'].lower(), c['identifier'].lower(), c['title'].lower(), c['system'].lower())
                    if res == con:
                        matched = True
                        break
                
                # Add the unmatched result as a string to the list of unmatched results (limit is 3)
                if not matched and len (unmatched_results) < 3:
                    out = "    {\n"
                    out += '        "code": ' + str(r[1]) + ',\n'
                    out += '        "identifier": ' + str(r[2]) + ',\n'
                    out += '        "system": ' + str(r[4]) + ',\n'
                    out += '        "title": ' + str(r[3]) + ',\n'
                    out += '        "uri": ' + str(r[0]) + ',\n'
                    out += "    }"
                    unmatched_results.append(out)
            
            # Finally, if we have any unmatched results, we should construct a failure message about them
            if len (unmatched_results) > 0:
                if len(message) == 0:
                    message = "RDF structure check failed\n"
                message += "\nInvalid results (first 3 shown):\n"
                message += ",\n".join(unmatched_results) + "\n"
                message += describeQuery(query) + "\n"
                    
        # Singular queries test for violations of "no more than 1" restrictions.
        # There should be no duplicates in the result set
        elif type == "noduplicates":
            
            # Run the query and report any failures
            results = graph.query(q)
            
            # Stringify the results
            myres = []
            for r in results:
                if len (myres) < 3 and r:
                    myres.append(str(r))
            
            # Find the first 3 duplicates in the result set
            checked = []
            duplicates = []
            for s in myres:
                if s not in checked:
                    checked.append (s)
                elif len(duplicates) < 3:
                    duplicates.append (s)
                  
            # Set up a failure message when we have duplicates
            if len(duplicates) > 0:
                
                if len(message) == 0:
                    message = "RDF structure check failed\n"
                message += "\nUnexpected duplicates (first 3 shown):\n"
                for m in duplicates:
                    message += "   " + m + "\n"
                message += describeQuery(query) + "\n"
                 
    # Return the failure message
    return message
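
From the way testRDF() consumes them, each entry produced by query_builder.get_queries() must carry at least the keys shown below; the SPARQL text and the commented call are placeholders, not the real generated queries:

# Illustrative shape of one generated query, inferred from testRDF() above.
example_query = {
    "type": "negative",                 # or "match" / "noduplicates"
    "description": "No untyped subjects",
    "query": "SELECT ?s WHERE { ?s ?p ?o . FILTER NOT EXISTS { ?s a ?t } }",
    "constraints": [],                  # only consulted for "match" queries
}

# Typical invocation, assuming an rdflib graph is available:
#   import rdflib
#   graph = rdflib.Graph().parse("record.xml")   # hypothetical input file
#   print(testRDF(graph, "Demographics"))        # empty string == check passed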
Example #7
def analyse(**kwargs):
    requestData = kwargs['requestData']
    app_name = kwargs['app_name']
    # fetch the right queries depending on the user's choices on the request
    query_groups, query_results = get_queries(requestData, app_name)
    
    # establish a database connection
    cursor = connection.cursor()
    
    # execute query_groups to get the logical groups of the data
    cursor.execute(query_groups)
    logical_data_groups = cursor.fetchall()
    
    if len(logical_data_groups) == 0:
        return {'errorMessage' : 'No results found for your choices.',
                'template' : 'analysis/error.html' }
    if len(logical_data_groups) < 2 and requestData['hist_divided'] == 'true':
        return {'errorMessage' : 'Your choices produce fewer than 2 results!\nCannot divide a single histogram.',
                'template' : 'analysis/error.html' }
    if len(logical_data_groups) > 2 and requestData['hist_divided'] == 'true':
        return {'errorMessage' : 'Your choices produce more than 2 results!\nCannot divide more than 2 histograms.',
                'template' : 'analysis/error.html' }
    if len(logical_data_groups) > 3 and requestData['hist_imposed'] == 'true':
        return {'errorMessage' : 'Your choices produce more than 3 results!\nCannot impose more than 3 histograms.',
                'template' : 'analysis/error.html' }
    
    hist_separated        = requestData['hist_separated'] == 'true'
    hist_imposed          = requestData['hist_imposed'] == 'true'
    hist_divided          = requestData['hist_divided'] == 'true'
    hist_divided_reversed = requestData['hist_divided_reversed'] == 'true'
        
          
    # then execute query_results to fetch the results
    cursor.execute(query_results)
    # repackage the request so it can be sent properly through the socket;
    # the request object itself cannot be serialized as-is
    atr_name, atr_path = requestData['atr'].split(',')[:2]
    requestDict = {
        'atr_name'              : atr_name,
        'atr_path'              : atr_path,
        'description'           : [col[0] for col in cursor.description],
        'path_to_files'         : settings.MEDIA_ROOT,
        'hist_separated'        : hist_separated,
        'hist_imposed'          : hist_imposed,
        'hist_divided'          : hist_divided,
        'hist_divided_reversed' : hist_divided_reversed,
        'hist_options'          : settings.HISTOGRAMSGAUSSOPTIONS
    }
    #initialize our remote service
    remoteservice = remoteService()
    # if the connection fails, return an error
    if not remoteservice.connect():
        logger.error('Could not connect to remote service')
        return {'errorMessage' : 'Connection with remote service for analysis failed!',
                'template' : 'analysis/error.html' }
        
    try:
        remoteservice.send('histograms_service')
        # send the histogram info from the user's request to the ROOT service (remote)
        remoteservice.send(requestDict)
        groups = {}
    
        result = cursor.fetchone()
        while result is not None:
            group = tuple(result[:-1])
            if group not in groups:
                groups[group] = len(groups)
                remoteservice.send(('NEWGROUP', groups[group], group))
            remoteservice.send((groups[group], result[-1]))
            result = cursor.fetchone()
        # send a sentinel message so the service stops waiting for more packets
        remoteservice.send('STAHP')
        
        # after sending all data, wait for the response
        answerDict = remoteservice.recv()
    except Exception as e:
        logger.exception(e)
        remoteservice.finish()
        return {'errorMessage' : 'An error occurred with the root analysis process, please try again later',
                'template' : 'analysis/error.html' }
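
The remoteService class is not shown in this listing; a minimal sketch of a client matching the calls above (connect/send/recv/finish), assuming length-prefixed pickle framing over TCP. The host, port, and wire format are placeholders, not the original implementation:

import pickle
import socket
import struct

class remoteService(object):
    '''Minimal sketch of the remote service client used above. The address
    and the wire format (4-byte length prefix + pickled payload) are
    assumptions, not the original implementation.'''
    def __init__(self, host='localhost', port=9999):
        self.addr = (host, port)
        self.sock = None

    def connect(self):
        try:
            self.sock = socket.create_connection(self.addr, timeout=10)
            return True
        except socket.error:
            return False

    def send(self, obj):
        data = pickle.dumps(obj)
        self.sock.sendall(struct.pack('!I', len(data)) + data)

    def recv(self):
        (length,) = struct.unpack('!I', self._recv_exact(4))
        return pickle.loads(self._recv_exact(length))

    def _recv_exact(self, n):
        buf = b''
        while len(buf) < n:
            chunk = self.sock.recv(n - len(buf))
            if not chunk:
                raise ConnectionError('socket closed before full message')
            buf += chunk
        return buf

    def finish(self):
        if self.sock:
            self.sock.close()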