Example #1
def repository_run(request):
    logger.debug("analysis_manager.views.repository_run called")

    if request.method == 'POST':
        logger.debug("POST request content\n" +
                     simplejson.dumps(request.POST, indent=4))

        # attributes associated with node selection from interface
        node_selection_blacklist_mode = (
            request.POST['node_selection_blacklist_mode'] == 'true')
        node_selection = request.POST.getlist('node_selection[]')

        # solr results
        solr_query = request.POST["query"]
        solr_uuids = get_solr_results(
            solr_query,
            only_uuids=True,
            selected_mode=node_selection_blacklist_mode,
            selected_nodes=node_selection)

        # gets workflow_uuid
        workflow_uuid = request.POST['workflow_choice']

        # get study uuid
        study_uuid = request.POST['study_uuid']

        # retrieving workflow based on input workflow_uuid
        curr_workflow = Workflow.objects.filter(uuid=workflow_uuid)[0]

        # TODO: catch if study or data set don't exist
        study = Study.objects.get(uuid=study_uuid)
        data_set = InvestigationLink.objects.filter(
            investigation__uuid=study.investigation.uuid).order_by(
                "version").reverse()[0].data_set

        logger.info("Associating analysis with data set %s (%s)" %
                    (data_set, data_set.uuid))

        ######### ANALYSIS MODEL ########
        # How to create a simple analysis object
        temp_name = curr_workflow.name + " " + datetime.now().strftime(
            "%Y-%m-%d @ %H:%M:%S")
        summary_name = "None provided."

        analysis = Analysis(
            summary=summary_name,
            name=temp_name,
            project=request.user.get_profile().catch_all_project,
            data_set=data_set,
            workflow=curr_workflow,
            time_start=datetime.now())
        analysis.save()

        #setting the owner
        analysis.set_owner(request.user)

        # gets galaxy internal id for specified workflow
        workflow_galaxy_id = curr_workflow.internal_id

        # get the first (only) workflow data input
        workflow_data_inputs = curr_workflow.data_inputs.all()[0]

        # NEED TO GET LIST OF FILE_UUIDS from solr query
        count = 0
        for file_uuid in solr_uuids:
            count += 1
            temp_input = WorkflowDataInputMap(
                workflow_data_input_name=workflow_data_inputs.name,
                data_uuid=file_uuid,
                pair_id=count)
            temp_input.save()
            analysis.workflow_data_input_maps.add(temp_input)
            analysis.save()

        # keeping new reference to analysis_status
        analysis_status = AnalysisStatus.objects.create(analysis=analysis)

        # call function via analysis_manager
        run_analysis.delay(analysis)

        ret_url = request.build_absolute_uri(
            reverse('analysis_manager.views.analysis_status',
                    args=(analysis.uuid, )))
        logger.debug(ret_url)
        return HttpResponse(simplejson.dumps(ret_url),
                            mimetype='application/json')
    return HttpResponseNotAllowed(['POST'])  # 405 if not a POST request
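
For reference, a minimal sketch of the form data repository_run() expects, using Django's test client. The URL path and all placeholder values are hypothetical (only the field names come from the view above), and an authenticated session is assumed because the view reads request.user's profile.

# Hypothetical usage sketch for repository_run(); the URL path and the
# placeholder values are assumptions, not values from the project.
from django.test.client import Client

client = Client()  # assumes an authenticated session in a real call
response = client.post('/analysis_manager/repository_run/', {
    # 'true' enables blacklist mode; any other value disables it
    'node_selection_blacklist_mode': 'false',
    # nodes selected (or excluded, in blacklist mode) in the interface
    'node_selection[]': ['3d061699-6bc8-11e2-9b55-406c8f1d5108'],
    'query': '*:*',                        # Solr query string
    'workflow_choice': '<workflow-uuid>',  # UUID of the Workflow to run
    'study_uuid': '<study-uuid>',          # UUID of the Study
})
# on success the body is a JSON-encoded absolute URL of the status page
print(response.content)
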
Example #2
def run(request):
    """Run analysis, return URL of the analysis status page
    Needs re-factoring

    """
    logger.debug("analysis_manager.views.run called")

    if not request.is_ajax():
        return HttpResponseBadRequest()  # 400
    allowed_methods = ['POST']
    if request.method not in allowed_methods:
        return HttpResponseNotAllowed(allowed_methods)  # 405

    analysis_config = simplejson.loads(request.body)
    try:
        workflow_uuid = analysis_config['workflowUuid']
        study_uuid = analysis_config['studyUuid']
        node_set_uuid = analysis_config['nodeSetUuid']
        node_relationship_uuid = analysis_config['nodeRelationshipUuid']
    except KeyError:
        return HttpResponseBadRequest()  # 400
    # must provide workflow and study UUIDs,
    # and either node set UUID or node relationship UUID
    if not (workflow_uuid and study_uuid and
            (node_set_uuid or node_relationship_uuid)):
        return HttpResponseBadRequest()  # 400

    # single-input workflow
    if node_set_uuid:
        #TODO: handle DoesNotExist exception
        curr_node_set = NodeSet.objects.get(uuid=node_set_uuid)
        curr_node_dict = curr_node_set.solr_query_components
        curr_node_dict = simplejson.loads(curr_node_dict)

        # solr results
        solr_uuids = get_solr_results(
            curr_node_set.solr_query,
            only_uuids=True,
            selected_mode=curr_node_dict['documentSelectionBlacklistMode'],
            selected_nodes=curr_node_dict['documentSelection'])

        # retrieve the workflow matching the input workflow_uuid
        # TODO: handle IndexError if no workflow matches
        curr_workflow = Workflow.objects.filter(uuid=workflow_uuid)[0]

        # TODO: catch if study or data set don't exist
        study = Study.objects.get(uuid=study_uuid)
        data_set = InvestigationLink.objects.filter(
            investigation__uuid=study.investigation.uuid).order_by(
                "version").reverse()[0].data_set

        logger.info("Associating analysis with data set %s (%s)" %
                    (data_set, data_set.uuid))

        ######### ANALYSIS MODEL ########
        # How to create a simple analysis object
        temp_name = curr_workflow.name + " " + datetime.now().strftime(
            "%Y-%m-%d @ %H:%M:%S")
        summary_name = "None provided."

        analysis = Analysis(
            summary=summary_name,
            name=temp_name,
            project=request.user.get_profile().catch_all_project,
            data_set=data_set,
            workflow=curr_workflow,
            time_start=datetime.now())
        analysis.save()

        analysis.set_owner(request.user)

        # gets galaxy internal id for specified workflow
        workflow_galaxy_id = curr_workflow.internal_id

        # get the first (only) workflow data input
        workflow_data_inputs = curr_workflow.data_inputs.all()[0]

        # NEED TO GET LIST OF FILE_UUIDS from solr query
        count = 0
        for file_uuid in solr_uuids:
            count += 1
            temp_input = WorkflowDataInputMap(
                workflow_data_input_name=workflow_data_inputs.name,
                data_uuid=file_uuid,
                pair_id=count)
            temp_input.save()
            analysis.workflow_data_input_maps.add(temp_input)
            analysis.save()

    # dual-input workflow
    if node_relationship_uuid:
        # Input list for running analysis
        ret_list = []
        # retrieving workflow based on input workflow_uuid
        curr_workflow = Workflow.objects.get(uuid=workflow_uuid)

        # TODO: catch if study or data set don't exist
        study = Study.objects.get(uuid=study_uuid)
        data_set = InvestigationLink.objects.filter(
            investigation__uuid=study.investigation.uuid).order_by(
                "version").reverse()[0].data_set

        # Get node relationship model
        curr_relationship = NodeRelationship.objects.get(
            uuid=node_relationship_uuid)

        # Iterating over node pairs
        input_keys = []
        base_input = {}
        # defining inputs used for analysis
        for workflow_inputs in curr_workflow.input_relationships.all():
            base_input[workflow_inputs.set1] = {}
            base_input[workflow_inputs.set2] = {}
            input_keys.append(workflow_inputs.set1)
            input_keys.append(workflow_inputs.set2)

        # create one input-data pairing per node pair for the analysis, e.g.
        # [{u'exp_file':
        #   {'node_uuid': u'3d061699-6bc8-11e2-9b55-406c8f1d5108', 'pair_id': 1},
        #  u'input_file':
        #   {'node_uuid': u'3d180d11-6bc8-11e2-9bc7-406c8f1d5108', 'pair_id': 1}}]
        count = 1
        for curr_pair in curr_relationship.node_pairs.all():
            temp_pair = copy.deepcopy(base_input)
            print "curr_pair"
            print temp_pair
            print curr_pair
            #temp_pair = {}
            if curr_pair.node2:
                #print curr_pair.node2.uuid
                temp_pair[input_keys[0]]['node_uuid'] = curr_pair.node1.uuid
                temp_pair[input_keys[0]]['pair_id'] = count
                temp_pair[input_keys[1]]['node_uuid'] = curr_pair.node2.uuid
                temp_pair[input_keys[1]]['pair_id'] = count
                ret_list.append(temp_pair)
                logger.debug("Temp Pair: %s", temp_pair)
                count += 1

        logger.info("Associating analysis with data set %s (%s)" %
                    (data_set, data_set.uuid))

        ######### ANALYSIS MODEL ########
        # How to create a simple analysis object
        temp_name = curr_workflow.name + " " + str(datetime.now())
        summary_name = "None provided."

        analysis = Analysis(
            summary=summary_name,
            name=temp_name,
            project=request.user.get_profile().catch_all_project,
            data_set=data_set,
            workflow=curr_workflow,
            time_start=datetime.now())
        analysis.save()

        analysis.set_owner(request.user)

        # gets galaxy internal id for specified workflow
        workflow_galaxy_id = curr_workflow.internal_id

        # getting distinct workflow inputs
        workflow_data_inputs = curr_workflow.data_inputs.all()

        logger.debug("ret_list")
        logger.debug(simplejson.dumps(ret_list, indent=4))

        ######### ANALYSIS MODEL
        # update Refinery models for the workflow input
        # (Galaxy workflow input id & node_uuid)
        count = 0
        for samp in ret_list:
            count += 1
            for k, v in samp.items():
                temp_input = WorkflowDataInputMap(
                    workflow_data_input_name=k,
                    data_uuid=samp[k]["node_uuid"],
                    pair_id=count)
                temp_input.save()
                analysis.workflow_data_input_maps.add(temp_input)
                analysis.save()

    # keeping new reference to analysis_status
    analysis_status = AnalysisStatus.objects.create(analysis=analysis)

    # call function via analysis_manager
    run_analysis.delay(analysis)

    redirect_url = reverse('analysis_manager.views.analysis_status',
                           args=(analysis.uuid, ))
    return HttpResponse(redirect_url)
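
Because run() accepts only AJAX POST requests, a caller has to send a JSON body and the X-Requested-With header that request.is_ajax() checks. A minimal sketch with Django's test client, assuming a hypothetical URL path, placeholder UUIDs, and an authenticated session:

# Hypothetical usage sketch for run(); all four keys must be present in the
# JSON body (a missing key yields a 400), and at least one of nodeSetUuid or
# nodeRelationshipUuid must be non-null.
import json
from django.test.client import Client

client = Client()  # assumes an authenticated session in a real call
payload = {
    'workflowUuid': '<workflow-uuid>',
    'studyUuid': '<study-uuid>',
    'nodeSetUuid': '<node-set-uuid>',  # single-input workflow
    'nodeRelationshipUuid': None,      # dual-input workflow
}
response = client.post('/analysis_manager/run/',
                       data=json.dumps(payload),
                       content_type='application/json',
                       # request.is_ajax() checks this header
                       HTTP_X_REQUESTED_WITH='XMLHttpRequest')
print(response.content)  # relative URL of the analysis status page
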
Example #3
def analysis_run(request):
    logger.debug("analysis_manager.views.analysis_run called")
    logger.debug("POST request content\n" +
                 simplejson.dumps(request.POST, indent=4))

    # gets workflow_uuid
    workflow_uuid = request.POST.getlist('workflow_choice')[0]

    # get study uuid
    study_uuid = request.POST.getlist('study_uuid')[0]

    # map of selected assays: node_uuid -> associated workflow input type
    selected_uuids = {}

    # find all selected assays for the chosen samples
    for key, val in request.POST.iteritems():
        if val and key.startswith('assay_'):
            node_uuid = key.replace('assay_', '')
            selected_uuids[node_uuid] = val

    # turn the POST input into an ingestible data/exp format:
    # retrieve the annotated inputs for the given workflow_uuid
    annot_inputs = get_workflow_inputs(workflow_uuid)
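    # number of distinct workflow inputs (1 = single input, 2 = paired inputs)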
    len_inputs = len(set(annot_inputs))

    logger.debug("selected_uuids: " + selected_uuids)

    #------------ CONFIGURE INPUT FILES -------------------------- #
    ret_list = []
    ret_item = copy.deepcopy(annot_inputs)
    pair_count = 0
    pair = 1
    tcount = 0

    # loop until every selected node has been assigned to a workflow input
    while len(selected_uuids) != 0:
        tcount += 1
        if tcount > 5000:
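            # safety cap to avoid an infinite loop on unmatched selections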
            break

        for k, v in ret_item.iteritems():
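            # .items() returns a list copy in Python 2, so deleting from
            # selected_uuids inside the loop is safe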
            for index, sd in selected_uuids.items():

                # handle the case where a Galaxy workflow has more than one input
                if len_inputs > 1:
                    if k == sd and ret_item[k] is None:
                        ret_item[k] = {}
                        ret_item[k]["node_uuid"] = index
                        ret_item[k]["pair_id"] = pair
                        pair_count += 1
                        del selected_uuids[index]

                    if pair_count == 2:
                        ret_list.append(ret_item)
                        ret_item = copy.deepcopy(annot_inputs)
                        pair_count = 0
                        pair += 1

                # handle the case where the Galaxy workflow has a single input
                elif len_inputs == 1:
                    ret_item = copy.deepcopy(annot_inputs)
                    ret_item[k] = {}
                    ret_item[k]["node_uuid"] = index
                    ret_item[k]["pair_id"] = pair
                    ret_list.append(ret_item)
                    del selected_uuids[index]
                    pair += 1

    # retrieving workflow based on input workflow_uuid
    curr_workflow = Workflow.objects.filter(uuid=workflow_uuid)[0]

    ### ----------------------------------------------------------------#
    ### REFINERY MODEL UPDATES ###

    # TODO: catch if study or data set don't exist
    study = Study.objects.get(uuid=study_uuid)
    data_set = InvestigationLink.objects.filter(
        investigation__uuid=study.investigation.uuid).order_by(
            "version").reverse()[0].data_set

    logger.info("Associating analysis with data set %s (%s)" %
                (data_set, data_set.uuid))

    ######### ANALYSIS MODEL ########
    # How to create a simple analysis object
    temp_name = curr_workflow.name + " " + str(datetime.now())
    summary_name = "None provided."

    analysis = Analysis(summary=summary_name,
                        name=temp_name,
                        project=request.user.get_profile().catch_all_project,
                        data_set=data_set,
                        workflow=curr_workflow,
                        time_start=datetime.now())
    analysis.save()

    #setting the owner
    analysis.set_owner(request.user)

    # gets galaxy internal id for specified workflow
    workflow_galaxy_id = curr_workflow.internal_id

    # getting distinct workflow inputs
    workflow_data_inputs = curr_workflow.data_inputs.all()

    logger.debug("ret_list")
    logger.debug(simplejson.dumps(ret_list, indent=4))

    ######### ANALYSIS MODEL
    # update Refinery models for the workflow input
    # (Galaxy workflow input id & node_uuid)
    count = 0
    for samp in ret_list:
        count += 1
        for k, v in samp.items():
            temp_input = WorkflowDataInputMap(workflow_data_input_name=k,
                                              data_uuid=samp[k]["node_uuid"],
                                              pair_id=count)

            temp_input.save()
            analysis.workflow_data_input_maps.add(temp_input)
            analysis.save()

    # keeping new reference to analysis_status
    analysis_status = AnalysisStatus.objects.create(analysis=analysis)

    # call function via analysis_manager
    run_analysis.delay(analysis)

    return HttpResponseRedirect(
        reverse('analysis_manager.views.analysis_status',
                args=(analysis.uuid, )))
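
A minimal sketch of the form data analysis_run() expects: one assay_<node_uuid> field per selected sample, whose value names the workflow input that node should feed. The URL path and UUIDs are hypothetical placeholders and an authenticated session is assumed.

# Hypothetical usage sketch for analysis_run(); the path and UUIDs are
# placeholders, not values from the project.
from django.test.client import Client

client = Client()  # assumes an authenticated session in a real call
response = client.post('/analysis_manager/analysis_run/', {
    'workflow_choice': '<workflow-uuid>',
    'study_uuid': '<study-uuid>',
    # one 'assay_<node_uuid>' field per selected sample; the value is the
    # Galaxy workflow input the node maps to
    'assay_3d061699-6bc8-11e2-9b55-406c8f1d5108': 'input_file',
    'assay_3d180d11-6bc8-11e2-9bc7-406c8f1d5108': 'exp_file',
})
# on success the view redirects (302) to the analysis status page
print(response.status_code, response['Location'])
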
Example #7
def run(request):
    """Run analysis, return URL of the analysis status page
    Needs re-factoring
    """
    logger.debug("Received request to start analysis")
    if not request.is_ajax():
        return HttpResponseBadRequest()  # 400
    allowed_methods = ['POST']
    if request.method not in allowed_methods:
        return HttpResponseNotAllowed(allowed_methods)  # 405

    analysis_config = json.loads(request.body)
    try:
        workflow_uuid = analysis_config['workflowUuid']
        study_uuid = analysis_config['studyUuid']
        node_set_uuid = analysis_config['nodeSetUuid']
        node_group_uuid = analysis_config['nodeGroupUuid']
        node_relationship_uuid = analysis_config['nodeRelationshipUuid']
        custom_name = analysis_config['name']
    except KeyError:
        return HttpResponseBadRequest()  # 400
    # must provide workflow and study UUIDs,
    # and either node set UUID or node relationship UUID
    if not (workflow_uuid and study_uuid and
            (node_set_uuid or node_relationship_uuid or node_group_uuid)):
        return HttpResponseBadRequest()  # 400

    # single-input workflow based node group
    if node_group_uuid:
        try:
            curr_node_group = NodeGroup.objects.get(uuid=node_group_uuid)
        except NodeGroup.DoesNotExist:
            logger.error("Node Group with UUID '{}' does not exist".format(
                node_group_uuid))
            return HttpResponse(status=404)
        except NodeGroup.MultipleObjectsReturned:
            logger.error("Node Group with UUID '{}' returned multiple "
                         "objects".format(node_group_uuid))
            return HttpResponse(status=500)

        try:
            curr_workflow = Workflow.objects.get(uuid=workflow_uuid)
        except Workflow.DoesNotExist:
            logger.error("Workflow with UUID '{}' does not exist".format(
                workflow_uuid))
            return HttpResponse(status=404)
        except Workflow.MultipleObjectsReturned:
            logger.error("Workflow with UUID '{}' returned multiple objects"
                         .format(workflow_uuid))
            return HttpResponse(status=500)

        try:
            study = Study.objects.get(uuid=study_uuid)
        except Study.DoesNotExist:
            logger.error("Study with UUID '{}' does not exist".format(
                study_uuid))
            return HttpResponse(status=404)
        except Study.MultipleObjectsReturned:
            logger.error("Study with UUID '{}' returned multiple objects"
                         .format(study_uuid))
            return HttpResponse(status=500)

        investigation_links = InvestigationLink.objects.filter(
            investigation__uuid=study.investigation.uuid).order_by(
                "version")
        if not investigation_links:
            logger.error("InvestigationLink for investigation with UUID '{}' "
                         "does not exist".format(study.investigation.uuid))
            return HttpResponse(status=404)

        data_set = investigation_links.reverse()[0].data_set
        logger.info("Associating analysis with data set %s (%s)",
                    data_set, data_set.uuid)

        # ANALYSIS MODEL
        # How to create a simple analysis object
        if not custom_name:
            temp_name = curr_workflow.name + " " + get_aware_local_time()\
                .strftime("%Y-%m-%d @ %H:%M:%S")
        else:
            temp_name = custom_name

        summary_name = "None provided."
        analysis = Analysis.objects.create(
            summary=summary_name,
            name=temp_name,
            project=request.user.get_profile().catch_all_project,
            data_set=data_set,
            workflow=curr_workflow,
            time_start=timezone.now()
        )
        analysis.set_owner(request.user)

        # get the first (only) workflow data input
        try:
            workflow_data_inputs = curr_workflow.data_inputs.all()[0]
        except IndexError:
            logger.error("Workflow with UUID '{}' has no data inputs"
                         .format(workflow_uuid))
            return HttpResponse(status=500)

        # NEED TO GET LIST OF FILE_UUIDS from node_group_uuid fields
        count = 0
        for node_file in curr_node_group.nodes.all():
            count += 1
            temp_input = WorkflowDataInputMap.objects.create(
                workflow_data_input_name=workflow_data_inputs.name,
                data_uuid=node_file.uuid,
                pair_id=count
            )
            analysis.workflow_data_input_maps.add(temp_input)
            analysis.save()

    # single-input workflow
    if node_set_uuid:
        # TODO: handle DoesNotExist exception
        curr_node_set = NodeSet.objects.get(uuid=node_set_uuid)
        curr_node_dict = curr_node_set.solr_query_components
        curr_node_dict = json.loads(curr_node_dict)
        # solr results
        solr_uuids = get_solr_results(
            curr_node_set.solr_query,
            only_uuids=True,
            selected_mode=curr_node_dict['documentSelectionBlacklistMode'],
            selected_nodes=curr_node_dict['documentSelection']
        )
        # retrieve the workflow matching the input workflow_uuid
        # TODO: handle IndexError if no workflow matches
        curr_workflow = Workflow.objects.filter(uuid=workflow_uuid)[0]

        # TODO: catch if study or data set don't exist
        study = Study.objects.get(uuid=study_uuid)
        data_set = InvestigationLink.objects.filter(
            investigation__uuid=study.investigation.uuid).order_by(
                "version").reverse()[0].data_set

        logger.info("Associating analysis with data set %s (%s)",
                    data_set, data_set.uuid)

        # ANALYSIS MODEL
        # How to create a simple analysis object
        if not custom_name:
            temp_name = curr_workflow.name + " " + get_aware_local_time()\
                .strftime("%Y-%m-%d @ %H:%M:%S")
        else:
            temp_name = custom_name

        summary_name = "None provided."
        analysis = Analysis.objects.create(
            summary=summary_name,
            name=temp_name,
            project=request.user.get_profile().catch_all_project,
            data_set=data_set,
            workflow=curr_workflow,
            time_start=timezone.now()
        )
        analysis.set_owner(request.user)

        # get the first (only) workflow data input
        workflow_data_inputs = curr_workflow.data_inputs.all()[0]

        # NEED TO GET LIST OF FILE_UUIDS from solr query
        count = 0
        for file_uuid in solr_uuids:
            count += 1
            temp_input = WorkflowDataInputMap.objects.create(
                workflow_data_input_name=workflow_data_inputs.name,
                data_uuid=file_uuid,
                pair_id=count
            )
            analysis.workflow_data_input_maps.add(temp_input)
            analysis.save()

    # dual-input workflow
    if node_relationship_uuid:
        # Input list for running analysis
        ret_list = []
        # retrieving workflow based on input workflow_uuid
        curr_workflow = Workflow.objects.get(uuid=workflow_uuid)

        # TODO: catch if study or data set don't exist
        study = Study.objects.get(uuid=study_uuid)
        data_set = InvestigationLink.objects.filter(
            investigation__uuid=study.investigation.uuid).order_by(
                "version").reverse()[0].data_set

        # Get node relationship model
        curr_relationship = NodeRelationship.objects.get(
            uuid=node_relationship_uuid)
        # Iterating over node pairs
        input_keys = []
        base_input = {}
        # defining inputs used for analysis
        for workflow_inputs in curr_workflow.input_relationships.all():
            base_input[workflow_inputs.set1] = {}
            base_input[workflow_inputs.set2] = {}
            input_keys.append(workflow_inputs.set1)
            input_keys.append(workflow_inputs.set2)

        # create one input-data pairing per node pair for the analysis, e.g.
        # [{u'exp_file':
        # {'node_uuid': u'3d061699-6bc8-11e2-9b55-406c8f1d5108', 'pair_id': 1},
        # u'input_file':
        # {'node_uuid': u'3d180d11-6bc8-11e2-9bc7-406c8f1d5108', 'pair_id': 1}}
        # ]
        count = 1
        for curr_pair in curr_relationship.node_pairs.all():
            temp_pair = copy.deepcopy(base_input)
            logger.debug("Temp Pair: %s", temp_pair)
            logger.debug("Current Pair: %s", curr_pair)
            if curr_pair.node2:
                temp_pair[input_keys[0]]['node_uuid'] = curr_pair.node1.uuid
                temp_pair[input_keys[0]]['pair_id'] = count
                temp_pair[input_keys[1]]['node_uuid'] = curr_pair.node2.uuid
                temp_pair[input_keys[1]]['pair_id'] = count
                ret_list.append(temp_pair)
                logger.debug("Temp Pair: %s", temp_pair)
                count += 1

        logger.info("Associating analysis with data set %s (%s)",
                    data_set, data_set.uuid)

        # ANALYSIS MODEL
        # How to create a simple analysis object
        if not custom_name:
            temp_name = curr_workflow.name + " " + get_aware_local_time()\
                .strftime("%Y-%m-%d @ %H:%M:%S")
        else:
            temp_name = custom_name

        summary_name = "None provided."

        analysis = Analysis.objects.create(
            summary=summary_name,
            name=temp_name,
            project=request.user.get_profile().catch_all_project,
            data_set=data_set,
            workflow=curr_workflow,
            time_start=timezone.now()
        )
        analysis.set_owner(request.user)

        # getting distinct workflow inputs
        workflow_data_inputs = curr_workflow.data_inputs.all()

        logger.debug("ret_list")
        logger.debug(json.dumps(ret_list, indent=4))

        # ANALYSIS MODEL
        # update Refinery models for the workflow input
        # (Galaxy workflow input id & node_uuid)
        count = 0
        for samp in ret_list:
            count += 1
            for k, v in samp.items():
                temp_input = WorkflowDataInputMap.objects.create(
                    workflow_data_input_name=k,
                    data_uuid=samp[k]["node_uuid"],
                    pair_id=count)
                analysis.workflow_data_input_maps.add(temp_input)
                analysis.save()

    # keeping new reference to analysis_status
    analysis_status = AnalysisStatus.objects.create(analysis=analysis)

    # call function via analysis_manager
    run_analysis.delay(analysis.uuid)

    return HttpResponse(reverse('analysis-status', args=(analysis.uuid,)))
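
This newer run() adds nodeGroupUuid and an optional custom name to the payload; all six keys must be present (a missing key yields a 400), and at least one of the three node UUIDs must be non-null. A minimal sketch under the same assumptions as above (hypothetical URL path, placeholder UUIDs, authenticated session):

# Hypothetical usage sketch for the newer run(); paths and UUIDs are
# placeholders, not values from the project.
import json
from django.test.client import Client

client = Client()  # assumes an authenticated session in a real call
payload = {
    'workflowUuid': '<workflow-uuid>',
    'studyUuid': '<study-uuid>',
    'nodeSetUuid': None,
    'nodeGroupUuid': '<node-group-uuid>',  # run on a saved node group
    'nodeRelationshipUuid': None,
    'name': 'My analysis',  # optional; falls back to a timestamped name
}
response = client.post('/analysis_manager/run/',
                       data=json.dumps(payload),
                       content_type='application/json',
                       # request.is_ajax() checks this header
                       HTTP_X_REQUESTED_WITH='XMLHttpRequest')
print(response.content)  # relative URL of the analysis status page
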