Code example #1
File: views.py Project: glormph/kantele
def save_or_update_files(data):
    dset_id = data['dataset_id']
    added_fnids = [x['id'] for x in data['added_files'].values()]
    if added_fnids:
        dset = models.Dataset.objects.get(pk=dset_id)
        models.DatasetRawFile.objects.bulk_create([
            models.DatasetRawFile(dataset_id=dset_id, rawfile_id=fnid)
            for fnid in added_fnids])
        filemodels.RawFile.objects.filter(
            pk__in=added_fnids).update(claimed=True)
        create_dataset_job('move_files_storage', dset_id, dset.storage_loc,
                           added_fnids)
    removed_ids = [int(x['id']) for x in data['removed_files'].values()]
    if removed_ids:
        models.DatasetRawFile.objects.filter(
            dataset_id=dset_id, rawfile_id__in=removed_ids).delete()
        filemodels.RawFile.objects.filter(pk__in=removed_ids).update(
            claimed=False)
        create_dataset_job('move_stored_files_tmp', dset_id, removed_ids)
    # If files changed and the quant type is labelfree, set the sampleprep
    # component status to not good, which should update the tab colour (green to red)
    try:
        qtype = models.Dataset.objects.select_related(
            'quantdataset__quanttype').get(pk=dset_id).quantdataset.quanttype
    except models.QuantDataset.DoesNotExist:
        pass
    else:
        if (added_fnids or removed_ids) and qtype.name == 'labelfree':
            set_component_state(dset_id, 'sampleprep', COMPSTATE_INCOMPLETE)
    set_component_state(dset_id, 'files', COMPSTATE_OK)
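A minimal sketch of the data payload save_or_update_files expects, reconstructed only from the keys it reads above; the ids and the shape of the front-end file objects are illustrative assumptions:

# Illustrative only: keys are taken from save_or_update_files above,
# the ids and inner objects are made-up examples.
example_data = {
    'dataset_id': 42,
    'added_files': {'1': {'id': 101}, '2': {'id': 102}},
    'removed_files': {'3': {'id': 95}},
}
save_or_update_files(example_data)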
Code example #2
File: views.py Project: ypriverol/kantele
def save_or_update_files(data):
    dset_id = data['dataset_id']
    added_fnids = [x['id'] for x in data['added_files'].values()]
    if added_fnids:
        dset = models.Dataset.objects.get(pk=dset_id)
        models.DatasetRawFile.objects.bulk_create([
            models.DatasetRawFile(dataset_id=dset_id, rawfile_id=fnid)
            for fnid in added_fnids
        ])
        filemodels.RawFile.objects.filter(pk__in=added_fnids).update(
            claimed=True)
        create_dataset_job('move_files_storage', dset_id, dset.storage_loc,
                           added_fnids)
    removed_ids = [int(x['id']) for x in data['removed_files'].values()]
    if removed_ids:
        models.DatasetRawFile.objects.filter(
            dataset_id=dset_id, rawfile_id__in=removed_ids).delete()
        filemodels.RawFile.objects.filter(pk__in=removed_ids).update(
            claimed=False)
        create_dataset_job('move_stored_files_tmp', dset_id, removed_ids)
    # If files changed and the quant type is labelfree, set the sampleprep
    # component status to not good, which should update the tab colour (green to red)
    try:
        qtype = models.Dataset.objects.select_related(
            'quantdataset__quanttype').get(pk=dset_id).quantdataset.quanttype
    except models.QuantDataset.DoesNotExist:
        pass
    else:
        if (added_fnids or removed_ids) and qtype.name == 'labelfree':
            set_component_state(dset_id, 'sampleprep', COMPSTATE_INCOMPLETE)
    set_component_state(dset_id, 'files', COMPSTATE_OK)
Code example #3
File: views.py Project: glormph/kantele
def purge_analysis(request):
    if request.method != 'POST':
        return HttpResponseNotAllowed(permitted_methods=['POST'])
    elif not request.user.is_staff:
        return HttpResponseForbidden()
    req = json.loads(request.body.decode('utf-8'))
    analysis = am.Analysis.objects.get(nextflowsearch__id=req['analysis_id'])
    if not analysis.deleted:
        return HttpResponseForbidden()
    analysis.purged = True
    analysis.save()
    jj.create_dataset_job('purge_analysis', analysis.id)
    jj.create_dataset_job('delete_analysis_directory', analysis.id)
    return HttpResponse()
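A hedged sketch of exercising this endpoint from a Django test client; the URL path, user name, and id value are assumptions, while the POST-only, staff-only, and 'analysis_id' requirements come from the view above:

# Sketch only: URL path and ids are assumptions, not taken from the project.
import json
from django.contrib.auth.models import User
from django.test import Client

staff_user = User.objects.create_user('staff', password='x', is_staff=True)
client = Client()
client.force_login(staff_user)  # purge_analysis rejects non-staff users
resp = client.post('/analysis/purge/',  # hypothetical URL for purge_analysis
                   json.dumps({'analysis_id': 123}),
                   content_type='application/json')
# 200 if the analysis was already marked deleted, 403 otherwise
print(resp.status_code)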
Code example #4
File: views.py Project: glormph/kantele
def start_analysis(request):
    # queue nextflow
    if request.method != 'POST':
        return HttpResponseNotAllowed(permitted_methods=['POST'])
    req = json.loads(request.body.decode('utf-8'))
    if dm.Dataset.objects.filter(pk__in=req['dsids'], deleted=True):
        return JsonResponse({'state': 'error', 'msg': 'Deleted datasets cannot be analyzed'})
    analysis = am.Analysis(name=req['analysisname'], user_id=request.user.id)
    analysis.save()
    strips = {}
    for dsid in req['dsids']:
        strip = req['strips'][dsid]
        if strip:
            strip = re.sub('[a-zA-Z]', '', strip)
            strips[dsid] = '-'.join([re.sub('.0$', '', str(float(x.strip()))) for x in strip.split('-')])
        else:
            strips[dsid] = False  # FIXME does that work?
            # FIXME when strip is False (as passed from javascript) we need to do something, eg long gradients 
    params = {'singlefiles': {nf: fnid for nf, fnid in req['files'].items()},
              'params': [y for x in req['params'].values() for y in x]}
    if 'sampletable' in req and len(req['sampletable']):
        params['sampletable'] = req['sampletable']
    # FIXME run_ipaw_nextflow rename job
    fname = 'run_ipaw_nextflow'
    arg_dsids = [int(x) for x in req['dsids']]
    # FIXME setnames have changed, is that ok?
    jobcheck = jj.check_existing_search_job(fname, arg_dsids, strips, req['fractions'], req['setnames'], req['wfid'], req['nfwfvid'], params)
    if jobcheck:
        return JsonResponse({'state': 'error', 'msg': 'This analysis already exists', 'link': '/?tab=searches&search_id={}'.format(jobcheck.nextflowsearch.id)})
    job = jj.create_dataset_job(fname, arg_dsids, strips, req['fractions'], req['setnames'], analysis.id, req['wfid'], req['nfwfvid'], params)
    aj.create_nf_search_entries(analysis, req['wfid'], req['nfwfvid'], job.id)
    return JsonResponse({'state': 'ok'})
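A minimal sketch of the JSON body start_analysis parses, reconstructed from the req[...] lookups above; every value is an illustrative assumption:

# Keys mirror the req[...] accesses in start_analysis; values are made up.
example_req = {
    'analysisname': 'testsearch',
    'dsids': ['11', '12'],
    'strips': {'11': '3.7-4.9', '12': ''},  # a falsy strip is stored as False
    'files': {'single_file.mzML': 501},     # file name -> raw file id
    'params': {'mods': ['--mod', 'carbamidomethyl']},
    'sampletable': [],                      # optional, skipped when empty
    'fractions': {},
    'setnames': {'11': 'setA', '12': 'setB'},
    'wfid': 1,
    'nfwfvid': 2,
}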
Code example #5
File: views.py Project: ypriverol/kantele
def start_analysis(request):
    # queue nextflow
    if request.method != 'POST':
        return HttpResponseNotAllowed(permitted_methods=['POST'])
    req = json.loads(request.body.decode('utf-8'))
    analysis = am.Analysis(name=req['analysisname'], user_id=request.user.id)
    analysis.save()
    strips = {}
    for dsid in req['dsids']:
        strip = req['strips'][dsid]
        if strip:
            strip = re.sub('[a-zA-Z]', '', strip)
            strips[dsid] = '-'.join([re.sub('.0$', '', str(float(x.strip()))) for x in strip.split('-')])
        else:
            strips[dsid] = False  # FIXME does that work?
            # FIXME when strip is False (as passed from javascript) we need to do something, eg long gradients 
    params = {'singlefiles': {nf: fnid for nf, fnid in req['files'].items()},
              'params': [y for x in req['params'].values() for y in x]}
    # FIXME run_ipaw_nextflow rename job
    fname = 'run_ipaw_nextflow'
    arg_dsids = [int(x) for x in req['dsids']]
    # FIXME do not check the analysis_id!
    # FIXME setnames have changed, is that ok?
    jobcheck = jj.check_existing_search_job(fname, arg_dsids, strips, req['fractions'], req['setnames'], req['wfid'], req['nfwfvid'], params)
    if jobcheck:
        return JsonResponse({'state': 'error', 'msg': 'This analysis already exists', 'link': '/?tab=searches&search_id={}'.format(jobcheck.nextflowsearch.id)})
    job = jj.create_dataset_job(fname, arg_dsids, strips, req['fractions'], req['setnames'], analysis.id, req['wfid'], req['nfwfvid'], params)
    create_nf_search_entries(analysis, req['wfid'], req['nfwfvid'], job.id)
    return JsonResponse({'state': 'ok'})
Code example #6
def download_px_project(request):
    # FIXME check if pxacc exists on pride and here, before creating dset
    # FIXME View checks project and returns maybe as a nicety how many files it will download.
    # FIXME if already exist, update experiment name in view
    # get or create dataset
    dset = dsviews.get_or_create_px_dset(request.POST['exp'],
                                         request.POST['px_acc'],
                                         request.POST['user_id'])
    # get or create raw/storedfiles
    date = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M')
    tmpshare = ServerShare.objects.get(name=settings.TMPSHARENAME)
    raw_ids = []
    extprod = Producer.objects.get(pk=settings.EXTERNAL_PRODUCER_ID)
    for fn in rsjobs.call_proteomexchange(request.POST['px_acc']):
        ftpurl = urlsplit(fn['downloadLink'])
        filename = os.path.split(ftpurl.path)[1]
        fakemd5 = md5()
        fakemd5.update(filename.encode('utf-8'))
        fakemd5 = fakemd5.hexdigest()
        rawfn = get_or_create_rawfile(fakemd5, filename, extprod,
                                      fn['fileSize'], date, {'claimed': True})
        raw_ids.append(rawfn['file_id'])
        if not rawfn['stored']:
            sfn = StoredFile(rawfile_id=rawfn['file_id'],
                             filetype='raw',
                             servershare=tmpshare,
                             path='',
                             filename=filename,
                             md5='',
                             checked=False)
            sfn.save()
    rsjob = jobutil.create_dataset_job('download_px_data', dset.id,
                                       request.POST['px_acc'], raw_ids,
                                       settings.TMPSHARENAME)
    return JsonResponse({})  # JsonResponse requires a data argument
Code example #7
File: views.py Project: glormph/kantele
def download_px_project(request):
    # FIXME check if pxacc exists on pride and here, before creating dset
    # FIXME View checks project and returns maybe as a nicety how many files it will download.
    # FIXME if already exist, update experiment name in view
    # get or create dataset
    dset = dsviews.get_or_create_px_dset(request.POST['exp'], request.POST['px_acc'], request.POST['user_id'])
    # get or create raw/storedfiles
    date = datetime.strftime(timezone.now(), '%Y-%m-%d %H:%M')
    tmpshare = ServerShare.objects.get(name=settings.TMPSHARENAME)
    raw_ids = []
    extprod = Producer.objects.get(pk=settings.EXTERNAL_PRODUCER_ID)
    for fn in rsjobs.call_proteomexchange(request.POST['px_acc']):
        ftpurl = urlsplit(fn['downloadLink'])
        filename = os.path.split(ftpurl.path)[1]
        fakemd5 = md5()
        fakemd5.update(filename.encode('utf-8'))
        fakemd5 = fakemd5.hexdigest()
        rawfn = get_or_create_rawfile(fakemd5, filename, extprod,
                                      fn['fileSize'], date, {'claimed': True})
        raw_ids.append(rawfn['file_id'])
        if not rawfn['stored']:
            sfn = StoredFile(rawfile_id=rawfn['file_id'], filetype_id=settings.RAW_SFGROUP_ID,
                             servershare=tmpshare, path='',
                             filename=filename, md5='', checked=False)
            sfn.save()
    rsjob = jobutil.create_dataset_job(
        'download_px_data', dset.id, request.POST['px_acc'], raw_ids,
        settings.TMPSHARENAME)
    return HttpResponse()
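The placeholder-MD5 idiom above (hashing the file name because the real checksum is unknown until the file has been downloaded) can be reproduced on its own; the file name here is an assumed example:

# Standalone sketch of the fake-MD5 placeholder used in download_px_project.
from hashlib import md5

filename = 'example_run01.raw'  # assumed example file name
fakemd5 = md5()
fakemd5.update(filename.encode('utf-8'))
print(fakemd5.hexdigest())  # deterministic placeholder until the real MD5 is known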
Code example #8
File: views.py Project: glormph/kantele
def refine_mzmls(request, dataset_id):
    """Creates a job that runs the workflow with the latest version of the mzRefine containing NXF repo.
    Jobs and analysis entries are not created for dsets with full set of refined mzmls (403)."""
    # FIXME get analysis if it does exist, in case someone reruns?
    # Check if files lack refined mzMLs
    nr_refined = filemodels.StoredFile.objects.filter(rawfile__datasetrawfile__dataset_id=dataset_id, filetype_id=settings.REFINEDMZML_SFGROUP_ID, checked=True).count()
    nr_mzml = filemodels.StoredFile.objects.filter(rawfile__datasetrawfile__dataset_id=dataset_id, filetype_id=settings.MZML_SFGROUP_ID).count()
    if nr_mzml == nr_refined:
        return HttpResponseForbidden()
    dset = dsmodels.Dataset.objects.select_related('quantdataset__quanttype').get(pk=dataset_id)
    analysis = anmodels.Analysis(user_id=request.user.id, name='refine_dataset_{}'.format(dataset_id))
    analysis.save()
    if dsmodels.Dataset.objects.filter(pk=dataset_id, deleted=False).count():
        jj.create_dataset_job('refine_mzmls', dataset_id, analysis.id, settings.MZREFINER_NXFWFV_ID, 
                              settings.MZREFINER_FADB_ID, dset.quantdataset.quanttype.shortname)
    return HttpResponse()
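A hedged sketch of how refine_mzmls might be routed; the path string and module import are assumptions, only the <int:dataset_id> argument follows from the view signature above:

# Hypothetical urls.py entry; path and import are assumptions, not project code.
from django.urls import path
from analysis import views  # assumed module location

urlpatterns = [
    path('refine/<int:dataset_id>/', views.refine_mzmls, name='refine_mzmls'),
]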
Code example #9
def create_mzmls(request, dataset_id):
    jj.create_dataset_job('convert_dataset_mzml', dataset_id)
    return HttpResponse()
Code example #10
File: views.py Project: ypriverol/kantele
def update_dataset(data):
    dset = models.Dataset.objects.filter(pk=data['dataset_id']).select_related(
        'runname__experiment', 'datatype').get()
    if 'newprojectname' in data:
        project = newproject_save(data)
    else:
        project = models.Project.objects.get(pk=data['project_id'])
    newexp = False
    if 'newexperimentname' in data:
        experiment = models.Experiment(name=data['newexperimentname'],
                                       project=project)
        experiment.save()
        dset.runname.experiment = experiment
        newexp = True
    else:
        experiment = models.Experiment.objects.get(pk=data['experiment_id'])
        if data['experiment_id'] != dset.runname.experiment_id:
            newexp = True
            dset.runname.experiment = experiment
    if data['runname'] != dset.runname.name or newexp:
        # Save if new experiment and/or new run name; Runname is coupled 1-1 to dataset
        print('Update data')
        dset.runname.name = data['runname']
        dset.runname.save()
    # update species
    print('Update species')
    savedspecies = {
        x.species_id
        for x in models.DatasetSpecies.objects.filter(dataset_id=dset.id)
    }
    newspec = {int(x) for x in data['organism_ids']}
    models.DatasetSpecies.objects.bulk_create([
        models.DatasetSpecies(dataset_id=dset.id, species_id=spid)
        for spid in newspec.difference(savedspecies)
    ])
    models.DatasetSpecies.objects.filter(
        species_id__in=savedspecies.difference(newspec)).delete()
    dset.datatype_id = data['datatype_id']
    # update prefrac
    try:
        pfds = models.PrefractionationDataset.objects.filter(
            dataset_id=dset.id).select_related(
                'hiriefdataset', 'prefractionationfractionamount',
                'prefractionationlength').get()
    except models.PrefractionationDataset.DoesNotExist:
        pfds = False
    hrf_id, hiph_id = get_prefrac_ids()
    if not pfds and not data['prefrac_id']:
        pass
    elif not pfds and data['prefrac_id']:
        save_dataset_prefrac(dset.id, data, hrf_id)
    elif pfds and not data['prefrac_id']:
        models.PrefractionationDataset.objects.get(
            dataset_id=data['dataset_id']).delete()
    else:
        update_dataset_prefrac(pfds, data, hrf_id)
    dtype = get_datatype(data['datatype_id'])
    prefrac = get_prefrac(data['prefrac_id'])
    qprot_id = get_quantprot_id()
    new_storage_loc = get_storage_location(project, experiment, dset.runname,
                                           qprot_id, hrf_id, dtype, prefrac,
                                           data)
    if (new_storage_loc != dset.storage_loc and
            models.DatasetRawFile.objects.filter(dataset_id=dset.id).count()):
        create_dataset_job('rename_storage_loc', dset.id, dset.storage_loc,
                           new_storage_loc)
        dset.storage_loc = new_storage_loc
    dset.save()
    if data['is_corefac']:
        if dset.corefacdatasetcontact.email != data['corefaccontact']:
            dset.corefacdatasetcontact.email = data['corefaccontact']
            dset.corefacdatasetcontact.save()
    return JsonResponse({'dataset_id': dset.id})
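A minimal sketch of the data dict this update_dataset variant expects, reconstructed from the keys it reads above; ids and names are illustrative assumptions, and 'newprojectname'/'newexperimentname' would replace the corresponding id keys when new records should be created:

# Keys inferred from update_dataset above; every value is an illustrative example.
example_data = {
    'dataset_id': 42,
    'project_id': 3,           # or 'newprojectname': '...' to create a project
    'experiment_id': 7,        # or 'newexperimentname': '...' to create one
    'runname': 'run_2021_01',
    'organism_ids': ['9606'],
    'datatype_id': 1,
    'prefrac_id': None,        # falsy -> treated as no prefractionation
    'is_corefac': True,
    'corefaccontact': 'contact@example.com',
}
update_dataset(example_data)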
Code example #11
File: views.py Project: glormph/kantele
def create_mzmls(request, dataset_id):
    if dsmodels.Dataset.objects.filter(pk=dataset_id, deleted=False).count():
        jj.create_dataset_job('convert_dataset_mzml', dataset_id)
    return HttpResponse()
Code example #12
File: views.py Project: glormph/kantele
def update_dataset(data):
    dset = models.Dataset.objects.filter(pk=data['dataset_id']).select_related(
        'runname__experiment', 'datatype').get()
    if 'newprojectname' in data:
        project = newproject_save(data)
    else:
        project = models.Project.objects.get(pk=data['project_id'])
    newexp = False
    if 'newexperimentname' in data:
        experiment = models.Experiment(name=data['newexperimentname'],
                                       project=project)
        experiment.save()
        dset.runname.experiment = experiment
        newexp = True
    else:
        experiment = models.Experiment.objects.get(pk=data['experiment_id'])
        if data['experiment_id'] != dset.runname.experiment_id:
            newexp = True
            dset.runname.experiment = experiment
    if data['runname'] != dset.runname.name or newexp:
        # Save if new experiment and/or new run name; Runname is coupled 1-1 to dataset
        print('Update data')
        dset.runname.name = data['runname']
        dset.runname.save()
    # update species
    print('Update species')
    savedspecies = {x.species_id for x in
                    models.DatasetSpecies.objects.filter(dataset_id=dset.id)}
    newspec = {int(x) for x in data['organism_ids']}
    models.DatasetSpecies.objects.bulk_create([models.DatasetSpecies(
        dataset_id=dset.id, species_id=spid)
        for spid in newspec.difference(savedspecies)])
    models.DatasetSpecies.objects.filter(
        species_id__in=savedspecies.difference(newspec)).delete()
    dset.datatype_id = data['datatype_id']
    # update prefrac
    try:
        pfds = models.PrefractionationDataset.objects.filter(
            dataset_id=dset.id).select_related(
            'hiriefdataset', 'prefractionationfractionamount',
            'prefractionationlength').get()
    except models.PrefractionationDataset.DoesNotExist:
        pfds = False
    hrf_id, hiph_id = get_prefrac_ids()
    if not pfds and not data['prefrac_id']:
        pass
    elif not pfds and data['prefrac_id']:
        save_dataset_prefrac(dset.id, data, hrf_id)
    elif pfds and not data['prefrac_id']:
        models.PrefractionationDataset.objects.get(
            dataset_id=data['dataset_id']).delete()
    else:
        update_dataset_prefrac(pfds, data, hrf_id)
    dtype = get_datatype(data['datatype_id'])
    prefrac = get_prefrac(data['prefrac_id'])
    qprot_id = get_quantprot_id()
    new_storage_loc = get_storage_location(project, experiment, dset.runname,
                                           qprot_id, hrf_id, dtype, prefrac,
                                           data)
    if (new_storage_loc != dset.storage_loc and 
            models.DatasetRawFile.objects.filter(dataset_id=dset.id).count()):
        create_dataset_job('rename_storage_loc', dset.id, dset.storage_loc,
                           new_storage_loc)
        dset.storage_loc = new_storage_loc
    elif new_storage_loc != dset.storage_loc:
        dset.storage_loc = new_storage_loc
    dset.save()
    if data['ptype_id'] != settings.LOCAL_PTYPE_ID:
        if dset.externaldatasetcontact.email != data['externalcontact']:
            dset.externaldatasetcontact.email = data['externalcontact']
            dset.externaldatasetcontact.save()
    return JsonResponse({'dataset_id': dset.id})