def new_search(dataset_pk):
    '''Perform a new database search on the MS2 files belonging to the
    dataset identified by dataset_pk.
    '''

    current_dataset = models.Dataset.query.get_or_404(dataset_pk)
    ms2_files = current_dataset.ms2files.all()
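    # a dataset with no associated MS2 files has nothing to search against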
    if not ms2_files:
        abort(404)

    params_form = forms.SearchParamsForm()

    if params_form.validate_on_submit():
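        # validate_on_submit() is True only for a valid POST; a plain GET
        # falls through to render the search form at the bottom of this view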

        # round-trip through JSON to coerce any Decimal form values into plain floats
        params = json.loads(json.dumps(
            params_form.data,
            default=lambda x: float(x) if isinstance(x, decimal.Decimal) else x))

        # clean and edit the submitted params
        params = views_helpers.clean_params(params)

        # Save new DBSearch record (including params_dict) in database
        new_dbsearch_id = views_helpers.save_new_dbsearch(dataset_pk, params=params)

        params['dbsearch_id'] = new_dbsearch_id
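        # pass the new DBSearch ID along to the Celery tasks via params so the
        # search results can be tied back to this DBSearch record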

        remote_host = 'admin@wolanlab'
        remote_filepaths = [ms2file.file_path for ms2file in ms2_files]

        # hard-coded test files for non-production configs; remove once this
        # runs on the production server
        if 'production' not in app.config['CONFIG_CLASS']:
            remote_filepaths = ['/home/admin/test_files/121614_SC_sampleH1sol_25ug_pepstd_HCD_FTMS_MS2_07_11.ms2',
                                '/home/admin/test_files/121614_SC_sampleH1sol_25ug_pepstd_HCD_FTMS_MS2_07_11_duplicate.ms2']

        remote_directory = str(uuid4()) # random 36-character UUID for the remote working directory
        rsync_task = tasks.rsync_file.s(remote_host, remote_filepaths, new_local_directory=remote_directory).set(queue='sandip')
        split_and_create_jobs_task = tasks.split_ms2_and_make_jobs.s(params).set(queue='sandip')

        launch_submission_tasks = tasks.launch_submission_tasks.s().set(queue='sandip')
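        # .s() builds a Celery signature (the task plus partially applied
        # arguments) without executing it; .set(queue='sandip') routes each
        # task to the 'sandip' worker queue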
        chained_tasks = rsync_task | split_and_create_jobs_task | launch_submission_tasks
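        # chaining with | passes each task's return value as the first argument
        # of the next task, so rsync_file's result (presumably the synced local
        # file paths) flows into split_ms2_and_make_jobs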
        task = chained_tasks.apply_async()

        # save task ID to local database
        current_dbsearch = models.DBSearch.query.get(new_dbsearch_id)
        current_dbsearch.celery_id = task.id  # the string Celery task ID
        current_dbsearch.status = 'submitted'
        current_dbsearch.remote_directory = remote_directory
        db.session.commit()

        # task.children (the GroupResult) won't be populated until a few moments after launching
        app.logger.debug('Celery task children: {}'.format(task.children))

        app.logger.info('Launched Celery task {}'.format(task))

        return jsonify(params)

    return render_template( 'data/newsearch.html', 
                            params_form=params_form, 
                            current_dataset=current_dataset, 
                            ms2_files=ms2_files, 
                            )


def document_index():
    '''Render the "index" page for all document types, including the file
    uploader interface, and handle new dataset uploads.
    '''

    upload_form = forms.DatasetUploadForm()

    recent_five_datasets = views_helpers.get_recent_records(models.Dataset, models.Dataset.uploaded_time)
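    # the five most recently uploaded datasets, for display on the index page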

    if upload_form.validate_on_submit():

        files = request.files.getlist('data_file')
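        # 'data_file' is the multi-file input field on the upload form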
        filenames = [file_obj.filename for file_obj in files]
        app.logger.info('User trying to upload files {}'.format(', '.join(filenames)))

        if not check_file_types(filenames):
            # TODO: this should return a redirect to a different view or an AJAX response
            return "Can't upload all of those file types... {}".format(', '.join(filenames))

        # save new uploaded file data
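        # each save_new_file call returns a (file_path, original_filename)
        # tuple, which is unpacked when the records are saved below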
        try:
            ms1_file_paths = [save_new_file(file_obj) for file_obj in files if file_obj.filename.endswith('.ms1')]
            ms2_file_paths = [save_new_file(file_obj) for file_obj in files if file_obj.filename.endswith('.ms2')]
            sqt_file_paths = [save_new_file(file_obj) for file_obj in files if file_obj.filename.endswith('.sqt')]
            dta_file_paths = [save_new_file(file_obj) for file_obj in files if file_obj.filename.endswith('.txt')]
        except Exception:
            app.logger.exception('Error saving new files')
            return 'Error saving new files'

        dataset_id = None
        dbsearch_id = None
        ms1_data_ids = None
        ms2_data_ids = None
        sqt_data_ids = None
        dta_data_ids = None
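        # IDs for file types absent from this upload stay None and are
        # returned as null in the JSON response below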

        # save new dataset in database
        dataset_name = upload_form.dataset_name.data
        dataset_description = upload_form.dataset_desc.data
        try:
            dataset_id = views_helpers.save_new_dataset(dataset_name, dataset_description)
        except Exception:
            app.logger.error('Error creating new dataset {}'.format(dataset_name))
            raise

        if ms1_file_paths:
            try:
                # save MS1 records to database
                ms1_data_ids = [views_helpers.save_new_ms1_record(dataset_id, ms1_file_path, original_filename) for ms1_file_path, original_filename in ms1_file_paths]
            except Exception:
                # log database error and re-raise
                app.logger.error('Error saving new MS1 file info to database')
                raise

        if ms2_file_paths:
            try:
                # save MS2 records to database
                ms2_data_ids = [views_helpers.save_new_ms2_record(dataset_id, ms2_file_path, original_filename) for ms2_file_path, original_filename in ms2_file_paths]
            except Exception:
                # log database error and re-raise
                app.logger.error('Error saving new MS2 file info to database')
                raise

        if sqt_file_paths or dta_file_paths:
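            # SQT and DTA files are search output, so they hang off a parent
            # DBSearch record rather than the Dataset itself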
            try:
                dbsearch_id = views_helpers.save_new_dbsearch(dataset_id) # create DBSearch
            except Exception:
                # log database error and re-raise
                app.logger.error('Error saving new Database Search to database')
                raise
            if sqt_file_paths:
                try:
                    # save SQT records to database
                    sqt_data_ids = [views_helpers.save_new_sqt_record(dbsearch_id, sqt_file_path, original_filename) for sqt_file_path, original_filename in sqt_file_paths]
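                    # record a scan count for each newly saved SQT file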
                    for sqt_data_id in sqt_data_ids:
                        views_helpers.count_scans_in_file(sqt_data_id, 'sqt')
                except Exception:
                    app.logger.error('Error saving new SQT file info to database')
                    raise
            if dta_file_paths:
                try:
                    # save DTA records to database
                    dta_data_ids = [views_helpers.save_new_dta_record(dbsearch_id, dta_file_path, original_filename) for dta_file_path, original_filename in dta_file_paths]
                except Exception:
                    app.logger.error('Error saving new DTA file info to database')
                    raise

        return jsonify({'dataset_id': dataset_id, 
                        'dataset_name': dataset_name, 
                        'dataset_description': dataset_description, 
                        'dbsearch_id': dbsearch_id,
                        'ms1_data_ids': ms1_data_ids,
                        'ms2_data_ids': ms2_data_ids, 
                        'sqt_data_ids': sqt_data_ids, 
                        'dta_data_ids': dta_data_ids, 
                        })

    return render_template('data/document_index.html', upload_form=upload_form, recent_five_datasets=recent_five_datasets)