예제 #1
0
def get_metadata_by_alternative_id(recid):
    """
    Render a record looked up by its alternative (INSPIRE) identifier.

    :param recid: alternative record id, expected in the form 'ins<inspire_id>'
    :return: rendered record page, or a 404 response if the id is not
             'ins'-prefixed or the record cannot be found
    """
    try:
        if "ins" in recid:
            # strip the 'ins' prefix to get the bare INSPIRE id
            recid = recid.replace("ins", "")
            record = get_records_matching_field('inspire_id', recid,
                                                doc_type=CFG_PUB_TYPE)
            # take the first search hit; an IndexError here (no hits) is
            # caught by the outer handler and converted into a 404
            record = record['hits']['hits'][0].get("_source")
            try:
                version = int(request.args.get('version', -1))
            except ValueError:
                version = -1  # malformed version param: fall back to latest

            output_format = request.args.get('format', 'html')
            # NOTE(review): bool() on a query-string value is True for any
            # non-empty string, so even '?light=false' enables light mode
            light_mode = bool(request.args.get('light', False))

            return render_record(recid=record['recid'], record=record, version=version, output_format=output_format,
                                 light_mode=light_mode)
        else:
            log.error("Unable to find %s.", recid)
            return abort(404)

    except Exception as e:
        log.error("Unable to find %s.", recid)
        log.error(e)
        return abort(404)
예제 #2
0
def unload_submission(record_id, version=1):
    """
    Unload a submission, but only when *version* is the latest version of
    the record. For version 1 the search-index entries (data tables and
    the publication itself) are removed as well.

    :param record_id: publication record id to unload
    :param version: version to remove; must equal the latest version
    """
    submission = get_latest_hepsubmission(publication_recid=record_id)

    # Guard: nothing to do if the record is unknown.
    if not submission:
        print('Record {0} not found'.format(record_id))
        return

    # Guard: refuse to unload anything but the latest version.
    if version != submission.version:
        print('Not unloading record {0} version {1} (latest version {2})...'.format(record_id, version, submission.version))
        return

    print('Unloading record {0} version {1}...'.format(record_id, version))
    remove_submission(record_id, version)

    if version == 1:
        # Version 1 owns the index entries: drop the data tables first,
        # then the publication document itself.
        matches = get_records_matching_field("related_publication", record_id)
        for hit in matches["hits"]["hits"]:
            print("\t Removed data table {0} from index".format(hit["_id"]))
            try:
                delete_item_from_index(doc_type=CFG_DATA_TYPE, id=hit["_id"], parent=record_id)
            except Exception as e:
                logging.error("Unable to remove {0} from index. {1}".format(hit["_id"], e))

        try:
            delete_item_from_index(doc_type=CFG_PUB_TYPE, id=record_id)
            print("Removed publication {0} from index".format(record_id))
        except NotFoundError as nfe:
            print(nfe)

    print('Finished unloading record {0} version {1}.'.format(record_id, version))
예제 #3
0
def test_create_submission(app):
    """
    Test the whole submission pipeline in loading a file, ensuring the HEPSubmission object is created,
    all the files have been added, and the record has been indexed.
    :return:
    """
    with app.app_context():
        # test submission part works

        # minimal submission payload: INSPIRE id, title and participants
        record = {'inspire_id': '19999999',
                  'title': 'HEPData Testing 1',
                  'reviewer': {'name': 'Testy McTester', 'email': '*****@*****.**'},
                  'uploader': {'name': 'Testy McTester', 'email': '*****@*****.**'},
                  'message': 'This is ready',
                  'user_id': 1}

        hepdata_submission = process_submission_payload(**record)

        # a fresh submission starts at version 1 with status 'todo'
        assert (hepdata_submission.version == 1)
        assert (hepdata_submission.overall_status == 'todo')

        # test upload works
        base_dir = os.path.dirname(os.path.realpath(__file__))

        directory = os.path.join(base_dir, 'test_data/test_submission')
        process_submission_directory(directory, os.path.join(directory, 'submission.yaml'),
                                     hepdata_submission.publication_recid)

        # the test fixture contains 8 data tables, 4 resources, 4 participants
        data_submissions = DataSubmission.query.filter_by(
            publication_recid=hepdata_submission.publication_recid).count()
        assert (data_submissions == 8)
        assert (len(hepdata_submission.resources) == 4)
        assert (len(hepdata_submission.participants) == 4)

        # force finalisation to skip the coordinator permission check
        do_finalise(hepdata_submission.publication_recid, force_finalise=True)

        assert (record_exists(inspire_id=record['inspire_id']))

        # Test record is in index...
        index_records = get_records_matching_field('inspire_id', record['inspire_id'], doc_type='publication')
        print(index_records)
        assert (len(index_records['hits']['hits']) == 1)

        publication_record = get_record_contents(hepdata_submission.publication_recid)

        print(publication_record)
        assert (publication_record is not None)

        # rendering context for the record page should be complete
        ctx = format_submission(hepdata_submission.publication_recid, publication_record, hepdata_submission.version, 1,
                                hepdata_submission)

        assert(ctx is not None)

        assert(ctx['version'] == 1)
        assert (ctx['recid'] == hepdata_submission.publication_recid)
예제 #4
0
def resolve_doi_data(doi):
    """
    Redirect a journal DOI to its HEPData record.

    :param doi: DOI of the journal article
    :return: redirect to the matching record, or a 404 response when no
             record carries that DOI
    """
    hits = get_records_matching_field('doi',
                                      doi,
                                      source={"includes": ['inspire_id']})
    if hits.get('hits').get('total') > 0:
        first_hit = hits.get('hits').get('hits')[0]
        inspire_id = first_hit.get('_source').get('inspire_id')
        return redirect('/record/ins{0}'.format(inspire_id))
    return abort(404)
예제 #5
0
File: api.py  Project: HEPData/hepdata3
def update_submissions(inspire_ids_to_update, only_record_information=False):
    """
    Queue an asynchronous update task for each INSPIRE id that already
    has a record in the search index.

    :param inspire_ids_to_update: iterable of INSPIRE ids (optionally
        'ins'-prefixed)
    :param only_record_information: passed through to the migrator task;
        if True only the record metadata is refreshed
    """
    migrator = Migrator()
    # plain iteration: the previous enumerate() index was never used
    for inspire_id in inspire_ids_to_update:
        _cleaned_id = inspire_id.replace("ins", "")
        _matching_records = get_records_matching_field('inspire_id', _cleaned_id)
        hits = _matching_records['hits']['hits']
        if hits:
            print('The record with id {} will be updated now'.format(inspire_id))
            source = hits[0]['_source']
            recid = source['recid']
            # data tables point at their parent publication; update the
            # publication record rather than the table itself
            if 'related_publication' in source:
                recid = source['related_publication']
            migrator.update_file.delay(inspire_id, recid,
                                       only_record_information)
        else:
            log.error('No record exists with id {0}. You should load this file first.'.format(inspire_id))
예제 #6
0
def get_doi_banner(doi):
    """
    Serve the HEPData DOI banner image when a record with the given
    journal DOI exists, otherwise a transparent 1x1 pixel image.

    :param doi: DOI of the journal article
    :return: Flask ``send_file`` response with the appropriate image
    """

    matching = get_records_matching_field('doi',
                                          doi,
                                          source={"includes": ['inspire_id']})
    if matching.get('hits').get('total') > 0:
        # fix: removed leftover debug print(matching) that dumped the full
        # search response to stdout on every request
        return send_file(
            os.path.join(base_dir, 'static/img/hepdata-doi-banner.png'))
    else:
        return send_file(os.path.join(base_dir, 'static/img/1px.png'))
예제 #7
0
File: cli.py  Project: drjova/hepdata
def find_duplicates_and_remove():
    """Will go through the application to find any duplicates then remove them."""
    duplicates = []
    for inspire_id in get_all_ids_in_current_system(prepend_id_with=""):
        hits = get_records_matching_field('inspire_id', inspire_id,
                                          doc_type=CFG_PUB_TYPE)['hits']['hits']
        # more than one hit for the same INSPIRE id means a duplicate
        if len(hits) > 1:
            duplicates.append(hits[0]['_source']['recid'])

    print('There are {} duplicates. Going to remove.'.format(len(duplicates)))
    do_unload(duplicates)

    # reindex submissions for dashboard view
    admin_indexer = AdminIndexer()
    admin_indexer.reindex(recreate=True)
예제 #8
0
def resolve_doi_data(doi):
    """
    Resolve a journal DOI to the corresponding HEPData record.\n
    Route: ``/doidata/<path:doi>``

    :param doi: DOI of journal article
    :return: redirect to HEPData record (or 404 if it doesn't exist)
    """
    matching = get_records_matching_field('doi',
                                          doi,
                                          source={"includes": ['inspire_id']})
    total = matching.get('hits').get('total')
    if total > 0:
        source = matching.get('hits').get('hits')[0].get('_source')
        return redirect('/record/ins{0}'.format(source.get('inspire_id')))
    return abort(404)
예제 #9
0
def get_doi_banner(doi):
    """
    Return either a HEPData image or a 1-pixel image depending on whether a HEPData record
    with a given journal DOI exists.\n
    Route: ``/doibanner/<path:doi>``

    :param doi: DOI of journal article
    :return: send_file
    """

    matching = get_records_matching_field('doi',
                                          doi,
                                          source={"includes": ['inspire_id']})
    if matching.get('hits').get('total') > 0:
        # fix: removed leftover debug print(matching) that dumped the full
        # search response to stdout on every request
        return send_file(
            os.path.join(base_dir, 'static/img/hepdata-doi-banner.png'))
    else:
        return send_file(os.path.join(base_dir, 'static/img/1px.png'))
예제 #10
0
def unload_submission(record_id):
    """
    Remove a submission and its search-index entries.

    :param record_id: publication record id to unload
    """
    print('unloading {}...'.format(record_id))
    remove_submission(record_id)

    # drop the data-table index entries belonging to this publication
    data_records = get_records_matching_field("related_publication", record_id)
    for record in data_records["hits"]["hits"]:
        print("\t Removed data table {0} from index".format(record["_id"]))
        try:
            delete_item_from_index(doc_type=CFG_DATA_TYPE, id=record["_id"], parent=record_id)
        except Exception as e:
            logging.error("Unable to remove {0} from index. {1}".format(record["_id"], e))

    try:
        delete_item_from_index(doc_type=CFG_PUB_TYPE, id=record_id)
        print("Removed publication {0} from index".format(record_id))
    except NotFoundError as nfe:
        # fix: BaseException.message was removed in Python 3, so
        # nfe.message raised AttributeError; print the exception itself
        print(nfe)

    print('Finished unloading {0}.'.format(record_id))
예제 #11
0
def unload_submission(record_id):
    """
    Remove a submission and its search-index entries.

    :param record_id: publication record id to unload
    """
    print('unloading {}...'.format(record_id))
    remove_submission(record_id)

    # drop the data-table index entries belonging to this publication
    data_records = get_records_matching_field("related_publication", record_id)
    for record in data_records["hits"]["hits"]:
        print("\t Removed data table {0} from index".format(record["_id"]))
        try:
            delete_item_from_index(doc_type=CFG_DATA_TYPE, id=record["_id"], parent=record_id)
        except Exception as e:
            logging.error("Unable to remove {0} from index. {1}".format(record["_id"], e))

    try:
        delete_item_from_index(doc_type=CFG_PUB_TYPE, id=record_id)
        print("Removed publication {0} from index".format(record_id))
    except NotFoundError as nfe:
        # fix: BaseException.message was removed in Python 3, so
        # nfe.message raised AttributeError; print the exception itself
        print(nfe)

    print('Finished unloading {0}.'.format(record_id))
예제 #12
0
File: cli.py  Project: islahudinees/hepdata
def find_duplicates_and_remove(base_url):
    """Will go through the application to find any duplicates then remove them."""
    inspire_ids = importer_api.get_inspire_ids(base_url=base_url)
    # get_inspire_ids signals failure with False: nothing to do then
    if inspire_ids is False:
        return

    duplicates = []
    for inspire_id in inspire_ids:
        hits = get_records_matching_field('inspire_id',
                                          inspire_id,
                                          doc_type=CFG_PUB_TYPE)['hits']['hits']
        # more than one publication hit per INSPIRE id is a duplicate
        if len(hits) > 1:
            duplicates.append(hits[0]['_source']['recid'])

    print('There are {} duplicates. Going to remove.'.format(
        len(duplicates)))
    do_unload(duplicates)

    # reindex submissions for dashboard view
    admin_indexer = AdminIndexer()
    admin_indexer.reindex(recreate=True)
예제 #13
0
File: cli.py  Project: HEPData/hepdata3
def find_duplicates_and_remove():
    """
    Will go through the application to find any duplicates then remove them.
    :return:
    """
    duplicates = []
    for inspire_id in get_all_ids_in_current_system(prepend_id_with=""):
        matching_hits = get_records_matching_field(
            'inspire_id', inspire_id, doc_type=CFG_PUB_TYPE)['hits']['hits']
        # a second hit for the same INSPIRE id marks a duplicate record
        if len(matching_hits) > 1:
            duplicates.append(matching_hits[0]['_source']['recid'])

    print('There are {} duplicates. Going to remove.'.format(len(duplicates)))
    do_unload(duplicates)

    # reindex submissions for dashboard view
    admin_indexer = AdminIndexer()
    admin_indexer.reindex(recreate=True)
예제 #14
0
def test_load_file(app, migrator):
    """
    Load a real INSPIRE record through the migrator and verify that the
    submission, its index entry and its data tables were created.
    """
    with app.app_context():
        success = migrator.load_file('ins1487726')
        assert (success)

        hepsubmission = get_latest_hepsubmission(inspire_id='1487726')

        assert (hepsubmission is not None)
        assert (hepsubmission.inspire_id == '1487726')

        # exactly one publication should be indexed for this INSPIRE id
        index_records = get_records_matching_field('inspire_id',
                                                   hepsubmission.inspire_id,
                                                   doc_type='publication')
        assert (len(index_records['hits']['hits']) == 1)

        publication_record = get_record_contents(
            hepsubmission.publication_recid)
        assert (publication_record is not None)

        # this record is expected to carry 5 data tables
        data_submissions = DataSubmission.query.filter_by(
            publication_recid=hepsubmission.publication_recid).count()
        assert (data_submissions == 5)
예제 #15
0
File: api.py  Project: ruphy/hepdata
def update_submissions(inspire_ids_to_update,
                       force=False,
                       only_record_information=False):
    """
    Queue an asynchronous update task for each INSPIRE id that already
    has a record in the search index.

    :param inspire_ids_to_update: iterable of INSPIRE ids (optionally
        'ins'-prefixed)
    :param force: passed through to the migrator update task
    :param only_record_information: passed through to the migrator task;
        if True only the record metadata is refreshed
    """
    migrator = Migrator()
    # plain iteration: the previous enumerate() index was never used
    for inspire_id in inspire_ids_to_update:
        _cleaned_id = inspire_id.replace("ins", "")
        _matching_records = get_records_matching_field("inspire_id",
                                                       _cleaned_id)
        hits = _matching_records["hits"]["hits"]
        if hits:
            source = hits[0]["_source"]
            recid = source["recid"]
            # data tables point at their parent publication; update the
            # publication record rather than the table itself
            if "related_publication" in source:
                recid = source["related_publication"]
            print(
                "The record with inspire_id {} and recid {} will be updated now"
                .format(inspire_id, recid))
            migrator.update_file.delay(inspire_id, recid, force,
                                       only_record_information)
        else:
            log.error(
                "No record exists with id {0}. You should load this file first."
                .format(inspire_id))
예제 #16
0
File: views.py  Project: HEPData/hepdata3
def get_metadata_by_alternative_id(recid):
    """
    Render a record looked up by its alternative (INSPIRE) identifier.

    :param recid: alternative record id, expected in the form 'ins<inspire_id>'
    :return: rendered record page, or the 404 page if the id is not
             'ins'-prefixed or the record cannot be found
    """
    try:
        if "ins" in recid:
            # strip the 'ins' prefix to get the bare INSPIRE id
            recid = recid.replace("ins", "")
            record = get_records_matching_field("inspire_id", recid, doc_type=CFG_PUB_TYPE)
            # IndexError on zero hits is handled by the outer except below
            record = record["hits"]["hits"][0].get("_source")
            version = int(request.args.get("version", -1))

            output_format = request.args.get("format", "html")
            # NOTE(review): bool() on a query-string value is True for any
            # non-empty string, so even '?light=false' enables light mode
            light_mode = bool(request.args.get("light", False))

            return render_record(
                recid=record["recid"],
                record=record,
                version=version,
                output_format=output_format,
                light_mode=light_mode,
            )
        # fix: a non-'ins' id previously fell through and the view returned
        # None (a server error in Flask); render the 404 page explicitly.
        log.error("Unable to find {0}.".format(recid))
        return render_template("hepdata_theme/404.html")
    except Exception as e:

        log.error("Unable to find {0}.".format(recid))
        log.error(e)
        return render_template("hepdata_theme/404.html")
예제 #17
0
def test_create_submission(app, admin_idx):
    """
    Test the whole submission pipeline in loading a file, ensuring the HEPSubmission object is created,
    all the files have been added, and the record has been indexed.
    :return:
    """
    with app.app_context():

        # start from a clean admin index
        admin_idx.recreate_index()

        # test submission part works

        record = {
            'inspire_id': '19999999',
            'title': 'HEPData Testing 1',
            'reviewer': {
                'name': 'Testy McTester',
                'email': '*****@*****.**'
            },
            'uploader': {
                'name': 'Testy McTester',
                'email': '*****@*****.**'
            },
            'message': 'This is ready',
            'user_id': 1
        }

        hepdata_submission = process_submission_payload(**record)

        # a fresh submission starts at version 1 with status 'todo'
        assert (hepdata_submission.version == 1)
        assert (hepdata_submission.overall_status == 'todo')

        # test upload works
        base_dir = os.path.dirname(os.path.realpath(__file__))

        # copy the fixture into the record's timestamped data directory
        test_directory = os.path.join(base_dir, 'test_data/test_submission')
        time_stamp = str(int(round(time.time())))
        directory = get_data_path_for_record(
            hepdata_submission.publication_recid, time_stamp)
        shutil.copytree(test_directory, directory)
        assert (os.path.exists(directory))

        process_submission_directory(
            directory, os.path.join(directory, 'submission.yaml'),
            hepdata_submission.publication_recid)

        # the submission should now be findable in the admin index
        admin_idx_results = admin_idx.search(
            term=hepdata_submission.publication_recid, fields=['recid'])
        assert (admin_idx_results is not None)

        # the test fixture contains 8 data tables, 4 resources, 4 participants
        data_submissions = DataSubmission.query.filter_by(
            publication_recid=hepdata_submission.publication_recid).count()
        assert (data_submissions == 8)
        assert (len(hepdata_submission.resources) == 4)
        assert (len(hepdata_submission.participants) == 4)

        # finalise without conversion; force skips the coordinator check
        do_finalise(hepdata_submission.publication_recid,
                    force_finalise=True,
                    convert=False)

        assert (record_exists(inspire_id=record['inspire_id']))

        # Test record is in index...
        index_records = get_records_matching_field('inspire_id',
                                                   record['inspire_id'],
                                                   doc_type='publication')
        assert (len(index_records['hits']['hits']) == 1)

        publication_record = get_record_contents(
            hepdata_submission.publication_recid)

        assert (publication_record is not None)

        # rendering context for the record page should be complete
        ctx = format_submission(hepdata_submission.publication_recid,
                                publication_record, hepdata_submission.version,
                                1, hepdata_submission)

        assert (ctx is not None)

        assert (ctx['version'] == 1)
        assert (ctx['recid'] == hepdata_submission.publication_recid)

        # remove the submission and test that all is remove

        unload_submission(hepdata_submission.publication_recid)

        assert (not record_exists(inspire_id=record['inspire_id']))

        data_submissions = DataSubmission.query.filter_by(
            publication_recid=hepdata_submission.publication_recid).count()

        assert (data_submissions == 0)

        # give the index time to propagate the deletions
        sleep(2)

        admin_idx_results = admin_idx.search(
            term=hepdata_submission.publication_recid, fields=['recid'])
        assert (len(admin_idx_results) == 0)

        # Check file dir has been deleted
        assert (not os.path.exists(directory))
예제 #18
0
def do_finalise(recid, publication_record=None, force_finalise=False,
                commit_message=None, send_tweet=False, update=False):
    """
    Creates record SIP for each data record with a link to the associated
    publication, then marks the submission finished, mints DOIs (outside
    testing), reindexes and notifies participants.

    :param recid: publication record id to finalise
    :param publication_record: optional publication record contents passed
        through to finalise_datasubmission
    :param force_finalise: bypass the coordinator permission check
    :param commit_message: if given, stored as a RecordVersionCommitMessage
        and the submission's last_updated date is refreshed
    :param update: if True, treat as an update of an existing record so
        stale data-table index entries are collected and removed first
    :param send_tweet: if True, tweet about the newly finalised record
    :return: JSON string describing the outcome (may be None when the
        record update raised NoResultFound)
    """
    hep_submission = HEPSubmission.query.filter_by(
        publication_recid=recid, overall_status="todo").first()

    print('Finalising record {}'.format(recid))

    generated_record_ids = []
    if hep_submission \
        and (force_finalise or hep_submission.coordinator == int(current_user.get_id())):

        # only data tables belonging to the current version are finalised
        submissions = DataSubmission.query.filter_by(
            publication_recid=recid,
            version=hep_submission.version).all()

        version = hep_submission.version

        existing_submissions = {}
        if hep_submission.version > 1 or update:
            # we need to determine which are the existing record ids.
            existing_data_records = get_records_matching_field(
                'related_publication', recid, doc_type=CFG_DATA_TYPE)

            for record in existing_data_records["hits"]["hits"]:

                if "recid" in record["_source"]:
                    # remember each table's recid by title so the new
                    # version reuses it, then drop the stale index entry
                    existing_submissions[record["_source"]["title"]] = \
                        record["_source"]["recid"]
                    delete_item_from_index(record["_id"],
                                           doc_type=CFG_DATA_TYPE, parent=record["_source"]["related_publication"])

        current_time = "{:%Y-%m-%d %H:%M:%S}".format(datetime.now())

        for submission in submissions:
            finalise_datasubmission(current_time, existing_submissions,
                                    generated_record_ids,
                                    publication_record, recid, submission,
                                    version)

        try:
            record = get_record_by_id(recid)
            # If we have a commit message, then we have a record update.
            # We will store the commit message and also update the
            # last_updated flag for the record.
            record['hepdata_doi'] = hep_submission.doi

            if commit_message:
                # On a revision, the last updated date will
                # be the current date.
                hep_submission.last_updated = datetime.now()

                commit_record = RecordVersionCommitMessage(
                    recid=recid,
                    version=version,
                    message=str(commit_message))

                db.session.add(commit_record)

            record['last_updated'] = datetime.strftime(
                hep_submission.last_updated, '%Y-%m-%d %H:%M:%S')
            record['version'] = version

            record.commit()

            hep_submission.inspire_id = record['inspire_id']
            hep_submission.overall_status = "finished"
            db.session.add(hep_submission)

            db.session.commit()

            create_celery_app(current_app)

            # only mint DOIs if not testing.
            if not current_app.config.get('TESTING', False) and not current_app.config.get('NO_DOI_MINTING', False):
                for submission in submissions:
                    generate_doi_for_data_submission.delay(submission.id, submission.version)

                generate_doi_for_submission.delay(recid, version)

            # Reindex everything.
            index_record_ids([recid] + generated_record_ids)
            push_data_keywords(pub_ids=[recid])

            admin_indexer = AdminIndexer()
            admin_indexer.index_submission(hep_submission)

            send_finalised_email(hep_submission)

            # queue conversion of the finalised record to download formats
            for file_format in ['csv', 'yoda', 'root']:
                convert_and_store.delay(hep_submission.inspire_id, file_format, force=True)

            if send_tweet:
                tweet(record.get('title'), record.get('collaborations'),
                      "http://www.hepdata.net/record/ins{0}".format(record.get('inspire_id')))

            return json.dumps({"success": True, "recid": recid,
                               "data_count": len(submissions),
                               "generated_records": generated_record_ids})

        except NoResultFound:
            print('No record found to update. Which is super strange.')

    else:
        return json.dumps(
            {"success": False, "recid": recid,
             "errors": ["You do not have permission to finalise this "
                        "submission. Only coordinators can do that."]})
예제 #19
0
def remove_submission(record_id):
    """
    Removes the database entries related to a record: submissions,
    reviews, data resources, participants and the Invenio record objects.

    :param record_id: publication record id whose entries are removed
    :return: True on success; re-raises the exception (after rolling the
        session back) on failure
    """

    hepdata_submissions = HEPSubmission.query.filter_by(
        publication_recid=record_id).all()

    try:
        try:
            for hepdata_submission in hepdata_submissions:
                db.session.delete(hepdata_submission)
        except NoResultFound as nrf:
            print(nrf.args)

        submissions = DataSubmission.query.filter_by(
            publication_recid=record_id).all()

        reviews = DataReview.query.filter_by(
            publication_recid=record_id).all()

        for review in reviews:
            db.session.delete(review)

        for submission in submissions:

            # each data submission owns at most one data-file resource
            resource = DataResource.query.filter_by(
                id=submission.data_file).first()

            db.session.delete(submission)

            if resource:
                db.session.delete(resource)

        try:
            SubmissionParticipant.query.filter_by(
                publication_recid=record_id).delete()
        except Exception:
            print("Unable to find a submission participant for {0}".format(record_id))

        try:
            # delete the Invenio record objects for the publication and
            # every data table indexed against it
            record = get_record_by_id(record_id)
            data_records = get_records_matching_field(
                'related_publication', record_id, doc_type=CFG_DATA_TYPE)

            if 'hits' in data_records:
                for data_record in data_records['hits']['hits']:
                    data_record_obj = get_record_by_id(data_record['_source']['recid'])
                    if data_record_obj:
                        data_record_obj.delete()
            if record:
                record.delete()
        except PIDDoesNotExistError as e:
            # no PID means no record object was ever minted; DB rows above
            # are still cleaned up
            print('No record entry exists for {0}. Proceeding to delete other files.'.format(record_id))

        db.session.commit()
        db.session.flush()
        return True

    except Exception as e:
        # roll back the partial deletion before propagating
        db.session.rollback()
        raise e
예제 #20
0
def remove_submission(record_id):
    """
    Removes the database entries related to a record: submissions,
    reviews, data resources, participants, the admin-index entry and the
    Invenio record objects.

    :param record_id: publication record id whose entries are removed
    :return: True on success; re-raises the exception (after rolling the
        session back) on failure
    """

    hepdata_submissions = HEPSubmission.query.filter_by(
        publication_recid=record_id).all()

    try:
        try:
            for hepdata_submission in hepdata_submissions:
                db.session.delete(hepdata_submission)
        except NoResultFound as nrf:
            print(nrf.args)

        # drop the dashboard (admin) index entry for this record
        admin_idx = AdminIndexer()
        admin_idx.find_and_delete(term=record_id, fields=['recid'])

        submissions = DataSubmission.query.filter_by(
            publication_recid=record_id).all()

        reviews = DataReview.query.filter_by(publication_recid=record_id).all()

        for review in reviews:
            db.session.delete(review)

        for submission in submissions:

            # each data submission owns at most one data-file resource
            resource = DataResource.query.filter_by(
                id=submission.data_file).first()

            db.session.delete(submission)

            if resource:
                db.session.delete(resource)

        try:
            SubmissionParticipant.query.filter_by(
                publication_recid=record_id).delete()
        except Exception:
            print("Unable to find a submission participant for {0}".format(
                record_id))

        try:
            # delete the Invenio record objects for the publication and
            # every data table indexed against it
            record = get_record_by_id(record_id)
            data_records = get_records_matching_field('related_publication',
                                                      record_id,
                                                      doc_type=CFG_DATA_TYPE)

            if 'hits' in data_records:
                for data_record in data_records['hits']['hits']:
                    data_record_obj = get_record_by_id(
                        data_record['_source']['recid'])
                    if data_record_obj:
                        data_record_obj.delete()
            if record:
                record.delete()

        except PIDDoesNotExistError as e:
            # no PID means no record object was ever minted; DB rows above
            # are still cleaned up
            print(
                'No record entry exists for {0}. Proceeding to delete other files.'
                .format(record_id))

        db.session.commit()
        db.session.flush()
        return True

    except Exception as e:
        # roll back the partial deletion before propagating
        db.session.rollback()
        raise e
예제 #21
0
def do_finalise(recid,
                publication_record=None,
                force_finalise=False,
                commit_message=None,
                send_tweet=False,
                update=False,
                convert=True):
    """
    Creates record SIP for each data record with a link to the associated
    publication, then marks the submission finished, mints DOIs (outside
    testing), reindexes and notifies participants.

    :param recid: publication record id to finalise
    :param publication_record: optional publication record contents passed
        through to finalise_datasubmission
    :param force_finalise: bypass the coordinator permission check
    :param commit_message: if given, stored as a RecordVersionCommitMessage
        and the submission's last_updated date is refreshed
    :param send_tweet: if True, tweet about the newly finalised record
    :param update: if True, treat as an update of an existing record so
        stale data-table index entries are collected and removed first
    :param convert: if True, queue conversion to download formats
    :return: JSON string describing the outcome (may be None when the
        record update raised NoResultFound)
    """
    print('Finalising record {}'.format(recid))

    hep_submission = HEPSubmission.query.filter_by(
        publication_recid=recid, overall_status="todo").first()

    generated_record_ids = []
    if hep_submission \
        and (force_finalise or hep_submission.coordinator == int(current_user.get_id())):

        # only data tables belonging to the current version are finalised
        submissions = DataSubmission.query.filter_by(
            publication_recid=recid, version=hep_submission.version).all()

        version = hep_submission.version

        existing_submissions = {}
        if hep_submission.version > 1 or update:
            # we need to determine which are the existing record ids.
            existing_data_records = get_records_matching_field(
                'related_publication', recid, doc_type=CFG_DATA_TYPE)

            for record in existing_data_records["hits"]["hits"]:

                if "recid" in record["_source"]:
                    # remember each table's recid by title so the new
                    # version reuses it, then drop the stale index entry
                    existing_submissions[record["_source"]["title"]] = \
                        record["_source"]["recid"]
                    delete_item_from_index(
                        record["_id"],
                        doc_type=CFG_DATA_TYPE,
                        parent=record["_source"]["related_publication"])

        current_time = "{:%Y-%m-%d %H:%M:%S}".format(datetime.now())

        for submission in submissions:
            finalise_datasubmission(current_time, existing_submissions,
                                    generated_record_ids, publication_record,
                                    recid, submission, version)

        try:
            record = get_record_by_id(recid)
            # If we have a commit message, then we have a record update.
            # We will store the commit message and also update the
            # last_updated flag for the record.
            record['hepdata_doi'] = hep_submission.doi

            if commit_message:
                # On a revision, the last updated date will
                # be the current date.
                hep_submission.last_updated = datetime.now()

                commit_record = RecordVersionCommitMessage(
                    recid=recid, version=version, message=str(commit_message))

                db.session.add(commit_record)

            record['last_updated'] = datetime.strftime(
                hep_submission.last_updated, '%Y-%m-%d %H:%M:%S')
            record['version'] = version

            record.commit()

            hep_submission.inspire_id = record['inspire_id']
            hep_submission.overall_status = "finished"
            db.session.add(hep_submission)

            db.session.commit()

            create_celery_app(current_app)

            # only mint DOIs if not testing.
            if not current_app.config.get(
                    'TESTING', False) and not current_app.config.get(
                        'NO_DOI_MINTING', False):
                for submission in submissions:
                    generate_doi_for_data_submission.delay(
                        submission.id, submission.version)
                log.info("Generating DOIs for ins{0}".format(
                    hep_submission.inspire_id))
                generate_doi_for_submission.delay(recid, version)

            # Reindex everything.
            index_record_ids([recid] + generated_record_ids)
            push_data_keywords(pub_ids=[recid])

            try:
                admin_indexer = AdminIndexer()
                admin_indexer.index_submission(hep_submission)
            except ConnectionTimeout as ct:
                # admin-index failure is non-fatal; the record is already
                # finalised and committed at this point
                log.error('Unable to add ins{0} to admin index.\n{1}'.format(
                    hep_submission.inspire_id, ct))

            send_finalised_email(hep_submission)

            if convert:
                # queue conversion of the record to each download format
                for file_format in ['yaml', 'csv', 'yoda', 'root']:
                    convert_and_store.delay(hep_submission.inspire_id,
                                            file_format,
                                            force=True)

            if send_tweet:
                tweet(
                    record.get('title'), record.get('collaborations'),
                    "http://www.hepdata.net/record/ins{0}".format(
                        record.get('inspire_id')), version)

            return json.dumps({
                "success": True,
                "recid": recid,
                "data_count": len(submissions),
                "generated_records": generated_record_ids
            })

        except NoResultFound:
            print('No record found to update. Which is super strange.')

    else:
        return json.dumps({
            "success":
            False,
            "recid":
            recid,
            "errors": [
                "You do not have permission to finalise this "
                "submission. Only coordinators can do that."
            ]
        })