def manage(self, req, form):
        """ Web interface for the management of the info space """
        uid = getUid(req)
        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})

        # If it is an Ajax request, extract any JSON data.
        ajax_request = False
        if 'jsondata' in form:
            json_data = json.loads(str(form['jsondata']))
            json_data = json_unicode_to_utf8(json_data)
            ajax_request = True
            json_response = {}

        # Authorization.
        user_info = collect_user_info(req)
        if user_info['email'] == 'guest':
            # User is not logged in.
            if not ajax_request:
                # Do not display the introductory recID selection box to guest
                # users (as it used to be with v0.99.0):
                dummy_auth_code, auth_message = acc_authorize_action(
                    req, 'runinfomanager')
                referer = '/info'
                return page_not_authorized(req=req,
                                           referer=referer,
                                           text=auth_message)
            else:
                # Session has most likely timed out.
                json_response.update({'status': "timeout"})
                return json.dumps(json_response)
        # Handle request.
        if not ajax_request:
            body, errors, warnings = perform_request_init_info_interface()
            title = 'Info Space Manager'
            return page(title=title,
                        body=body,
                        errors=errors,
                        warnings=warnings,
                        uid=uid,
                        language=argd['ln'],
                        req=req)
        else:
            # Handle AJAX request.
            if json_data["action"] == "listFiles":
                json_response.update(
                    perform_request_edit_file(json_data["filename"]))
                try:
                    return json.dumps(json_response)
                except UnicodeDecodeError:
                    # Decoding error: the file may be a PDF, an image or some
                    # other kind of non-editable file
                    return json.dumps({"status": "error_file_not_readable"})

            if json_data["action"] == "saveContent":
                return json.dumps(
                    perform_request_save_file(json_data["filename"],
                                              json_data["filecontent"]))
Example #3
    def templates(self, req, form):
        """handle a edit/templates request"""
        uid = getUid(req)
        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})

        # If it is an Ajax request, extract any JSON data.
        ajax_request = False
        if form.has_key('jsondata'):
            json_data = json.loads(str(form['jsondata']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            json_data = json_unicode_to_utf8(json_data)
            ajax_request = True
            json_response = {'resultCode': 0}

        # Authorization.
        user_info = collect_user_info(req)
        if user_info['email'] == 'guest':
            # User is not logged in.
            if not ajax_request:
                # Do not display the introductory recID selection box to guest
                # users (as it used to be with v0.99.0):
                dummy_auth_code, auth_message = acc_authorize_action(
                    req, 'runbibedit')
                referer = '/edit'
                return page_not_authorized(req=req,
                                           referer=referer,
                                           text=auth_message,
                                           navtrail=navtrail)
            else:
                # Session has most likely timed out.
                json_response.update({'resultCode': 100})
                return json.dumps(json_response)
        # Handle request.
        if not ajax_request:
            # Show BibEdit template management start page.
            body, errors, warnings = perform_request_init_template_interface()
            title = 'Record Editor Template Manager'
            return page(title=title,
                        body=body,
                        errors=errors,
                        warnings=warnings,
                        uid=uid,
                        language=argd['ln'],
                        navtrail=navtrail_bibedit,
                        lastupdated=__lastupdated__,
                        req=req,
                        body_css_classes=['bibedit'])
        else:
            # Handle AJAX request.
            json_response.update(
                perform_request_ajax_template_interface(json_data))
            return json.dumps(json_response)
Example #5
    def doilookup(self, req, form):
        """
        Returns the metadata from the crossref website based on the DOI.
        """
        args = wash_urlargd(form, {'doi': (str, '')})
        response = defaultdict(list)
        if args['doi']:
            doi = args['doi']
            try:
                marcxml_template = get_marcxml_for_doi(doi)
            except CrossrefError:
                # Just ignore Crossref errors
                pass
            else:
                record = create_record(marcxml_template)[0]
                if record:
                    # We need to convert this record structure to a simple dictionary
                    # key, value = (773, [([('0', 'PER:64142'), ...], ' ', ' ', '', 47)])
                    for key, value in record.items():
                        for val in value:  # val = ([('0', 'PER:64142'), ...], ' ', ' ', '', 47)
                            ind1 = val[1].replace(" ", "_")
                            ind2 = val[2].replace(" ", "_")
                            for (k, v) in val[0]:  # k, v = ('0', 'PER:5409')
                                response[key + ind1 + ind2 + k].append(v)
            # The output dictionary is something like:
            # {"100__a": ['Smith, J.'],
            #  "700__a": ['Anderson, J.', 'Someoneelse, E.'],
            #  "700__u": ['University1', 'University2']}

        # return dictionary as JSON
        return json.dumps(response)
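To illustrate the flattening loop above, here is a self-contained sketch that applies the same tag + indicators + subfield-code concatenation to a toy record; the sample values mirror the inline comments and are not real Crossref output.

import json
from collections import defaultdict

# Toy record in the shape produced by create_record():
# {tag: [(subfields, ind1, ind2, controlfield_value, field_position)]}
record = {'100': [([('a', 'Smith, J.')], ' ', ' ', '', 1)],
          '700': [([('a', 'Anderson, J.'), ('u', 'University1')], ' ', ' ', '', 2)]}

response = defaultdict(list)
for key, value in record.items():
    for val in value:
        ind1 = val[1].replace(" ", "_")
        ind2 = val[2].replace(" ", "_")
        for (k, v) in val[0]:
            response[key + ind1 + ind2 + k].append(v)

# Prints {"100__a": ["Smith, J."], "700__a": ["Anderson, J."], "700__u": ["University1"]}
print json.dumps(response)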
Example #6
def get_kb_mappings_embedded_json(kb_name="",
                                  key="",
                                  value="",
                                  match_type="s",
                                  limit=None):
    """Get leftside/rightside mappings from kb kb_name formatted as json dict.
       The rightside is actually considered as a json string and hence embedded
       within the final result.

       If key given, give only those with left side (mapFrom) = key.
       If value given, give only those with right side (mapTo) = value.

       @param kb_name: the name of the kb
       @param key: include only lines matching this on left side in the results
       @param value: include only lines matching this on right side in the results
       @param match_type: s = substring match, e = exact match
       @param limit: maximum number of results to return (all if set to None)
       @return: a JSON string containing a list of mappings
    """
    mappings = get_kb_mappings(kb_name, key, value, match_type)
    ret = []
    if limit is None:
        limit = len(mappings)
    for m in mappings[:limit]:
        label = m['value'] or m['key']
        value = m['key'] or m['value']
        ret.append({'label': label, 'value': json.loads(value)})
    return json.dumps(ret)
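A self-contained sketch of what "embedded" means here compared to the plain get_kb_mappings_json shown further below: the selected side is passed through json.loads(), so stored JSON ends up as a nested object rather than a re-escaped string. The sample mapping is invented.

import json

# One mapping in the shape used above: left side 'key', right side 'value'.
m = {'key': '{"recid": 1234}', 'value': 'Example label'}
label = m['value'] or m['key']
value = m['key'] or m['value']

print json.dumps([{'label': label, 'value': value}])              # plain: value stays a quoted JSON string
print json.dumps([{'label': label, 'value': json.loads(value)}])  # embedded: value becomes a nested object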
Example #7
def dump_metadata(input_file, output_file, meta_type="ffprobe"):
    """Dumps the metadata from a given video to the given file
    The output will be in JSON or XML
    @param input_file: Full path to the video
    @param output_file: Full path to the JSON dump file
    @param meta_type: Metadata style/library to use,
                 either ffprobe, mediainfo or pbcore
    """
    metadata_dict = None
    if meta_type not in ('ffprobe', 'mediainfo', 'pbcore'):
        raise ValueError("Type must be ffprobe, pbcore or mediainfo")
    if meta_type == 'ffprobe':
        metadata_dict = ffprobe_metadata(input_file)
    elif meta_type == 'mediainfo':
        metadata_dict = mediainfo_metadata(input_file)
    if metadata_dict is not None:
        metadata_string = json.dumps(metadata_dict, sort_keys=True, indent=4)
        file = open(output_file, "w")
        file.write(metadata_string)
        file.close()
    ## Dump PBCORE
    else:
        pbcore = pbcore_metadata(input_file)
        file = open(output_file, "w")
        file.write(pbcore)
        file.close()
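A hedged usage sketch: the paths are placeholders and the ffprobe/mediainfo/pbcore helpers come from the surrounding BibEncode module, so this only runs inside such an installation.

# Pretty-printed JSON dump of the ffprobe metadata (sort_keys/indent as above).
dump_metadata("/path/to/video.mp4", "/tmp/video-ffprobe.json", meta_type="ffprobe")

# With meta_type="pbcore" the else branch writes the PBCore (XML) dump instead.
dump_metadata("/path/to/video.mp4", "/tmp/video-pbcore.xml", meta_type="pbcore")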
Example #9
def update_redirection(label, plugin, parameters=None):
    """
    Update an existing redirection from /goto/<LABEL> to the URL returned by
    running the given plugin (as available in CFG_GOTO_PLUGINS), with the given
    parameters.

    @param label: the uniquely identifying label for this redirection
    @type label: string

    @param plugin: the algorithm that should resolve the redirection, usually:
        "goto_plugin_FOO"
    @type plugin: string

    @param parameters: further parameters that should be passed to the plugin.
        This should be a dictionary or None. Note that these parameters could
        be overridden by the query parameters.
    @type parameters: dict or None

    @raises: ValueError in case the label does not exist.

    @note: parameters are going to be serialized to JSON before being stored
        in the DB. Hence only JSON-serializable values should be put there.
    """
    if not run_sql("SELECT label FROM goto WHERE label=%s", (label, )):
        raise ValueError("%s label does not already exist" % label)
    if plugin not in CFG_GOTO_PLUGINS:
        raise ValueError("%s plugin does not exist" % plugin)
    if parameters is None:
        parameters = {}
    try:
        parameters.items() ## dummy test to see if it exposes the dict interface
        json_parameters = json.dumps(parameters)
    except Exception, err:
        raise ValueError("The parameters %s do not specify a valid JSON map: %s" % (parameters, err))
    def test_create_example_url(self, email, login_method, robot, ip, assertion=None,
                                timeout=None, referer=None, groups=None, nickname=None):
        """
        Create a test URL to test the robot login.

        @param email: email of the user we want to login as.
        @type email: string
        @param login_method: the login_method name as specified in CFG_EXTERNAL_AUTHENTICATION.
        @type login_method: string
        @param robot: the identifier of this robot.
        @type robot: string
        @param assertion: any further data we want to send.
        @type assertion: json-serializable mapping
        @param ip: the IP of the user.
        @type ip: string
        @param timeout: timeout when the URL will expire (in seconds from the Epoch)
        @type timeout: float
        @param referer: the URL where to land after successful login.
        @type referer: string
        @param groups: the list of optional group of the user.
        @type groups: list of string
        @param nickname: the optional nickname of the user.
        @type nickname: string
        @return: the URL to login as the user.
        @rtype: string
        """
        from invenio.access_control_config import CFG_EXTERNAL_AUTHENTICATION
        from invenio.urlutils import create_url
        if assertion is None:
            assertion = {}
        assertion[self.email_attribute_name] = email
        if nickname:
            assertion[self.nickname_attribute_name] = nickname
        if groups:
            assertion[self.groups_attribute_name] = self.groups_separator.join(groups)
        if timeout is None:
            timeout = time.time() + CFG_ROBOT_URL_TIMEOUT
        assertion[self.timeout_attribute_name] = timeout
        if referer is None:
            referer = CFG_SITE_URL
        if login_method is None:
            for a_login_method, details in CFG_EXTERNAL_AUTHENTICATION.iteritems():
                if details[2]:
                    login_method = a_login_method
                    break
        robot_keys = load_robot_keys()
        assertion[self.userip_attribute_name] = ip
        assertion = json.dumps(assertion)
        if self.use_zlib:
            assertion = base64.urlsafe_b64encode(compress(assertion))
        shared_key = robot_keys[login_method][robot]
        digest = self.sign(shared_key, assertion)
        return create_url("%s%s" % (CFG_SITE_SECURE_URL, "/youraccount/robotlogin"), {
            'assertion': assertion,
            'robot': robot,
            'login_method': login_method,
            'digest': digest,
            'referer': referer})
Example #12
def get_kbd_values_json(kbname, searchwith=""):
    """Return values from searching a dynamic kb as a json-formatted string.

    This IS probably the method you want.

    @param kbname:     name of the knowledge base
    @param searchwith: a term to search with
    """
    res = get_kbd_values(kbname, searchwith)
    return json.dumps(res)
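Usage sketch; the knowledge-base name and search term are placeholders, and get_kbd_values comes from the same bibknowledge module.

# Returns a JSON-encoded list of matching values, e.g. '["Phys. Rev. D", "Phys. Lett. B"]',
# or '[]' when nothing matches.
suggestions = get_kbd_values_json("journal_names", searchwith="Phys")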
Example #14
def generate_mediaexport_album(recid, resource_id, json_format=True):
    """Return the report number of associate images.

    :param str recid: The record id.
    :param str resource_id: The report number.
    :param str json_format: If true, returns JSON dump, otherwise a dictionary
    """
    # Fields that are required
    MEDIA_CONFIG = {
        'title_en': ('245', ' ', ' ', 'a'),
        'title_fr': ('246', ' ', '1', 'a'),
    }
    bibarchive = BibRecDocs(recid)
    bibarchive_with_deleted = BibRecDocs(recid, deleted_too=True)
    bibdocs = bibarchive.list_bibdocs()
    doc_numbers = [(bibdoc.get_id(), bibdoc.get_docname(), bibdoc) for bibdoc in bibarchive_with_deleted.list_bibdocs()]
    doc_numbers.sort()
    # Calculate the size
    bibdoc_size = len(bibdocs)
    # Get the record
    record = get_record(recid)
    # Build the response
    entry = {}

    for key in MEDIA_CONFIG:
        entry[key] = record_get_field_value(record, *MEDIA_CONFIG[key])

    entry['id'] = resource_id
    entry['record_id'] = str(recid)
    entry['entry_date'] = get_creation_date(recid)
    entry['total'] = bibdoc_size
    entry['type'] = 'album'
    entry['images'] = []

    # Foreach doc create the corresponding report number
    for (docid, docname, bibdoc) in doc_numbers:
        if not bibdoc.deleted_p():
            bibdoc_number = doc_numbers.index((bibdoc.get_id(), bibdoc.get_docname(), bibdoc)) + 1
            image = generate_mediaexport(recid, True, resource_id, bibdoc_number, False)
            image['tirage_id'] = bibdoc_number
            image['id'] = '{0}-{1}'.format(image['id'], bibdoc_number)
            entry['images'].append(image)

    final = {}
    final['entries'] = [{'entry': entry}]

    if not CFG_JSON_AVAILABLE:
        return ''

    if json_format:
        return json.dumps(final)
    else:
        return final
Example #15
def get_entries_fragment(req, year, month, day, start, limit, filter, pagerPrefix, pageNumber):
    """ Serve the request of getting only part of the result set """
    try:
        getUid(req)
    except Error:
        return "unauthorised access !"
    result = {"pagerPrefix": pagerPrefix, "pageNumber": pageNumber}
    auth = check_user(req, "cfgoaiharvest")
    if not auth[0]:
        result["html"] = oha.perform_request_gethpdayfragment(
            int(year), int(month), int(day), int(limit), int(start), filter
        )
        return json.dumps(result)
    else:
        return "unauthorised access !"
Example #16
def print_rules():
    """Prints the valid rules to stdout"""
    plugins = load_plugins()
    for rule_name, rule in load_rules(plugins).items():
        print "Rule %s:" % rule_name
        if "filter_pattern" in rule:
            print " - Filter: %s" % rule["filter_pattern"]
        if "filter_collection" in rule:
            print " - Filter collection: %s" % rule["filter_collection"]
        print " - Checker: %s" % rule["check"]
        if len(rule["checker_params"]) > 0:
            print "      Parameters:"
            for param, val in rule["checker_params"].items():
                print "      %s = %s" % (param, json.dumps(val))

        print
Example #18
def getHoldingPenData(req, elementId):
    try:
        getUid(req)
    except Error:
        return "unauthorised access !"
    auth = check_user(req, 'cfgoaiharvest')
    if auth[0]:
        return "unauthorised access !"

    elements = elementId.split("_")
    resultHtml = None

    if len(elements) == 2:
        filter_key = elements[1]
        resultHtml = oha.perform_request_gethpyears(elements[0], filter_key)
    elif len(elements) == 3:
        # only the year is specified
        filter_key = elements[2]
        nodeYear = int(elements[1])
        resultHtml = oha.perform_request_gethpyear(elements[0], nodeYear,
                                                   filter_key)

    elif len(elements) == 4:
        # year and month specified
        nodeYear = int(elements[1])
        nodeMonth = int(elements[2])
        filter_key = elements[3]
        resultHtml = oha.perform_request_gethpmonth(elements[0], nodeYear,
                                                    nodeMonth, filter_key)

    elif len(elements) == 5:
        # year, month and day specified - returning the entries themselves
        nodeYear = int(elements[1])
        nodeMonth = int(elements[2])
        nodeDay = int(elements[3])
        filter_key = elements[4]
        daySize = get_holdingpen_day_size(nodeYear, nodeMonth, nodeDay,
                                          filter_key)
        urlFilter = urllib.quote(filter_key)
        resultHtml = perform_request_gethpdayfragment(nodeYear, nodeMonth,
                                                      nodeDay, daySize, 0,
                                                      urlFilter)
    else:
        # none of the above: error
        resultHtml = "Wrong request"
    return json.dumps({"elementId": elementId, "html": resultHtml})
Example #19
def register_redirection(label,
                         plugin,
                         parameters=None,
                         update_on_duplicate=False):
    """
    Register a redirection from /goto/<LABEL> to the URL returned by running the
    given plugin (as available in CFG_GOTO_PLUGINS), with the given parameters.

    @param label: the uniquely identifying label for this redirection
    @type label: string

    @param plugin: the algorithm that should resolve the redirection, usually:
        "goto_plugin_FOO"
    @type plugin: string

    @param parameters: further parameters that should be passed to the plugin.
        This should be a dictionary or None. Note that these parameters could
        be overridden by the query parameters.
    @type parameters: dict or None

       @param update_on_duplicate: if False (default) and the label already exists,
           L{register_redirection} will raise a ValueError exception. If True, it
           will implicitly call L{update_redirection}.
    @type update_on_duplicate: bool

    @raises: ValueError in case of a duplicate label when L{update_on_duplicate} is
        set to False.

    @note: parameters are going to be serialized to JSON before being stored
        in the DB. Hence only JSON-serializable values should be put there.
    """
    if run_sql("SELECT label FROM goto WHERE label=%s", (label, )):
        raise ValueError("%s label already exists" % label)
    if plugin not in CFG_GOTO_PLUGINS:
        raise ValueError("%s plugin does not exist" % plugin)
    if parameters is None:
        parameters = {}
    try:
        parameters.items()  ## dummy test to see if it exposes the dict interface
        json_parameters = json.dumps(parameters)
    except Exception, err:
        raise ValueError("The parameters %s do not specify a valid JSON map: %s"
                         % (parameters, err))
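Usage sketch: the label, plugin name and parameters are placeholders (the plugin follows the "goto_plugin_FOO" naming from the docstring), and CFG_GOTO_PLUGINS plus the goto table come from the Invenio installation.

# Register /goto/latest-thesis; the parameters dict is serialised to JSON before
# being stored, as noted in the docstring above.
register_redirection("latest-thesis", "goto_plugin_latest_record",
                     parameters={"collection": "Theses"})

# Per the docstring, registering an existing label raises ValueError unless
# update_on_duplicate=True, in which case update_redirection() is used instead.
register_redirection("latest-thesis", "goto_plugin_latest_record",
                     parameters={"collection": "Theses"}, update_on_duplicate=True)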
Example #21
def create_job_from_dictionary(job_dict, job_filename=None, job_directory=CFG_BIBENCODE_DAEMON_DIR_NEWJOBS):
    """ Creates a job from a given dictionary
    @param job_dict: Dictionary that contains the job description
    @type job_dict: dict
    @param job_filename: Filename for the job
    @type job_filename: string
    @param job_directory: fullpath to the directory storing the job files
    @type job_directory: string
    """
    if not job_filename:
        job_filename = str(uuid.uuid4())
    if not job_filename.endswith(".job"):
        job_filename += ".job"
    job_fullpath = os.path.join(job_directory, job_filename)
    job_string = json.dumps(job_dict, sort_keys=False, indent=4)
    file = open(job_fullpath, "w")
    file.write(job_string)
    file.close()
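Usage sketch; the keys inside the job dictionary are placeholders for whatever the BibEncode daemon expects, only the filename and directory handling is taken from the function above.

# Writes <CFG_BIBENCODE_DAEMON_DIR_NEWJOBS>/<uuid4>.job with the pretty-printed JSON.
create_job_from_dictionary({"input": "/path/to/master.mov",   # placeholder keys
                            "profile": "web_hd"})

# An explicit filename gets ".job" appended when it is missing.
create_job_from_dictionary({"input": "/path/to/master.mov"},
                           job_filename="reencode-2012-05-17")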
Example #23
    def json_req_profiler(self, req, form):
        if "ajaxProfile" in form:
            profiler = cProfile.Profile()
            return_val = profiler.runcall(func, self, req, form)

            results = cStringIO.StringIO()
            stats = pstats.Stats(profiler, stream=results)
            stats.sort_stats('cumulative')
            stats.print_stats(100)

            json_in = json.loads(str(form['jsondata']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            json_in = json_unicode_to_utf8(json_in)

            json_data = json.loads(return_val)
            json_data.update({"profilerStats": "<pre style='overflow: scroll'>" + json_in['requestType'] + results.getvalue() + "</pre>"})
            return json.dumps(json_data)
        else:
            return func(self, req, form)
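json_req_profiler closes over a free variable func, so it is presumably the inner function of a decorator applied to the JSON web handlers; a hedged sketch of that enclosing wrapper (the decorator name is an assumption).

def json_request_profiler(func):              # name is an assumption
    """Return json_req_profiler (as above) closed over func, so that a handler
    profiles itself whenever 'ajaxProfile' is present in the submitted form."""
    def json_req_profiler(self, req, form):
        # ... body as in the example above: run func(self, req, form) under
        # cProfile and attach the pstats output to the JSON response ...
        return func(self, req, form)
    return json_req_profiler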
Example #26
def generate_mediaexport_basket(basket_id):
    """
    Exports the content of a basket. Takes each record from a basket and
    calls either generate_mediaexport_album or generate_mediaexport.

    :param str basket_id: The basket id.
    """
    records = get_basket_content(basket_id, format='')
    recids = [record[0] for record in records]

    output = {}
    output['entries'] = []
    for record_id in recids:
        # For each record_id return metadata
        record = get_record(record_id)
        if not record:
            # There is no record, for example when the record_id < 0 (external
            # resource). Skip it.
            continue
        report_number = record_get_field_value(record, *('037', ' ', ' ', 'a'))
        album_dict = generate_mediaexport_album(record_id, report_number, False)
        album_entries = album_dict.get('entries', None)
        if album_entries:
            output['entries'].append(album_entries)
        else:
            # If it's not an album, check if it's an image
            is_image = False
            collections = record_get_field_values(record, *('980', ' ', ' ', 'a'))
            collections.append(record_get_field_values(record, *('980', ' ', ' ', 'b')))
            for collection in collections:
                if "PHOTO" in collection:
                    is_image = True
                    break
            tirage = report_number.rsplit("-", 1)[-1]
            media_dict = generate_mediaexport(record_id, is_image, report_number, tirage, False, False)
            if media_dict:
                output['entries'].append(media_dict)

    return json.dumps(output)
Example #27
def get_kb_mappings_json(kb_name="", key="", value="", match_type="s", limit=None):
    """Get leftside/rightside mappings from kb kb_name formatted as json dict.

       If key given, give only those with left side (mapFrom) = key.
       If value given, give only those with right side (mapTo) = value.

       @param kb_name: the name of the kb
       @param key: include only lines matching this on left side in the results
       @param value: include only lines matching this on right side in the results
       @param match_type: s = substring match, e = exact match
       @param limit: maximum number of results to return (all if set to None)
       @return: a JSON string containing a list of mappings
    """
    mappings = get_kb_mappings(kb_name, key, value, match_type)
    ret = []
    if limit is None:
        limit = len(mappings)
    for m in mappings[:limit]:
        label = m['value'] or m['key']
        value = m['key'] or m['value']
        ret.append({'label': label, 'value': value})
    return json.dumps(ret)
Example #28
def generate_mediaexport(recid, is_image, resource_id, tirage, wrapped, json_format=True):
    """Generates the JSON with the info needed to export a media resource to  CERN-Drupal"""
    """Mandatory fields to export: title_en, title_fr, caption_en, caption_fr,
                                   copyright_holder, copyright_date, attribution (image),
                                   keywords (image), directors (video), producer (video)
    """

    MEDIA_CONFIG = {'title_en':         ('245', ' ', ' ', 'a'),
                    'title_fr':         ('246', ' ', '1', 'a'),
                    'keywords':         ('653', '1', ' ', 'a'),
                    'copyright_holder': ('542', ' ', ' ', 'd'),
                    'copyright_date':   ('542', ' ', ' ', 'g'),
                    'license_url':      ('540', ' ', ' ', 'a'),
                    'license_desc':     ('540', ' ', ' ', 'b'),
                    'license_body':     ('540', ' ', ' ', 'u'),
                    'author':           ('100', ' ', ' ', 'a'),
                    'affiliation':      ('100', ' ', ' ', 'u'),
                    'directors':        ('700', ' ', ' ', 'a'),
                    'video_length':     ('300', ' ', ' ', 'a'),
                    'language':         ('041', ' ', ' ', 'a'),
                    'creation_date':    ('269', ' ', ' ', 'c'),
                    'abstract_en':      ('520', ' ', ' ', 'a'),
                    'abstract_fr':      ('590', ' ', ' ', 'a')}

    entry = {}
    record = get_record(recid)

    for key in MEDIA_CONFIG:
        entry[key] = record_get_field_value(record, *MEDIA_CONFIG[key])#.encode('utf-8')

    entry['id'] = resource_id
    entry['record_id'] = str(recid)
    entry['type'] = is_image and "image" or "video"
    entry['entry_date'] = get_creation_date(recid)

    toc_recid = 0
    toc_record = {}
    if not is_image and 'asset' in record_get_field_value(record, *('970', ' ', ' ', 'a')):
        toc_repnum = record_get_field_value(record, *('773', ' ', ' ', 'r'))
        if toc_repnum:
            try:
                toc_recid = search_pattern(p='reportnumber:"%s"' %toc_repnum)[0]
            except IndexError:
                pass

    #corner cases for copyright & licence
    if not entry['copyright_holder']:
        entry['copyright_holder'] = 'CERN'
    if not entry['license_body']:
        entry['license_body'] = 'CERN'
    if not entry['license_desc']:
        entry['license_desc'] = 'CERN'
    if not entry['license_url']:
        from invenio.bibknowledge import get_kb_mapping
        try:
            entry['license_url'] = get_kb_mapping(kb_name='LICENSE2URL', key=entry['license_desc'])['value']
        except KeyError:
            pass

    #keywords
    entry['keywords'] = ','.join(record_get_field_values(record, *MEDIA_CONFIG['keywords']))

    #attribution
    if not entry.get('author', '') and not entry.get('attribution', '') and toc_recid > 0:
        if not toc_record:
            toc_record = get_record(toc_recid)
        entry['author'] = record_get_field_value(toc_record, *MEDIA_CONFIG['author'])
        entry['affiliation'] = record_get_field_value(toc_record, *MEDIA_CONFIG['affiliation'])
        if not entry.get('directors', ''):
            entry['directors'] = ','.join(record_get_field_values(toc_record, *MEDIA_CONFIG['directors']))

    #photos
    if is_image:
        if entry['author']:
            entry['attribution'] = entry['author']
        if entry['affiliation']:
            entry['attribution'] += ': %s' % entry['affiliation']
        del entry['directors']
    else: #videos
        if entry['author']:
            entry['producer'] = entry['author']
        # Get all files from record
        files_field = ('856', '7', ' ', 'u')
        # Filter all that are images
        thumbnails = [
            image for image in record_get_field_values(record, *files_field)
            if 'jpg' in image
        ]
        # If exists get the first one
        if thumbnails:
            entry['thumbnail'] = thumbnails[0]


    del entry['author']
    del entry['affiliation']

    #title
    if not entry['title_en'] and not entry['title_fr'] and toc_recid > 0:
        if not toc_record:
            toc_record = get_record(toc_recid)
        entry['title_en'] = record_get_field_value(toc_record, *MEDIA_CONFIG['title_en'])
        entry['title_fr'] = record_get_field_value(toc_record, *MEDIA_CONFIG['title_fr'])

    #crop, media storage, caption
    if is_image:
        entry['file_params'] = {'size': ['small', 'medium', 'large'], 'crop': False}

        if 'MediaArchive' in record_get_field_values(record, *('856', '7', ' ', '2')):
            entry['caption_en'] = get_photolab_image_caption(record, tirage)
            entry['caption_fr'] = ''
        else:
            brd = BibRecDocs(recid, deleted_too=True)
            doc_numbers = [(bibdoc.get_id(), bibdoc) for bibdoc in brd.list_bibdocs()]
            doc_numbers.sort()
            bibdoc = doc_numbers[tirage-1][1]
            entry['filename'] = brd.get_docname(bibdoc.get_id()) #bibdoc.get_docname()
            if 'crop' in [bibdocfile.get_subformat() for bibdocfile in bibdoc.list_latest_files()]:
                entry['file_params']['crop'] = True
            if not bibdoc.deleted_p():
                for bibdoc_file in bibdoc.list_latest_files():
                    entry['caption_en'] = bibdoc_file.get_comment()
                    entry['caption_fr'] = bibdoc_file.get_description()
                    if entry.get('caption_en', ''):
                        break

    if not entry.get('caption_en', ''):
        entry['caption_en'] = entry['abstract_en']
    if not entry.get('caption_fr', ''):
        entry['caption_fr'] = entry['abstract_fr']

    if is_image:
        del entry['language']
        del entry['video_length']

    # we don't need it
    del entry['abstract_en']
    del entry['abstract_fr']

    #make sure all mandatory fields are sent
    MANDATORY_FIELDS = ['title_en', 'title_fr', 'caption_en', 'caption_fr', 'copyright_holder', 'copyright_date']
    MANDATORY_FIELDS_IMAGE = MANDATORY_FIELDS + ['attribution', 'keywords']
    MANDATORY_FIELDS_VIDEO = MANDATORY_FIELDS + ['directors', 'producer', 'thumbnail']

    if is_image:
        mandatory_fields_all = MANDATORY_FIELDS_IMAGE
    else:
        mandatory_fields_all = MANDATORY_FIELDS_VIDEO

    for field in mandatory_fields_all:
        entry.setdefault(field, '')
    # In case we want to embed the object
    if wrapped:
        final = {}
        final['entries'] = [{'entry': entry}]

        if not CFG_JSON_AVAILABLE:
            return ''

        if json_format:
            return json.dumps(final)
        else:
            return final
    else:
        return entry
Example #29
    def _process_json_request(self, form, req):
        """Takes care about the json requests."""

        argd = wash_urlargd(form, {self._JSON_DATA_KEY: (str, "")})

        # load json data
        json_data_string = argd[self._JSON_DATA_KEY]
        json_data_unicode = json.loads(json_data_string)
        json_data = json_unicode_to_utf8(json_data_unicode)

        language = json_data["language"]
        search_criteria = json_data["searchCriteria"]
        output_tags = json_data["outputTags"]
        output_tags = output_tags.split(",")
        output_tags = [tag.strip() for tag in output_tags]
        action_type = json_data["actionType"]
        current_record_id = json_data["currentRecordID"]
        commands = json_data["commands"]
        output_format = json_data["outputFormat"]
        page_to_display = json_data["pageToDisplay"]
        collection = json_data["collection"]
        compute_modifications = json_data["compute_modifications"]
        checked_records = json_data["checked_records"]

        json_response = {}
        if action_type == self._action_types.test_search:
            json_response.update(
                multi_edit_engine.perform_request_test_search(
                    search_criteria,
                    [],
                    output_format,
                    page_to_display,
                    language,
                    output_tags,
                    collection,
                    req=req,
                    checked_records=checked_records,
                )
            )
            json_response["display_info_box"] = 1
            json_response["info_html"] = ""
            return json.dumps(json_response)

        elif action_type == self._action_types.display_detailed_record:
            json_response.update(
                multi_edit_engine.perform_request_detailed_record(current_record_id, [], output_format, language)
            )
            return json.dumps(json_response)

        elif action_type == self._action_types.preview_results:
            commands_list, upload_mode, tag_list = self._create_commands_list(commands)
            json_response = {}
            json_response.update(
                multi_edit_engine.perform_request_test_search(
                    search_criteria,
                    commands_list,
                    output_format,
                    page_to_display,
                    language,
                    output_tags,
                    collection,
                    compute_modifications,
                    upload_mode,
                    req,
                    checked_records,
                )
            )
            return json.dumps(json_response)

        elif action_type == self._action_types.display_detailed_result:
            commands_list, upload_mode, tag_list = self._create_commands_list(commands)
            json_response.update(
                multi_edit_engine.perform_request_detailed_record(
                    current_record_id, commands_list, output_format, language
                )
            )
            return json.dumps(json_response)

        elif action_type == self._action_types.submit_changes:
            commands_list, upload_mode, tag_list = self._create_commands_list(commands)
            json_response.update(
                multi_edit_engine.perform_request_submit_changes(
                    search_criteria, commands_list, language, upload_mode, tag_list, collection, req, checked_records
                )
            )
            return json.dumps(json_response)

        # In case we obtain wrong action type we return empty page.
        return " "
Example #30
    def index(self, req, form):
        """Handle all BibMerge requests.
        The responsibilities of this function are:
        * JSON decoding and encoding.
        * Redirection, if necessary.
        * Authorization.
        * Calling the appropriate function from the engine.
        """
        # If it is an Ajax request, extract any JSON data.
        ajax_request, recid1, recid2 = False, None, None
        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
        if form.has_key('jsondata'):
            json_data = json.loads(str(form['jsondata']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            json_data = json_unicode_to_utf8(json_data)
            ajax_request = True
            json_response = {}
            try:
                if json_data.has_key('recID1'):
                    recid1 = int(json_data['recID1'])
                    json_data['recID1'] = recid1
                if json_data.has_key('recID2'):
                    if json_data.get('record2Mode') == "recid":
                        recid2 = int(json_data['recID2'])
                        json_data['recID2'] = recid2
            except ValueError:
                json_response.update({
                    'resultCode': 1,
                    'resultText': 'Invalid record ID!'
                })
                return json.dumps(json_response)
            if json_data.has_key("duplicate"):
                if json_data.get('record2Mode') == "recid":
                    json_data["duplicate"] = int(json_data["duplicate"])

        # Authorization.
        user_info = collect_user_info(req)
        if user_info['email'] == 'guest':
            # User is not logged in.
            if not ajax_request:
                # Do not display the introductory recID selection box to guest
                # users (as it used to be with v0.99.0):
                auth_code, auth_message = acc_authorize_action(
                    req, 'runbibmerge')
                referer = '/merge/'
                return page_not_authorized(req=req,
                                           referer=referer,
                                           text=auth_message,
                                           navtrail=navtrail)
            else:
                # Session has most likely timed out.
                json_response.update({
                    'resultCode': 1,
                    'resultText': 'Error: Not logged in'
                })
                return json.dumps(json_response)

        elif self.recid:
            # Handle RESTful call by storing recid and redirecting to
            # generic URL.
            redirect_to_url(
                req, '%s/%s/merge/' % (CFG_SITE_SECURE_URL, CFG_SITE_RECORD))

        if recid1 is not None:
            # Authorize access to record 1.
            auth_code, auth_message = acc_authorize_action(
                req,
                'runbibmerge',
                collection=guess_primary_collection_of_a_record(recid1))
            if auth_code != 0:
                json_response.update({
                    'resultCode': 1,
                    'resultText': 'No access to record %s' % recid1
                })
                return json.dumps(json_response)
        if recid2 is not None:
            # Authorize access to record 2.
            auth_code, auth_message = acc_authorize_action(
                req,
                'runbibmerge',
                collection=guess_primary_collection_of_a_record(recid2))
            if auth_code != 0:
                json_response.update({
                    'resultCode': 1,
                    'resultText': 'No access to record %s' % recid2
                })
                return json.dumps(json_response)

        # Handle request.
        uid = getUid(req)
        if not ajax_request:
            # Show BibEdit start page.
            body, errors, warnings = perform_request_init()

            scripts = ["json2.js", "bibmerge_engine.js"]
            metaheaderadd = ""
            for script in scripts:
                metaheaderadd += '<script type="text/javascript" src="%s/%s"></script>' % (
                    CFG_SITE_URL, auto_version_url("js/" + script))

            return page(title='Record Merger',
                        metaheaderadd=metaheaderadd,
                        body=body,
                        errors=errors,
                        warnings=warnings,
                        uid=uid,
                        language=argd['ln'],
                        navtrail=navtrail,
                        lastupdated=__lastupdated__,
                        req=req)
        else:
            # Handle AJAX request.
            json_response = perform_request_ajax(req, uid, json_data)
            return json.dumps(json_response)
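Similarly, a sketch of the 'jsondata' a client would send to this BibMerge handler; the recID*, record2Mode and duplicate fields are the ones parsed above, while requestType and the numeric values are placeholders.

import json

jsondata = json.dumps({
    "requestType": "getRecordCompare",   # placeholder; dispatching happens in perform_request_ajax()
    "recID1": 1234,
    "recID2": 5678,
    "record2Mode": "recid",              # only in this mode are recID2/duplicate cast to int above
})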
    def index(self, req, form):
        """Handle all requests"""

        uid = getUid(req)
        argd = wash_urlargd(form, {'ln' : (str, CFG_SITE_LANG),
                           'state' : (str, '')})
        ln = argd['ln']
        state = argd['state']
        _ = gettext_set_language(ln)

        # Abort if the simplejson module isn't available
        if not CFG_JSON_AVAILABLE:
            title = 'Authorlist Manager'
            body = '''Sorry, the authorlist manager cannot operate when the
                `simplejson' module is not installed.  Please see the INSTALL
                file.'''
            return page(title       = title,
                        body        = body,
                        errors      = [],
                        warnings    = [],
                        uid         = uid,
                        language    = ln,
                        navtrail    = navtrail,
                        lastupdated = __lastupdated__,
                        req         = req)

        # Extract additional JSON data from form
        if 'options' in form:
            options = json.loads(str(form['options']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            options = json_unicode_to_utf8(options)

        # Authorization.
        not_authorized = authorlist_engine.user_authorization(req, ln)
        if not_authorized:
            return not_authorized

        # User is authorized, let's handle different states
        # if no state parameter, load the main page
        if state == '':

            return page(title         = _('Author List Manager'),
                        metaheaderadd = authorlist_templates.index_header(),
                        body          = authorlist_templates.body(),
                        errors        = [],
                        warnings      = [],
                        uid           = uid,
                        language      = ln,
                        navtrail      = navtrail,
                        lastupdated   = __lastupdated__,
                        req           = req)

        elif state == 'itemize':
            data = authorlist_db.itemize(uid)

            req.content_type = 'application/json'
            req.write(json.dumps(data))

        # open parameter set? initialize an Authorlist instance
        elif state == 'open':
            # if 'id' in url, check if user has right to modify this paper
            try:
                received = wash_urlargd(form, {'id': (str, None)})
                paper_id = received['id']

                if authorlist_engine.check_user_rights(uid, paper_id):
                    return page(title         = _('Author List Manager'),
                        metaheaderadd = authorlist_templates.list_header(),
                        body          = authorlist_templates.body(),
                        errors        = [],
                        warnings      = [],
                        uid           = uid,
                        language      = ln,
                        navtrail      = navtrail,
                        lastupdated   = __lastupdated__,
                        req           = req)
                else:
                    # no rights to modify this paper
                    redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
            except:
                # redirect to the main page if weird stuff happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
                        
        # On load state we will answer with the JSON encoded data of the passed 
        # paper id. Should usually not be directly surfed by the user.
        elif state == 'load':
            try:
                received = wash_urlargd(form, {'id': (str, None)})
                paper_id = received['id']
                data = authorlist_db.load(paper_id)
                
                req.content_type = 'application/json'
                req.write(json.dumps(data))
            except:
                # redirect to the main page if weird stuff happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        # The save state saves the send data in the database using the passed
        # paper id. Responds with a JSON object containing the id of the paper
        # as saved in the database. Should usually not be surfed directly by the
        # user
        elif state == 'save':
            try:
                received = wash_urlargd(form, {'id': (str, None),
                                               'data': (str, '')})
                paper_id = received['id']
                in_data = json.loads(received['data'])
                out_data = authorlist_db.save(paper_id, uid, in_data)

                req.content_type = 'application/json'
                req.write(json.dumps(out_data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        # Clones the paper with the given id in the database and responds with a
        # JSON object containing the id of the clone. Should usually not be
        # surfed directly by the user.
        elif state == 'clone':
            try:
                received = wash_urlargd(form, {'id': (str, None)})
                paper_id = received['id']
                data = authorlist_db.clone(paper_id, uid)

                req.content_type = 'application/json'
                req.write(json.dumps(data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        # Transforms the sent data into the format passed in the URL using an
        # authorlist_engine converter. Responds with the MIME type of the
        # converter and offers it as a download (Content-Disposition header).
        elif state == 'export':
            try:
                received = wash_urlargd(form, {'format': (str, None),
                                               'data': (str, '')})
                data_format = received['format']
                data = received['data']

                converter = authorlist_engine.Converters.get(data_format)

                attachment = 'attachment; filename="%s"' % converter.FILE_NAME
                req.headers_out['Content-Type'] = converter.CONTENT_TYPE
                req.headers_out['Content-Disposition'] = attachment
                #redirect_to_url(req, authorlist_engine.dumps(data, converter))
                req.write(authorlist_engine.dumps(data, converter))
            except:
                # return the exception info if something weird happens
                return sys.exc_info()

        elif state == 'delete':
            try:
                received = wash_urlargd(form, {'id': (str, None)})
                paper_id = received['id']

                data = authorlist_db.delete(paper_id)

                req.content_type = 'application/json'
                req.write(json.dumps(data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        elif state == 'import':
            try:
                received = wash_urlargd(form, {'importid': (str, None)})
                recID = received['importid']
                data = authorlist_engine.retrieve_data_from_record(recID)
                req.content_type = 'application/json'
                req.write(json.dumps(data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        elif state == 'importxml':
            try:
                received = wash_urlargd(form, {'xmlfile': (Field, None)})
                xml_string = received['xmlfile'].value
                import_data = authorlist_engine.retrieve_data_from_xml(xml_string)
                req.content_type = 'application/json'
                req.write(json.dumps(import_data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
        # Unknown state, just go to the main page.
        else:
            redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
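
# A minimal sketch of the converter protocol assumed by the 'export' branch
# above: authorlist_engine.Converters.get(format) is expected to return an
# object exposing FILE_NAME and CONTENT_TYPE, and authorlist_engine.dumps()
# to serialize the posted data with it.  CSVConverter and the local
# Converters/dumps stand-ins below are hypothetical, not the original module.
import json


class CSVConverter(object):
    FILE_NAME = 'authors.csv'    # used in the Content-Disposition header
    CONTENT_TYPE = 'text/csv'    # sent back as the Content-Type header

    def dump(self, data):
        # 'data' is assumed to be the JSON string posted by the browser
        parsed = json.loads(data)
        lines = ['family_name,given_name']
        for author in parsed.get('authors', []):
            lines.append('%s,%s' % (author.get('family_name', ''),
                                    author.get('given_name', '')))
        return '\n'.join(lines)


class Converters(object):
    _registry = {'csv': CSVConverter}

    @classmethod
    def get(cls, data_format):
        converter_class = cls._registry.get(data_format)
        return converter_class() if converter_class else None


def dumps(data, converter):
    return converter.dump(data)
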
Example #32
    def index(self, req, form):
        """Handle all BibMerge requests.
        The responsibilities of this function are:
        * JSON decoding and encoding.
        * Redirection, if necessary.
        * Authorization.
        * Calling the appropriate function from the engine.
        """
        # If it is an Ajax request, extract any JSON data.
        ajax_request, recid1, recid2 = False, None, None
        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
        if form.has_key('jsondata'):
            json_data = json.loads(str(form['jsondata']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            json_data = json_unicode_to_utf8(json_data)
            ajax_request = True
            json_response = {}
            if json_data.has_key('recID1'):
                recid1 = json_data['recID1']
            if json_data.has_key('recID2'):
                recid2 = json_data['recID2']

        # Authorization.
        user_info = collect_user_info(req)
        if user_info['email'] == 'guest':
            # User is not logged in.
            if not ajax_request:
                # Do not display the introductory recID selection box to guest
                # users (as it used to be with v0.99.0):
                auth_code, auth_message = acc_authorize_action(req, 'runbibmerge')
                referer = '/merge/'
                return page_not_authorized(req=req, referer=referer,
                                           text=auth_message, navtrail=navtrail)
            else:
                # Session has most likely timed out.
                json_response.update({'resultCode': 1,
                                      'resultText': 'Error: Not logged in'})
                return json.dumps(json_response)

        elif self.recid:
            # Handle RESTful call by storing recid and redirecting to
            # generic URL.
            redirect_to_url(req, '%s/%s/merge/' % (CFG_SITE_SECURE_URL, CFG_SITE_RECORD) )

        if recid1 is not None:
            # Authorize access to record 1.
            auth_code, auth_message = acc_authorize_action(req, 'runbibmerge',
                collection=guess_primary_collection_of_a_record(recid1))
            if auth_code != 0:
                json_response.update({'resultCode': 1, 'resultText': 'No access to record %s' % recid1})
                return json.dumps(json_response)
        if recid2 is not None:
            # Authorize access to record 2.
            auth_code, auth_message = acc_authorize_action(req, 'runbibmerge',
                collection=guess_primary_collection_of_a_record(recid2))
            if auth_code != 0:
                json_response.update({'resultCode': 1, 'resultText': 'No access to record %s' % recid2})
                return json.dumps(json_response)

        # Handle request.
        uid = getUid(req)
        if not ajax_request:
            # Show BibEdit start page.
            body, errors, warnings = perform_request_init()
            metaheaderadd = """<script type="text/javascript" src="%(site)s/js/json2.js"></script>
  <script type="text/javascript" src="%(site)s/js/bibmerge_engine.js"></script>""" % {'site': CFG_SITE_SECURE_URL}
            title = 'Record Merger'
            return page(title         = title,
                        metaheaderadd = metaheaderadd,
                        body          = body,
                        errors        = errors,
                        warnings      = warnings,
                        uid           = uid,
                        language      = argd['ln'],
                        navtrail      = navtrail,
                        lastupdated   = __lastupdated__,
                        req           = req)
        else:
            # Handle AJAX request.
            json_response = perform_request_ajax(req, uid, json_data)
            return json.dumps(json_response)
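
# Illustrative only: the shape of the 'jsondata' form field the BibMerge
# handler above looks at.  The recID1/recID2 keys mirror the checks in the
# code; the requestType value and the record ids are made-up placeholders.
import json

example_jsondata = json.dumps({
    'requestType': 'getRecordCompare',   # hypothetical request type
    'recID1': 1,                         # master record
    'recID2': 2,                         # record to merge in
})
# The handler json.loads() this string, de-unicodes it and, for logged-in
# users, authorizes access to both records before calling the engine.
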
Example #33
                            os.rename(os.path.join(icon_path, icon_name),
                                      os.path.join(icons_dir, icon_name))
                            added_files[key] = {
                                'name': filename,
                                'iconName': icon_name
                            }
                        except InvenioWebSubmitIconCreatorError, e:
                            # We could not create the icon
                            added_files[key] = {'name': filename}
                            continue
                    else:
                        raise apache.SERVER_RETURN(apache.HTTP_BAD_REQUEST)

            # Send our response
            if CFG_JSON_AVAILABLE:
                return json.dumps(added_files)

    def upload_video(self, req, form):
        """
        A clone of uploadfile but for (large) videos.
        Does not copy the uploaded file to the websubmit directory.
        Instead, the path to the file is stored inside the submission directory.
        """
        def gcd(a, b):
            """ the euclidean algorithm """
            while a:
                a, b = b % a, a
            return b

        from invenio.bibencode_extract import extract_frames
        from invenio.bibencode_config import CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_DIR, CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_FNAME
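
# A quick standalone check of the nested gcd() helper above (the Euclidean
# algorithm).  upload_video() later uses it to reduce a frame size to an
# aspect ratio when the aspect cannot be detected directly.
def gcd(a, b):
    while a:
        a, b = b % a, a
    return b

the_gcd = gcd(1920, 1080)
print(the_gcd)                                  # 120
print('%d:%d' % (1920 // the_gcd, 1080 // the_gcd))  # 16:9
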
Example #34
                % (CFG_SITE_URL, nodeYear, nodeMonth, nodeDay,
                   i * resultsPerPage, resultsPerPage, urlFilter),
                "selector":
                False,
                "type":
                "ajax",
            }]

        additionalData = {"pagerId": elementId + "_pager", "pages": pages}
    else:
        # none of the above: error
        resultHtml = "Wrong request"

    return json.dumps({
        "elementId": elementId,
        "html": resultHtml,
        "additionalData": additionalData
    })


#        return "<li id=\"ajax_generated_li\"><span>Ajax generated position " + element_id + "</span><ul id=\"ble_ajaxgenerated_ul\"><li>element</li></ul></li>"


def get_entries_fragment(req, year, month, day, start, limit, filter):
    try:
        uid = getUid(req)
    except Error, e:
        return "unauthorised access !"
    auth = check_user(req, 'cfgoaiharvest')
    if not auth[0]:
        return oha.perform_request_gethpdayfragment(int(year), int(month),
Пример #35
0
    def _process_json_request(self, form, req):
        """Takes care about the json requests."""

        argd = wash_urlargd(form, {
                           self._JSON_DATA_KEY: (str, ""),
                           })

        # load json data
        json_data_string = argd[self._JSON_DATA_KEY]
        json_data_unicode = json.loads(json_data_string)
        json_data = json_unicode_to_utf8(json_data_unicode)

        language = json_data["language"]
        search_criteria = json_data["searchCriteria"]
        output_tags = json_data["outputTags"]
        output_tags = output_tags.split(',')
        output_tags = [tag.strip() for tag in output_tags]
        action_type = json_data["actionType"]
        current_record_id = json_data["currentRecordID"]
        commands = json_data["commands"]
        output_format = json_data["outputFormat"]
        page_to_display = json_data["pageToDisplay"]
        collection = json_data["collection"]
        compute_modifications = json_data["compute_modifications"]
        checked_records = json_data["checked_records"]

        json_response = {}
        if action_type == self._action_types.test_search:
            json_response.update(multi_edit_engine.perform_request_test_search(
                                                    search_criteria,
                                                    [],
                                                    output_format,
                                                    page_to_display,
                                                    language,
                                                    output_tags,
                                                    collection,
                                                    checked_records))
            json_response['display_info_box'] = 1
            json_response['info_html'] = ""
            return json.dumps(json_response)

        elif action_type == self._action_types.display_detailed_record:
            json_response.update(multi_edit_engine.perform_request_detailed_record(
                                                    current_record_id,
                                                    [],
                                                    output_format,
                                                    language))
            return json.dumps(json_response)

        elif action_type == self._action_types.preview_results:
            commands_list, upload_mode, tag_list = self._create_commands_list(commands)
            json_response = {}
            json_response.update(multi_edit_engine.perform_request_test_search(
                                                    search_criteria,
                                                    commands_list,
                                                    output_format,
                                                    page_to_display,
                                                    language,
                                                    output_tags,
                                                    collection,
                                                    compute_modifications,
                                                    upload_mode,
                                                    checked_records))
            return json.dumps(json_response)

        elif action_type == self._action_types.display_detailed_result:
            commands_list, upload_mode, tag_list = self._create_commands_list(commands)
            json_response.update(multi_edit_engine.perform_request_detailed_record(
                                                    current_record_id,
                                                    commands_list,
                                                    output_format,
                                                    language))
            return json.dumps(json_response)

        elif action_type == self._action_types.submit_changes:
            commands_list, upload_mode, tag_list = self._create_commands_list(commands)
            json_response.update(multi_edit_engine.perform_request_submit_changes(search_criteria, commands_list, language, upload_mode, tag_list, collection, req, checked_records))
            return json.dumps(json_response)

        # In case we obtain a wrong action type, we return an empty page.
        return " "
Example #37
def get_drupal_data():
    data = json.load(urllib2.urlopen(DRUPAL_FEED))
    data = map(lambda x: str(x['name'].encode('utf-8')), data['tags'])
    return json.dumps(data)
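
# get_drupal_data() above assumes DRUPAL_FEED serves JSON of the form
# {"tags": [{"name": ...}, ...]} and re-serializes just the tag names.
# A small offline check of that transformation (no network, simplified
# to plain strings):
import json

feed = {"tags": [{"name": "physics"}, {"name": "engineering"}]}
names = [tag["name"] for tag in feed["tags"]]
print(json.dumps(names))   # ["physics", "engineering"]
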
Example #38
            pages += [
            {
            "url": "%s/admin/bibharvest/oaiharvestadmin.py/get_entries_fragment?year=%s&month=%s&day=%s&start=%i&limit=%i&filter=%s" % (CFG_SITE_URL, nodeYear, nodeMonth, nodeDay, i * resultsPerPage, resultsPerPage, urlFilter),
            "selector": False,
            "type": "ajax",
            }]

        additionalData = {
               "pagerId": elementId + "_pager",
               "pages" : pages
           }
    else:
        # none of the above: error
        resultHtml = "Wrong request"

    return json.dumps({"elementId": elementId, "html" : resultHtml, "additionalData" : additionalData})

#        return "<li id=\"ajax_generated_li\"><span>Ajax generated position " + element_id + "</span><ul id=\"ble_ajaxgenerated_ul\"><li>element</li></ul></li>"

def get_entries_fragment(req, year, month, day, start, limit, filter):
    try:
        uid = getUid(req)
    except Error, e:
        return "unauthorised access !"
    auth = check_user(req, 'cfgoaiharvest')
    if not auth[0]:
        return oha.perform_request_gethpdayfragment(int(year), int(month), int(day), int(limit), int(start), filter)
    else:
        return "unauthorised access !"

Example #39
    def index(self, req, form):
        """Handle all BibEdit requests.
        The responsibilities of this function are:
        * JSON decoding and encoding.
        * Redirection, if necessary.
        * Authorization.
        * Calling the appropriate function from the engine.

        """
        uid = getUid(req)
        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
        # Abort if the simplejson module isn't available
        if not CFG_JSON_AVAILABLE:
            title = 'Record Editor'
            body = '''Sorry, the record editor cannot operate when the
                `simplejson' module is not installed.  Please see the INSTALL
                file.'''
            return page(title       = title,
                        body        = body,
                        errors      = [],
                        warnings    = [],
                        uid         = uid,
                        language    = argd['ln'],
                        navtrail    = navtrail,
                        lastupdated = __lastupdated__,
                        req         = req,
                        body_css_classes = ['bibedit'])

        # If it is an Ajax request, extract any JSON data.
        ajax_request, recid = False, None
        if form.has_key('jsondata'):
            json_data = json.loads(str(form['jsondata']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            json_data = json_unicode_to_utf8(json_data)
            ajax_request = True
            if json_data.has_key('recID'):
                recid = json_data['recID']
            json_response = {'resultCode': 0, 'ID': json_data['ID']}

        # Authorization.
        user_info = collect_user_info(req)
        if user_info['email'] == 'guest':
            # User is not logged in.
            if not ajax_request:
                # Do not display the introductory recID selection box to guest
                # users (as it used to be with v0.99.0):
                dummy_auth_code, auth_message = acc_authorize_action(req,
                                                                     'runbibedit')
                referer = '/edit/'
                if self.recid:
                    referer = '/%s/%s/edit/' % (CFG_SITE_RECORD, self.recid)
                return page_not_authorized(req=req, referer=referer,
                                           text=auth_message, navtrail=navtrail)
            else:
                # Session has most likely timed out.
                json_response.update({'resultCode': 100})
                return json.dumps(json_response)

        elif self.recid:
            # Handle RESTful calls from logged in users by redirecting to
            # generic URL.
            redirect_to_url(req, '%s/%s/edit/#state=edit&recid=%s&recrev=%s' % (
                    CFG_SITE_SECURE_URL, CFG_SITE_RECORD, self.recid, ""))

        elif recid is not None:
            json_response.update({'recID': recid})
            if json_data['requestType'] == "getRecord":
                # Authorize access to record.
                if not user_can_edit_record_collection(req, recid):
                    json_response.update({'resultCode': 101})
                    return json.dumps(json_response)

        # Handle request.
        if not ajax_request:
            # Show BibEdit start page.
            body, errors, warnings = perform_request_init(uid, argd['ln'], req, __lastupdated__)
            title = 'Record Editor'
            return page(title       = title,
                        body        = body,
                        errors      = errors,
                        warnings    = warnings,
                        uid         = uid,
                        language    = argd['ln'],
                        navtrail    = navtrail,
                        lastupdated = __lastupdated__,
                        req         = req,
                        body_css_classes = ['bibedit'])
        else:
            # Handle AJAX request.
            json_response.update(perform_request_ajax(req, recid, uid,
                                                      json_data))
            return json.dumps(json_response)
    def index(self, req, form):
        """Handle all requests"""

        uid = getUid(req)
        argd = wash_urlargd(form, {
            'ln': (str, CFG_SITE_LANG),
            'state': (str, '')
        })
        ln = argd['ln']
        state = argd['state']
        _ = gettext_set_language(ln)

        # Abort if the simplejson module isn't available
        if not CFG_JSON_AVAILABLE:
            title = 'Authorlist Manager'
            body = '''Sorry, the Authorlist Manager cannot operate when the
                `simplejson' module is not installed.  Please see the INSTALL
                file.'''
            return page(title=title,
                        body=body,
                        errors=[],
                        warnings=[],
                        uid=uid,
                        language=ln,
                        navtrail=navtrail,
                        lastupdated=__lastupdated__,
                        req=req)

        # Extract additional JSON data from form
        if 'options' in form:
            options = json.loads(str(form['options']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            options = json_unicode_to_utf8(options)

        # Authorization.
        not_authorized = authorlist_engine.user_authorization(req, ln)
        if not_authorized:
            return not_authorized

        # User is authorized, let's handle different states
        # if no state parameter, load the main page
        if state == '':

            return page(title=_('Author List Manager'),
                        metaheaderadd=authorlist_templates.index_header(),
                        body=authorlist_templates.body(),
                        errors=[],
                        warnings=[],
                        uid=uid,
                        language=ln,
                        navtrail=navtrail,
                        lastupdated=__lastupdated__,
                        req=req)

        elif state == 'itemize':
            data = authorlist_db.itemize(uid)

            req.content_type = 'application/json'
            req.write(json.dumps(data))

        # 'open' parameter set? initialize an Authorlist instance
        elif state == 'open':
            # if 'id' is in the URL, check if the user has the right to modify this paper
            try:
                received = wash_urlargd(form, {'id': (str, None)})
                paper_id = received['id']

                if authorlist_engine.check_user_rights(uid, paper_id):
                    return page(
                        title=_('Author List Manager'),
                        metaheaderadd=authorlist_templates.list_header(),
                        body=authorlist_templates.body(),
                        errors=[],
                        warnings=[],
                        uid=uid,
                        language=ln,
                        navtrail=navtrail,
                        lastupdated=__lastupdated__,
                        req=req)
                else:
                    # no rights to modify this paper
                    redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
            except:
                # redirect to the main page if weird stuff happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        # The 'load' state answers with the JSON-encoded data of the passed
        # paper id. Should usually not be surfed directly by the user.
        elif state == 'load':
            try:
                received = wash_urlargd(form, {'id': (str, None)})
                paper_id = received['id']
                data = authorlist_db.load(paper_id)

                req.content_type = 'application/json'
                req.write(json.dumps(data))
            except:
                # redirect to the main page if weird stuff happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        # The save state saves the sent data in the database using the passed
        # paper id. Responds with a JSON object containing the id of the paper
        # as saved in the database. Should usually not be surfed directly by
        # the user.
        elif state == 'save':
            try:
                received = wash_urlargd(form, {
                    'id': (str, None),
                    'data': (str, '')
                })
                paper_id = received['id']
                in_data = json.loads(received['data'])
                out_data = authorlist_db.save(paper_id, uid, in_data)

                req.content_type = 'application/json'
                req.write(json.dumps(out_data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        # Clones the paper with the given id in the database and responds with a
        # JSON object containing the id of the clone. Should usually not be
        # surfed directly by the user.
        elif state == 'clone':
            try:
                received = wash_urlargd(form, {'id': (str, None)})
                paper_id = received['id']
                data = authorlist_db.clone(paper_id, uid)

                req.content_type = 'application/json'
                req.write(json.dumps(data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        # Transforms the sent data into the format passed in the URL using an
        # authorlist_engine converter. Responds with the MIME type of the
        # converter and offers it as a download (Content-Disposition header).
        elif state == 'export':
            try:
                received = wash_urlargd(form, {
                    'format': (str, None),
                    'data': (str, '')
                })
                data_format = received['format']
                data = received['data']

                converter = authorlist_engine.Converters.get(data_format)

                attachment = 'attachment; filename="%s"' % converter.FILE_NAME
                req.headers_out['Content-Type'] = converter.CONTENT_TYPE
                req.headers_out['Content-Disposition'] = attachment
                #redirect_to_url(req, authorlist_engine.dumps(data, converter))
                req.write(authorlist_engine.dumps(data, converter))
            except:
                # return the exception info if something weird happens
                return sys.exc_info()

        elif state == 'delete':
            try:
                received = wash_urlargd(form, {'id': (str, None)})
                paper_id = received['id']

                data = authorlist_db.delete(paper_id)

                req.content_type = 'application/json'
                req.write(json.dumps(data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        elif state == 'import':
            try:
                received = wash_urlargd(form, {'importid': (str, None)})
                recID = received['importid']
                data = authorlist_engine.retrieve_data_from_record(recID)
                req.content_type = 'application/json'
                req.write(json.dumps(data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        elif state == 'importxml':
            try:
                received = wash_urlargd(form, {'xmlfile': (Field, None)})
                xml_string = received['xmlfile'].value
                import_data = authorlist_engine.retrieve_data_from_xml(
                    xml_string)
                req.content_type = 'application/json'
                req.write(json.dumps(import_data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
        # Unknown state, just go to the main page.
        else:
            redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
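
# A hedged sketch of what a caller sends to the state machine above.  The
# 'save' state reads the 'id' and 'data' form parameters and answers with
# JSON; the 'load' state reads 'id' only.  The values are illustrative and
# the id semantics (new vs. existing paper) are backend-specific.
import json

save_form = {
    'state': 'save',
    'id': '42',                       # made-up paper id
    'data': json.dumps({'papertitle': 'Example paper', 'authors': []}),
}
load_form = {
    'state': 'load',
    'id': '42',
}
# Both would be posted to '<CFG_SITE_URL>/authorlist/'.
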
Example #41
def getHoldingPenData(req, elementId):
    try:
        getUid(req)
    except Error:
        return "unauthorised access !"
    auth = check_user(req, 'cfgoaiharvest')
    if auth[0]:
        return "unauthorised access !"

    elements = elementId.split("_")
    resultHtml = None
    additionalData = None

    if len(elements) == 2:
        filter = elements[1]
        resultHtml = oha.perform_request_gethpyears(elements[0], filter)
    elif len(elements) == 3:
        # only the year is specified
        filter = elements[2]
        nodeYear = int(elements[1])
        resultHtml = oha.perform_request_gethpyear(elements[0], nodeYear,
                                                   filter)

    elif len(elements) == 4:
        # year and month specified
        nodeYear = int(elements[1])
        nodeMonth = int(elements[2])
        filter = elements[3]
        resultHtml = oha.perform_request_gethpmonth(elements[0], nodeYear,
                                                    nodeMonth, filter)

    elif len(elements) == 5:
        # year, month and day specified - returning the entries themselves
        nodeYear = int(elements[1])
        nodeMonth = int(elements[2])
        nodeDay = int(elements[3])
        filter = elements[4]
        daySize = get_holdingpen_day_size(nodeYear, nodeMonth, nodeDay, filter)
        resultHtml = """<li><div id="%s_pager"></div>&nbsp;</li>""" % (
            elementId, )
        resultsPerPage = 20
        numberOfPages = int(math.ceil(float(daySize) / resultsPerPage))
        pages = []
        urlFilter = urllib.quote(filter)
        for i in range(0, numberOfPages):
            pages += [{
                "url":
                "%s/admin/oaiharvest/oaiharvestadmin.py/get_entries_fragment?year=%s&month=%s&day=%s&start=%i&limit=%i&filter=%s"
                % (CFG_SITE_URL, nodeYear, nodeMonth, nodeDay,
                   i * resultsPerPage, resultsPerPage, urlFilter),
                "selector":
                False,
                "type":
                "ajax",
            }]

        additionalData = {"pagerId": elementId + "_pager", "pages": pages}
    else:
        # none of the above: error
        resultHtml = "Wrong request"

    return json.dumps({
        "elementId": elementId,
        "html": resultHtml,
        "additionalData": additionalData
    })
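
# The pager construction above in numbers: one page descriptor is emitted per
# chunk of resultsPerPage holding-pen entries.  A standalone check of that
# arithmetic with a made-up day size:
import math

daySize, resultsPerPage = 45, 20
numberOfPages = int(math.ceil(float(daySize) / resultsPerPage))
offsets = [i * resultsPerPage for i in range(numberOfPages)]
print(numberOfPages)   # 3
print(offsets)         # [0, 20, 40]
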
Example #42
    def index(self, req, form):
        """Handle all BibEdit requests.
        The responsibilities of this function are:
        * JSON decoding and encoding.
        * Redirection, if necessary.
        * Authorization.
        * Calling the appropriate function from the engine.

        """
        uid = getUid(req)
        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})

        # If it is an Ajax request, extract any JSON data.
        ajax_request, recid = False, None
        if form.has_key('jsondata'):
            json_data = json.loads(str(form['jsondata']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            json_data = json_unicode_to_utf8(json_data)
            ajax_request = True
            if json_data.has_key('recID'):
                recid = json_data['recID']
            json_response = {'resultCode': 0, 'ID': json_data['ID']}

        # Authorization.
        user_info = collect_user_info(req)
        if user_info['email'] == 'guest':
            # User is not logged in.
            if not ajax_request:
                # Do not display the introductory recID selection box to guest
                # users (as it used to be with v0.99.0):
                auth_code, auth_message = acc_authorize_action(
                    req, 'runbibedit')
                referer = '/edit/'
                if self.recid:
                    referer = '/%s/%s/edit/' % (CFG_SITE_RECORD, self.recid)
                return page_not_authorized(req=req,
                                           referer=referer,
                                           text=auth_message,
                                           navtrail=navtrail)
            else:
                # Session has most likely timed out.
                json_response.update({'resultCode': 100})
                return json.dumps(json_response)

        elif self.recid:
            # Handle redirects from /record/<record id>/edit to the
            # generic URL.
            redirect_to_url(
                req, '%s/%s/edit/#state=edit&recid=%s&recrev=%s' %
                (CFG_SITE_SECURE_URL, CFG_SITE_RECORD, self.recid, ""))

        elif recid is not None:
            json_response.update({'recID': recid})
            if json_data['requestType'] == "getRecord":
                # Authorize access to record.
                if not user_can_edit_record_collection(req, recid):
                    json_response.update({'resultCode': 101})
                    return json.dumps(json_response)

        # Handle request.
        if not ajax_request:
            # Show BibEdit start page.
            body, errors, warnings = perform_request_init(
                uid, argd['ln'], req, __lastupdated__)
            title = 'Record Editor'
            return page(title=title,
                        body=body,
                        errors=errors,
                        warnings=warnings,
                        uid=uid,
                        language=argd['ln'],
                        navtrail=navtrail,
                        lastupdated=__lastupdated__,
                        req=req,
                        body_css_classes=['bibedit'])
        else:
            # Handle AJAX request.
            json_response.update(
                perform_request_ajax(req, recid, uid, json_data))
            return json.dumps(json_response)
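
# Illustrative only: a minimal 'jsondata' payload for the BibEdit handler
# above.  'requestType', 'recID' and 'ID' mirror the keys read in the code;
# the numeric values are made up.
import json

example_jsondata = json.dumps({
    'requestType': 'getRecord',   # triggers the per-collection authorization check
    'recID': 1,                   # record to open in the editor
    'ID': 1,                      # request id echoed back in the response
})
# The JSON answer carries 'resultCode': 0 on success, 100 on session timeout,
# 101 when the user may not edit the record's collection.
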
Example #43
    def upload_video(self, req, form):
        """
        A clone of uploadfile but for (large) videos.
        Does not copy the uploaded file to the websubmit directory.
        Instead, the path to the file is stored inside the submission directory.
        """
        def gcd(a, b):
            """ the euclidean algorithm """
            while a:
                a, b = b % a, a
            return b

        from invenio.bibencode_extract import extract_frames
        from invenio.bibencode_config import CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_DIR, CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_FNAME
        from invenio.bibencode_encode import determine_aspect
        from invenio.bibencode_utils import probe
        from invenio.bibencode_metadata import ffprobe_metadata
        from invenio.websubmit_config import CFG_WEBSUBMIT_TMP_VIDEO_PREFIX

        argd = wash_urlargd(
            form, {
                'doctype': (str, ''),
                'access': (str, ''),
                'indir': (str, ''),
                'session_id': (str, ''),
                'rename': (str, ''),
            })

        curdir = None
        if not form.has_key("indir") or \
               not form.has_key("doctype") or \
               not form.has_key("access"):
            raise apache.SERVER_RETURN(apache.HTTP_BAD_REQUEST)
        else:
            curdir = os.path.join(CFG_WEBSUBMIT_STORAGEDIR, argd['indir'],
                                  argd['doctype'], argd['access'])

        user_info = collect_user_info(req)
        if form.has_key("session_id"):
            # Are we uploading using Flash, which does not transmit
            # cookies? We then expect to receive session_id as a form
            # parameter.  First check that IP addresses do not
            # mismatch.

            uid = session.uid
            user_info = collect_user_info(uid)
            try:
                act_fd = file(os.path.join(curdir, 'act'))
                action = act_fd.read()
                act_fd.close()
            except:
                act = ""

        # Is user authorized to perform this action?
        (auth_code, auth_message) = acc_authorize_action(
            uid,
            "submit",
            authorized_if_no_roles=not isGuestUser(uid),
            verbose=0,
            doctype=argd['doctype'],
            act=action)
        if acc_is_role("submit", doctype=argd['doctype'],
                       act=action) and auth_code != 0:
            # User cannot submit
            raise apache.SERVER_RETURN(apache.HTTP_UNAUTHORIZED)
        else:
            # Process the upload and get the response
            json_response = {}
            for key, formfields in form.items():
                filename = key.replace("[]", "")
                if hasattr(formfields, "filename") and formfields.filename:
                    dir_to_open = os.path.abspath(
                        os.path.join(curdir, 'files', str(user_info['uid']),
                                     key))
                    try:
                        assert (
                            dir_to_open.startswith(CFG_WEBSUBMIT_STORAGEDIR))
                    except AssertionError:
                        register_exception(req=req,
                                           prefix='curdir="%s", key="%s"' %
                                           (curdir, key))
                        raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)

                    if not os.path.exists(dir_to_open):
                        try:
                            os.makedirs(dir_to_open)
                        except OSError, e:
                            if e.errno != errno.EEXIST:
                                # If the issue is only that directory
                                # already exists, then continue, else
                                # report
                                register_exception(req=req, alert_admin=True)
                                raise apache.SERVER_RETURN(
                                    apache.HTTP_FORBIDDEN)

                    filename = formfields.filename
                    ## Before saving the file to disc, wash the filename (in particular
                    ## washing away UNIX and Windows (e.g. DFS) paths):
                    filename = os.path.basename(filename.split('\\')[-1])
                    filename = filename.strip()
                    if filename != "":
                        # Check that file does not already exist
                        while os.path.exists(
                                os.path.join(dir_to_open, filename)):
                            #dirname, basename, extension = decompose_file(new_destination_path)
                            basedir, name, extension = decompose_file(filename)
                            new_name = propose_next_docname(name)
                            filename = new_name + extension

                        #-------------#
                        # VIDEO STUFF #
                        #-------------#

                        ## Remove all previous uploads
                        filelist = os.listdir(
                            os.path.split(formfields.file.name)[0])
                        for afile in filelist:
                            if argd['access'] in afile:
                                os.remove(
                                    os.path.join(
                                        os.path.split(formfields.file.name)[0],
                                        afile))

                        ## Check if the file is a readable video
                        ## We must exclude all image and audio formats that are readable by ffprobe
                        if (os.path.splitext(filename)[1] in [
                                'jpg', 'jpeg', 'gif', 'tiff', 'bmp', 'png',
                                'tga', 'jp2', 'j2k', 'jpf', 'jpm', 'mj2',
                                'biff', 'cgm', 'exif', 'img', 'mng', 'pic',
                                'pict', 'raw', 'wmf', 'jpe', 'jif', 'jfif',
                                'jfi', 'tif', 'webp', 'svg', 'ai', 'ps', 'psd',
                                'wav', 'mp3', 'pcm', 'aiff', 'au', 'flac',
                                'wma', 'm4a', 'wv', 'oga', 'm4a', 'm4b', 'm4p',
                                'm4r', 'aac', 'mp4', 'vox', 'amr', 'snd'
                        ] or not probe(formfields.file.name)):
                            formfields.file.close()
                            raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)

                        ## We have no "delete" attribute in Python 2.4
                        if sys.hexversion < 0x2050000:
                            ## We need to rename first and create a dummy file
                            ## Rename the temporary file for the garbage collector
                            new_tmp_fullpath = os.path.split(
                                formfields.file.name
                            )[0] + "/" + CFG_WEBSUBMIT_TMP_VIDEO_PREFIX + argd[
                                'access'] + "_" + os.path.split(
                                    formfields.file.name)[1]
                            os.rename(formfields.file.name, new_tmp_fullpath)
                            dummy = open(formfields.file.name, "w")
                            dummy.close()
                            formfields.file.close()
                        else:
                            # Mark the NamedTemporaryFile as not to be deleted
                            formfields.file.delete = False
                            formfields.file.close()
                            ## Rename the temporary file for the garbage collector
                            new_tmp_fullpath = os.path.split(
                                formfields.file.name
                            )[0] + "/" + CFG_WEBSUBMIT_TMP_VIDEO_PREFIX + argd[
                                'access'] + "_" + os.path.split(
                                    formfields.file.name)[1]
                            os.rename(formfields.file.name, new_tmp_fullpath)

                        # Write the path to the temp file to a file in STORAGEDIR
                        fp = open(os.path.join(dir_to_open, "filepath"), "w")
                        fp.write(new_tmp_fullpath)
                        fp.close()

                        fp = open(os.path.join(dir_to_open, "filename"), "w")
                        fp.write(filename)
                        fp.close()

                        ## We are going to extract some thumbnails for websubmit ##
                        sample_dir = os.path.join(
                            curdir, 'files', str(user_info['uid']),
                            CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_DIR)
                        try:
                            ## Remove old thumbnails
                            shutil.rmtree(sample_dir)
                        except OSError:
                            register_exception(req=req, alert_admin=False)
                        try:
                            os.makedirs(
                                os.path.join(curdir, 'files',
                                             str(user_info['uid']),
                                             sample_dir))
                        except OSError:
                            register_exception(req=req, alert_admin=False)
                        try:
                            extract_frames(
                                input_file=new_tmp_fullpath,
                                output_file=os.path.join(
                                    sample_dir,
                                    CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_FNAME
                                ),
                                size="600x600",
                                numberof=5)
                            json_response['frames'] = []
                            for extracted_frame in os.listdir(sample_dir):
                                json_response['frames'].append(extracted_frame)
                        except:
                            ## If the frame extraction fails, something was bad with the video
                            os.remove(new_tmp_fullpath)
                            register_exception(req=req, alert_admin=False)
                            raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)

                        ## Try to detect the aspect. If this fails, the video is not readable
                        ## or a wrong file might have been uploaded.
                        try:
                            (aspect, width,
                             height) = determine_aspect(new_tmp_fullpath)
                            if aspect:
                                aspx, aspy = aspect.split(':')
                            else:
                                the_gcd = gcd(width, height)
                                aspx = str(width / the_gcd)
                                aspy = str(height / the_gcd)
                            json_response['aspx'] = aspx
                            json_response['aspy'] = aspy
                        except TypeError:
                            ## If the aspect detection completely fails
                            os.remove(new_tmp_fullpath)
                            register_exception(req=req, alert_admin=False)
                            raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)

                        ## Try to extract some metadata from the video container
                        metadata = ffprobe_metadata(new_tmp_fullpath)
                        json_response['meta_title'] = metadata['format'].get(
                            'TAG:title')
                        json_response['meta_description'] = metadata[
                            'format'].get('TAG:description')
                        json_response['meta_year'] = metadata['format'].get(
                            'TAG:year')
                        json_response['meta_author'] = metadata['format'].get(
                            'TAG:author')
                    ## Empty file name
                    else:
                        raise apache.SERVER_RETURN(apache.HTTP_BAD_REQUEST)
                    ## We found our file, we can break the loop
                    break

            # Send our response
            if CFG_JSON_AVAILABLE:

                dumped_response = json.dumps(json_response)

                # store the response in the websubmit directory
                # this is needed if the submission is not finished and continued later
                response_dir = os.path.join(curdir, 'files',
                                            str(user_info['uid']), "response")
                try:
                    os.makedirs(response_dir)
                except OSError:
                    # register_exception(req=req, alert_admin=False)
                    pass
                fp = open(os.path.join(response_dir, "response"), "w")
                fp.write(dumped_response)
                fp.close()

                return dumped_response
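
# The collision-avoidance renaming used above, sketched with simplified
# stand-ins for Invenio's decompose_file() and propose_next_docname() (the
# real helpers live elsewhere; these assumed versions only illustrate the
# loop's effect):
import os
import re


def decompose_file(filename):
    name, extension = os.path.splitext(filename)
    return os.path.dirname(filename), name, extension


def propose_next_docname(name):
    match = re.search(r'_(\d+)$', name)
    if match:
        return '%s_%d' % (name[:match.start()], int(match.group(1)) + 1)
    return name + '_1'


filename = 'talk.mp4'
existing = set(['talk.mp4', 'talk_1.mp4'])   # stands in for os.path.exists()
while filename in existing:
    dummy, name, extension = decompose_file(filename)
    filename = propose_next_docname(name) + extension
print(filename)                              # talk_2.mp4
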
Example #44
                                        register_exception(req=req, alert_admin=True)
                                        raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)
                            os.rename(os.path.join(icon_path, icon_name),
                                      os.path.join(icons_dir, icon_name))
                            added_files[key] = {'name': filename,
                                                'iconName': icon_name}
                        except InvenioWebSubmitIconCreatorError, e:
                            # We could not create the icon
                            added_files[key] = {'name': filename}
                            continue
                    else:
                        raise apache.SERVER_RETURN(apache.HTTP_BAD_REQUEST)

            # Send our response
            if CFG_JSON_AVAILABLE:
                return json.dumps(added_files)


    def upload_video(self, req, form):
        """
        A clone of uploadfile but for (large) videos.
        Does not copy the uploaded file to the websubmit directory.
        Instead, the path to the file is stored inside the submission directory.
        """

        def gcd(a, b):
            """ the euclidean algorithm """
            while a:
                a, b = b % a, a
            return b
Example #45
def getHoldingPenData(req, elementId):
    try:
        getUid(req)
    except Error:
        return "unauthorised access !"
    auth = check_user(req, "cfgoaiharvest")
    if auth[0]:
        return "unauthorised access !"

    elements = elementId.split("_")
    resultHtml = None
    additionalData = None

    if len(elements) == 2:
        filter = elements[1]
        resultHtml = oha.perform_request_gethpyears(elements[0], filter)
    elif len(elements) == 3:
        # only the year is specified
        filter = elements[2]
        nodeYear = int(elements[1])
        resultHtml = oha.perform_request_gethpyear(elements[0], nodeYear, filter)

    elif len(elements) == 4:
        # year and month specified
        nodeYear = int(elements[1])
        nodeMonth = int(elements[2])
        filter = elements[3]
        resultHtml = oha.perform_request_gethpmonth(elements[0], nodeYear, nodeMonth, filter)

    elif len(elements) == 5:
        # year, month and day specified - returning the entries themselves
        nodeYear = int(elements[1])
        nodeMonth = int(elements[2])
        nodeDay = int(elements[3])
        filter = elements[4]
        daySize = get_holdingpen_day_size(nodeYear, nodeMonth, nodeDay, filter)
        resultHtml = """<li><div id="%s_pager"></div>&nbsp;</li>""" % (elementId,)
        resultsPerPage = 20
        numberOfPages = int(math.ceil(float(daySize) / resultsPerPage))
        pages = []
        urlFilter = urllib.quote(filter)
        for i in range(0, numberOfPages):
            pages += [
                {
                    "baseurl": "%s/admin/oaiharvest/oaiharvestadmin.py/get_entries_fragment" % (CFG_SITE_URL,),
                    "selector": False,
                    "type": "ajax",
                    "year": nodeYear,
                    "month": nodeMonth,
                    "day": nodeDay,
                    "start": i * resultsPerPage,
                    "limit": resultsPerPage,
                    "filter": urlFilter,
                }
            ]

        additionalData = {"pagerId": elementId + "_pager", "pages": pages}
    else:
        # none of the above: error
        resultHtml = "Wrong request"

    return json.dumps({"elementId": elementId, "html": resultHtml, "additionalData": additionalData})