Example 1
def harvest_aps(from_param, until_param, perpage):
    """
    Performs a request to APS API servers retrieving JSON.
    """
    page = 1
    last_page = 1
    conn = APS_connect(from_param, until_param, page, perpage)
    if not conn:
        write_message("Fatal Error: Cannot reach APS servers. Aborting.")
        raise APSHarvesterConnectionError("Cannot connect to APS servers")
    if conn.headers['link']:
        links = conn.headers['link'].split(",")
        for l in links:
            if l.find('rel="next"') > 0:
                next_page = int(re.search(r'(?<=(page=))\w+', l).group(0))
            if l.find('rel="last"') > 0:
                last_page = int(re.search(r'(?<=(page=))\w+', l).group(0))

    # Fetch first page of data
    data = json.loads(conn.next())
    write_message("Data received from APS: \n%s" % (data,), verbose=5)
    records = []
    for d in data:
        records.append(APSRecord(None, d["doi"], last_modified=d['last_modified_at']))

    # Check for more pages
    if last_page > 1:
        for pagenum in range(next_page, last_page+1):
            conn = APS_connect(from_param, until_param, pagenum, perpage)
            data = json.loads(conn.next())
            write_message("Data received from APS: \n%s" % (data,), verbose=5)
            for d in data:
                records.append(APSRecord(None, d["doi"], last_modified=d['last_modified_at']))

    return records
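
The pagination above relies on the HTTP Link header returned by the APS API. A standalone sketch of just that parsing step, using a hypothetical header value (not real APS output), looks like this:

import re

# Hypothetical Link header value; the rel="next"/rel="last" handling mirrors
# the loop in harvest_aps above.
link_header = ('<https://example.org/api?page=2&per_page=100>; rel="next", '
               '<https://example.org/api?page=7&per_page=100>; rel="last"')

next_page = last_page = 1
for part in link_header.split(","):
    match = re.search(r'(?<=page=)\d+', part)
    if 'rel="next"' in part and match:
        next_page = int(match.group(0))
    elif 'rel="last"' in part and match:
        last_page = int(match.group(0))

print("next=%s last=%s" % (next_page, last_page))  # next=2 last=7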
Example 2
def get_redirection_data(label):
    """
    Returns all information about a given redirection identified by label.

    @param label: the label identifying the redirection
    @type label: string

    @returns: a dictionary with the following keys:
        * label: the label
        * plugin: the name of the plugin
        * parameters: the parameters that are passed to the plugin
            (deserialized from JSON)
        * creation_date: datetime object on when the redirection was first
            created.
        * modification_date: datetime object on when the redirection was
            last modified.
    @rtype: dict

    @raises ValueError: in case the label does not exist.
    """
    res = run_sql(
        "SELECT label, plugin, parameters, creation_date, modification_date FROM goto WHERE label=%s",
        (label, ))
    if res:
        return {
            'label': res[0][0],
            'plugin': CFG_GOTO_PLUGINS[res[0][1]],
            'parameters': json_unicode_to_utf8(json.loads(res[0][2])),
            'creation_date': res[0][3],
            'modification_date': res[0][4]
        }
    else:
        raise ValueError("%s label does not exist" % label)
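
json_unicode_to_utf8 appears here and in several later examples but is never shown. It presumably walks the decoded structure and turns unicode strings into UTF-8 byte strings (Python 2 semantics); a minimal sketch under that assumption, not the real Invenio helper, is:

def json_unicode_to_utf8_sketch(data):
    # Recursively encode unicode strings to UTF-8 str objects, leaving
    # other types untouched (assumed behaviour of the real helper).
    if isinstance(data, unicode):
        return data.encode('utf-8')
    if isinstance(data, dict):
        return dict((json_unicode_to_utf8_sketch(key),
                     json_unicode_to_utf8_sketch(value))
                    for key, value in data.iteritems())
    if isinstance(data, list):
        return [json_unicode_to_utf8_sketch(item) for item in data]
    return data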
Example 3
def get_kb_mappings_embedded_json(kb_name="",
                                  key="",
                                  value="",
                                  match_type="s",
                                  limit=None):
    """Get leftside/rightside mappings from kb kb_name formatted as json dict.
       The rightside is actually considered as a json string and hence embedded
       within the final result.

       If key given, give only those with left side (mapFrom) = key.
       If value given, give only those with right side (mapTo) = value.

       @param kb_name: the name of the kb
       @param key: include only lines matching this on left side in the results
       @param value: include only lines matching this on right side in the results
       @param match_type: s = substring match, e = exact match
       @param limit: maximum number of results to return (all results if set to None)
       @return a list of mappings
    """
    mappings = get_kb_mappings(kb_name, key, value, match_type)
    ret = []
    if limit is None:
        limit = len(mappings)
    for m in mappings[:limit]:
        label = m['value'] or m['key']
        value = m['key'] or m['value']
        ret.append({'label': label, 'value': json.loads(value)})
    return json.dumps(ret)
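
The json.loads() call before the final json.dumps() is what makes the right side come out as an embedded object rather than an escaped string, as a short illustration shows:

import json

raw = '{"full_name": "Physical Review Letters"}'  # a right-side value that is already JSON text

print(json.dumps({'value': raw}))              # value ends up as one escaped string
print(json.dumps({'value': json.loads(raw)}))  # value is embedded as a real object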
Example 4
 def test_insert_via_curl(self):
     """batchuploader - robotupload insert via CLI curl"""
     curl_input_file = os.path.join(CFG_TMPDIR, 'curl_test.xml')
     open(curl_input_file, "w").write(self.marcxml)
     try:
         result = run_shell_command(
             '/usr/bin/curl -T %s %s -A %s -H "Content-Type: application/marcxml+xml"',
             [
                 curl_input_file, self.nonce_url,
                 make_user_agent_string('BatchUploader')
             ])[1]
         self.failUnless("[INFO]" in result)
         current_task = get_last_taskid()
         run_shell_command("%s/bibupload %%s" % CFG_BINDIR,
                           [str(current_task)])
         results = json.loads(
             open(self.callback_result_path).read())
         self.failUnless('results' in results,
                         '"%s" did not contained [INFO]' % result)
         self.assertEqual(len(results['results']), 1)
         self.assertEqual(results['nonce'], "1234")
         self.failUnless(results['results'][0]['success'])
         self.failUnless(results['results'][0]['recid'] > 0)
         self.failUnless(
             """<subfield code="a">Doe, John</subfield>"""
             in results['results'][0]['marcxml'],
             results['results'][0]['marcxml'])
     finally:
         os.remove(curl_input_file)
Example 5
 def test_legacy_insert_via_curl(self):
     """batchuploader - robotupload legacy insert via CLI curl"""
     curl_input_file = os.path.join(CFG_TMPDIR, "curl_test.xml")
     open(curl_input_file, "w").write(self.marcxml)
     try:
         ## curl -F '[email protected]' -F 'mode=-i' [-F 'callback_url=http://...'] [-F 'nonce=1234'] http://cds.cern.ch/batchuploader/robotupload -A invenio_webupload
         code, result, err = run_shell_command(
             "/usr/bin/curl -v -F file=@%s -F 'mode=-i' -F callback_url=%s -F nonce=1234 %s -A %s",
             [curl_input_file, self.callback_url, self.legacy_url, make_user_agent_string("BatchUploader")],
         )
         self.failUnless("[INFO]" in result, "[INFO] not find in results: %s, %s" % (result, err))
         current_task = get_last_taskid()
         run_shell_command("%s/bibupload %%s" % CFG_BINDIR, [str(current_task)])
         results = json.loads(open(self.callback_result_path).read())
         self.failUnless("results" in results, '"%s" did not contained [INFO]' % result)
         self.assertEqual(len(results["results"]), 1)
         self.assertEqual(results["nonce"], "1234")
         self.failUnless(results["results"][0]["success"])
         self.failUnless(results["results"][0]["recid"] > 0)
         self.failUnless(
             """<subfield code="a">Doe, John</subfield>""" in results["results"][0]["marcxml"],
             results["results"][0]["marcxml"],
         )
     finally:
         os.remove(curl_input_file)
Example 6
 def test_legacy_insert_via_curl(self):
     """batchuploader - robotupload legacy insert via CLI curl"""
     curl_input_file = os.path.join(CFG_TMPDIR, 'curl_test.xml')
     open(curl_input_file, "w").write(self.marcxml)
     try:
         ## curl -F '[email protected]' -F 'mode=-i' [-F 'callback_url=http://...'] [-F 'nonce=1234'] http://cds.cern.ch/batchuploader/robotupload -A invenio_webupload
         code, result, err = run_shell_command(
             "/usr/bin/curl -v -F file=@%s -F 'mode=-i' -F callback_url=%s -F nonce=1234 %s -A %s",
             [
                 curl_input_file, self.callback_url,
                 self.legacy_url,
                 make_user_agent_string('BatchUploader')
             ])
         self.failUnless(
             "[INFO]" in result,
             '[INFO] not find in results: %s, %s' % (result, err))
         current_task = get_last_taskid()
         run_shell_command("%s/bibupload %%s" % CFG_BINDIR,
                           [str(current_task)])
         results = json.loads(
             open(self.callback_result_path).read())
         self.failUnless('results' in results,
                         '"%s" did not contained [INFO]' % result)
         self.assertEqual(len(results['results']), 1)
         self.assertEqual(results['nonce'], "1234")
         self.failUnless(results['results'][0]['success'])
         self.failUnless(results['results'][0]['recid'] > 0)
         self.failUnless(
             """<subfield code="a">Doe, John</subfield>"""
             in results['results'][0]['marcxml'],
             results['results'][0]['marcxml'])
     finally:
         os.remove(curl_input_file)
Example 7
def get_redirection_data(label):
    """
    Returns all information about a given redirection identified by label.

    @param label: the label identifying the redirection
    @type label: string

    @returns: a dictionary with the following keys:
        * label: the label
        * plugin: the name of the plugin
        * parameters: the parameters that are passed to the plugin
            (deserialized from JSON)
        * creation_date: datetime object on when the redirection was first
            created.
        * modification_date: datetime object on when the redirection was
            last modified.
    @rtype: dict

    @raises ValueError: in case the label does not exist.
    """
    res = run_sql("SELECT label, plugin, parameters, creation_date, modification_date FROM goto WHERE label=%s", (label, ))
    if res:
        return {'label': res[0][0],
                 'plugin': CFG_GOTO_PLUGINS[res[0][1]],
                 'parameters': json_unicode_to_utf8(json.loads(res[0][2])),
                 'creation_date': res[0][3],
                 'modification_date': res[0][4]}
    else:
        raise ValueError("%s label does not exist" % label)
Example 8
def get_keywords_from_drupal():
    """
    Retrieve keywords from Drupal feed
    """
    from invenio.websubmit_functions.file_cacher import Cache
    import json
    import urllib2

    # Drupal's feed
    DRUPAL_FEED = "http://home.web.cern.ch/api/tags-json-feed"

    def get_drupal_data():
        data = json.load(urllib2.urlopen(DRUPAL_FEED))
        data = map(lambda x: str(x['name'].encode('utf-8')), data['tags'])
        return json.dumps(data)

    try:
        cached = Cache('keywords.json', expiration=5)
        if(cached.expired()):
            cached.write(get_drupal_data())
        data = cached.read()
    except:
        data = get_drupal_data()

    return [str(x.encode('utf-8')) for x in json.loads(data)]
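
The Cache helper imported from invenio.websubmit_functions.file_cacher is not shown here. A minimal stand-in with the same write()/read()/expired() interface, assuming the expiration argument is a number of days, could look like this (the real API may well differ):

import os
import time

class SimpleFileCache(object):
    """Hypothetical stand-in for the file_cacher.Cache helper used above."""

    def __init__(self, filename, expiration=5, directory='/tmp'):
        self.path = os.path.join(directory, filename)
        self.max_age = expiration * 24 * 3600  # days -> seconds (assumption)

    def expired(self):
        # A missing cache file counts as expired so it gets (re)written.
        if not os.path.exists(self.path):
            return True
        return (time.time() - os.path.getmtime(self.path)) > self.max_age

    def write(self, data):
        with open(self.path, 'w') as handle:
            handle.write(data)

    def read(self):
        with open(self.path) as handle:
            return handle.read()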
Example 9
 def get_json_parameters_from_cli(option, dummy_opt_str, value,
                                  dummy_parser):
     try:
         option.parameters = json_unicode_to_utf8(json.loads(value))
     except Exception, err:
         raise optparse.OptionValueError(
             "Cannot parse as a valid JSON serialization the provided parameters: %s. %s"
             % (value, err))
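
get_json_parameters_from_cli has the signature of an optparse callback. A sketch of how such a callback is usually registered (storing the parsed value on parser.values rather than on the option object, which is the more common pattern) might be:

import json
import optparse

def json_callback(option, opt_str, value, parser):
    # Parse the raw option string as JSON, failing with a clear optparse
    # error if it is not a valid JSON serialization.
    try:
        setattr(parser.values, option.dest, json.loads(value))
    except ValueError as err:
        raise optparse.OptionValueError(
            "Cannot parse %r as a valid JSON serialization: %s" % (value, err))

parser = optparse.OptionParser()
parser.add_option("--parameters", dest="parameters", type="string",
                  action="callback", callback=json_callback)
options, _ = parser.parse_args(["--parameters", '{"recid": 1}'])
# options.parameters now holds the parsed dict.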
Example 10
    def manage(self, req, form):
        """ Web interface for the management of the info space """
        uid = getUid(req)
        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})

        # If it is an Ajax request, extract any JSON data.
        ajax_request = False
        if 'jsondata' in form:
            json_data = json.loads(str(form['jsondata']))
            json_data = json_unicode_to_utf8(json_data)
            ajax_request = True
            json_response = {}

        # Authorization.
        user_info = collect_user_info(req)
        if user_info['email'] == 'guest':
            # User is not logged in.
            if not ajax_request:
                # Do not display the introductory recID selection box to guest
                # users (as it used to be with v0.99.0):
                dummy_auth_code, auth_message = acc_authorize_action(
                    req, 'runinfomanager')
                referer = '/info'
                return page_not_authorized(req=req,
                                           referer=referer,
                                           text=auth_message)
            else:
                # Session has most likely timed out.
                json_response.update({'status': "timeout"})
                return json.dumps(json_response)
        # Handle request.
        if not ajax_request:
            body, errors, warnings = perform_request_init_info_interface()
            title = 'Info Space Manager'
            return page(title=title,
                        body=body,
                        errors=errors,
                        warnings=warnings,
                        uid=uid,
                        language=argd['ln'],
                        req=req)
        else:
            # Handle AJAX request.
            if json_data["action"] == "listFiles":
                json_response.update(
                    perform_request_edit_file(json_data["filename"]))
                try:
                    return json.dumps(json_response)
                except UnicodeDecodeError:
                    # Error decoding, the file can be a pdf, image or any kind
                    # of file non-editable
                    return json.dumps({"status": "error_file_not_readable"})

            if json_data["action"] == "saveContent":
                return json.dumps(
                    perform_request_save_file(json_data["filename"],
                                              json_data["filecontent"]))
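
The handler above only enters its AJAX branch when the form carries a 'jsondata' field holding a JSON document with an "action" key. A hypothetical client-side call matching that contract (URL and field values are illustrative, not the real interface) could be built like this:

import json
import urllib
import urllib2

payload = {'action': 'listFiles', 'filename': 'info.html'}
request = urllib2.Request('https://example.org/info/manage',
                          data=urllib.urlencode({'jsondata': json.dumps(payload)}))
response = json.loads(urllib2.urlopen(request).read())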
Example 11
    def manage(self, req, form):
        """ Web interface for the management of the info space """
        uid = getUid(req)
        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})

        # If it is an Ajax request, extract any JSON data.
        ajax_request = False
        if 'jsondata' in form:
            json_data = json.loads(str(form['jsondata']))
            json_data = json_unicode_to_utf8(json_data)
            ajax_request = True
            json_response = {}

        # Authorization.
        user_info = collect_user_info(req)
        if user_info['email'] == 'guest':
            # User is not logged in.
            if not ajax_request:
                # Do not display the introductory recID selection box to guest
                # users (as it used to be with v0.99.0):
                dummy_auth_code, auth_message = acc_authorize_action(req,
                                                                     'runinfomanager')
                referer = '/info'
                return page_not_authorized(req=req, referer=referer,
                                           text=auth_message)
            else:
                # Session has most likely timed out.
                json_response.update({'status': "timeout"})
                return json.dumps(json_response)
        # Handle request.
        if not ajax_request:
            body, errors, warnings = perform_request_init_info_interface()
            title = 'Info Space Manager'
            return page(title=title,
                        body=body,
                        errors=errors,
                        warnings=warnings,
                        uid=uid,
                        language=argd['ln'],
                        req=req)
        else:
            # Handle AJAX request.
            if json_data["action"] == "listFiles":
                json_response.update(perform_request_edit_file(json_data["filename"]))
                try:
                    return json.dumps(json_response)
                except UnicodeDecodeError:
                    # Error decoding, the file can be a pdf, image or any kind
                    # of file non-editable
                    return json.dumps({"status": "error_file_not_readable"})

            if json_data["action"] == "saveContent":
                return json.dumps(perform_request_save_file(json_data["filename"],
                                                            json_data["filecontent"]))
Example 12
    def json_req_profiler(self, req, form):
        if "ajaxProfile" in form:
            profiler = cProfile.Profile()
            return_val = profiler.runcall(func, self, req, form)

            results = cStringIO.StringIO()
            stats = pstats.Stats(profiler, stream=results)
            stats.sort_stats('cumulative')
            stats.print_stats(100)

            json_in = json.loads(str(form['jsondata']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            json_in = json_unicode_to_utf8(json_in)

            json_data = json.loads(return_val)
            json_data.update({"profilerStats": "<pre style='overflow: scroll'>" + json_in['requestType'] + results.getvalue() + "</pre>"})
            return json.dumps(json_data)
        else:
            return func(self, req, form)
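
json_req_profiler closes over a name func that is not defined in the snippet, so it is presumably the inner function of a decorator applied to JSON request handlers. A minimal sketch of the assumed outer wrapper:

def profile_json_request(func):
    """Hypothetical decorator shell around the json_req_profiler shown above."""
    def json_req_profiler(self, req, form):
        # ... profiling body exactly as in the example above ...
        return func(self, req, form)
    return json_req_profiler

# It would then be applied to a handler method, e.g.:
#     @profile_json_request
#     def index(self, req, form):
#         ...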
Example 13
    def json_req_profiler(self, req, form):
        if "ajaxProfile" in form:
            profiler = cProfile.Profile()
            return_val = profiler.runcall(func, self, req, form)

            results = cStringIO.StringIO()
            stats = pstats.Stats(profiler, stream=results)
            stats.sort_stats('cumulative')
            stats.print_stats(100)

            json_in = json.loads(str(form['jsondata']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            json_in = json_unicode_to_utf8(json_in)

            json_data = json.loads(return_val)
            json_data.update({"profilerStats": "<pre style='overflow: scroll'>" + json_in['requestType'] + results.getvalue() + "</pre>"})
            return json.dumps(json_data)
        else:
            return func(self, req, form)
Example 14
 def __extract_attribute(self, req):
     """
     Load the given assertion from the request, extract all the attributes
     needed to properly log the user in, and verify that the data is
     actually both well formed and signed correctly.
     """
     from invenio.webinterface_handler import wash_urlargd
     args = wash_urlargd(
         req.form, {
             'assertion': (str, ''),
             'robot': (str, ''),
             'digest': (str, ''),
             'login_method': (str, '')
         })
     assertion = args['assertion']
     digest = args['digest']
     robot = args['robot']
     login_method = args['login_method']
     shared_key = load_robot_keys().get(login_method, {}).get(robot)
     if shared_key is None:
         raise InvenioWebAccessExternalAuthError(
             "A key does not exist for robot: %s, login_method: %s" %
             (robot, login_method))
     if not self.verify(shared_key, assertion, digest):
         raise InvenioWebAccessExternalAuthError(
             "The provided assertion does not validate against the digest %s for robot %s"
             % (repr(digest), repr(robot)))
     if self.use_zlib:
         try:
             ## Workaround to Perl implementation that does not add
             ## any padding to the base64 encoding.
             needed_pad = (4 - len(assertion) % 4) % 4
             assertion += needed_pad * '='
             assertion = decompress(base64.urlsafe_b64decode(assertion))
         except:
             raise InvenioWebAccessExternalAuthError(
                 "The provided assertion is corrupted")
     data = json_unicode_to_utf8(json.loads(assertion))
     if not isinstance(data, dict):
         raise InvenioWebAccessExternalAuthError(
             "The provided assertion is invalid")
     timeout = data[self.timeout_attribute_name]
     if timeout < time.time():
         raise InvenioWebAccessExternalAuthError(
             "The provided assertion is expired")
     userip = data.get(self.userip_attribute_name)
     if not self.check_user_ip or (normalize_ip(
             userip, self.check_user_ip) == normalize_ip(
                 req.remote_ip, self.check_user_ip)):
         return data
     else:
         raise InvenioWebAccessExternalAuthError(
             "The provided assertion has been issued for a different IP address (%s instead of %s)"
             % (userip, req.remote_ip))
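
The zlib branch above undoes a producer that compresses a JSON assertion, urlsafe-base64-encodes it and, per the Perl workaround mentioned in the comments, strips the '=' padding. A standalone round-trip sketch of just that encoding step, with illustrative attribute names rather than the real ones:

import base64
import json
import zlib

# Producer side (illustrative payload and keys).
payload = json.dumps({'email': 'user@example.org',
                      'timeout': 9999999999,
                      'remote_ip': '127.0.0.1'})
assertion = base64.urlsafe_b64encode(zlib.compress(payload)).rstrip('=')

# Consumer side: re-add the stripped padding before decoding, as in
# __extract_attribute above.
needed_pad = (4 - len(assertion) % 4) % 4
assertion += needed_pad * '='
data = json.loads(zlib.decompress(base64.urlsafe_b64decode(assertion)))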
Example 15
def harvest_aps(from_param, until_param, perpage):
    """
    Performs a request to APS API servers retrieving JSON.
    """
    page = 1
    last_page = 1
    conn = APS_connect(from_param, until_param, page, perpage)
    if not conn:
        write_message("Fatal Error: Cannot reach APS servers. Aborting.")
        raise APSHarvesterConnectionError("Cannot connect to APS servers")
    if conn.headers['link']:
        links = conn.headers['link'].split(",")
        for l in links:
            if l.find('rel="next"') > 0:
                next_page = int(re.search(r'(?<=(page=))\w+', l).group(0))
            if l.find('rel="last"') > 0:
                last_page = int(re.search(r'(?<=(page=))\w+', l).group(0))

    # Fetch first page of data
    data = json.loads(conn.next())
    write_message("Data received from APS: \n%s" % (data, ), verbose=5)
    records = []
    for d in data:
        records.append(
            APSRecord(None,
                      d["doi"],
                      last_modified=d['metadata_last_modified_at']))

    # Check for more pages
    if last_page > 1:
        for pagenum in range(next_page, last_page + 1):
            conn = APS_connect(from_param, until_param, pagenum, perpage)
            data = json.loads(conn.next())
            write_message("Data received from APS: \n%s" % (data, ), verbose=5)
            for d in data:
                records.append(
                    APSRecord(None,
                              d["doi"],
                              last_modified=d['metadata_last_modified_at']))

    return records
Example 16
 def test_insert_with_callback(self):
     """batchuploader - robotupload insert with callback"""
     result = urllib2.urlopen(self.req_callback).read()
     self.failUnless("[INFO]" in result, '"%s" did not contained [INFO]' % result)
     current_task = get_last_taskid()
     run_shell_command("%s/bibupload %%s" % CFG_BINDIR, [str(current_task)])
     results = json.loads(open(self.callback_result_path).read())
     self.failUnless('results' in results)
     self.assertEqual(len(results['results']), 1)
     self.failUnless(results['results'][0]['success'])
     self.failUnless(results['results'][0]['recid'] > 0)
     self.failUnless("""<subfield code="a">Doe, John</subfield>""" in results['results'][0]['marcxml'], results['results'][0]['marcxml'])
Example 17
    def templates(self, req, form):
        """handle an edit/templates request"""
        uid = getUid(req)
        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})

        # If it is an Ajax request, extract any JSON data.
        ajax_request = False
        if form.has_key('jsondata'):
            json_data = json.loads(str(form['jsondata']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            json_data = json_unicode_to_utf8(json_data)
            ajax_request = True
            json_response = {'resultCode': 0}

        # Authorization.
        user_info = collect_user_info(req)
        if user_info['email'] == 'guest':
            # User is not logged in.
            if not ajax_request:
                # Do not display the introductory recID selection box to guest
                # users (as it used to be with v0.99.0):
                dummy_auth_code, auth_message = acc_authorize_action(
                    req, 'runbibedit')
                referer = '/edit'
                return page_not_authorized(req=req,
                                           referer=referer,
                                           text=auth_message,
                                           navtrail=navtrail)
            else:
                # Session has most likely timed out.
                json_response.update({'resultCode': 100})
                return json.dumps(json_response)
        # Handle request.
        if not ajax_request:
            # Show BibEdit template management start page.
            body, errors, warnings = perform_request_init_template_interface()
            title = 'Record Editor Template Manager'
            return page(title=title,
                        body=body,
                        errors=errors,
                        warnings=warnings,
                        uid=uid,
                        language=argd['ln'],
                        navtrail=navtrail_bibedit,
                        lastupdated=__lastupdated__,
                        req=req,
                        body_css_classes=['bibedit'])
        else:
            # Handle AJAX request.
            json_response.update(
                perform_request_ajax_template_interface(json_data))
            return json.dumps(json_response)
Example 18
 def test_insert_with_callback(self):
     """batchuploader - robotupload insert with callback"""
     result = urllib2.urlopen(self.req_callback).read()
     self.failUnless("[INFO]" in result, '"%s" did not contained [INFO]' % result)
     current_task = get_last_taskid()
     run_shell_command("%s/bibupload %%s" % CFG_BINDIR, [str(current_task)])
     results = json.loads(open(self.callback_result_path).read())
     self.failUnless('results' in results)
     self.assertEqual(len(results['results']), 1)
     self.failUnless(results['results'][0]['success'])
     self.failUnless(results['results'][0]['recid'] > 0)
     self.failUnless("""<subfield code="a">Doe, John</subfield>""" in results['results'][0]['marcxml'], results['results'][0]['marcxml'])
Example 19
    def templates(self, req, form):
        """handle an edit/templates request"""
        uid = getUid(req)
        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})

        # If it is an Ajax request, extract any JSON data.
        ajax_request = False
        if form.has_key('jsondata'):
            json_data = json.loads(str(form['jsondata']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            json_data = json_unicode_to_utf8(json_data)
            ajax_request = True
            json_response = {'resultCode': 0}

        # Authorization.
        user_info = collect_user_info(req)
        if user_info['email'] == 'guest':
            # User is not logged in.
            if not ajax_request:
                # Do not display the introductory recID selection box to guest
                # users (as it used to be with v0.99.0):
                dummy_auth_code, auth_message = acc_authorize_action(req,
                                                                     'runbibedit')
                referer = '/edit'
                return page_not_authorized(req=req, referer=referer,
                                           text=auth_message, navtrail=navtrail)
            else:
                # Session has most likely timed out.
                json_response.update({'resultCode': 100})
                return json.dumps(json_response)
        # Handle request.
        if not ajax_request:
            # Show BibEdit template management start page.
            body, errors, warnings = perform_request_init_template_interface()
            title = 'Record Editor Template Manager'
            return page(title       = title,
                        body        = body,
                        errors      = errors,
                        warnings    = warnings,
                        uid         = uid,
                        language    = argd['ln'],
                        navtrail    = navtrail_bibedit,
                        lastupdated = __lastupdated__,
                        req         = req,
                        body_css_classes = ['bibedit'])
        else:
            # Handle AJAX request.
            json_response.update(perform_request_ajax_template_interface(json_data))
            return json.dumps(json_response)
Example 20
 def test_insert_with_oracle(self):
     """batchuploader - robotupload insert with oracle special treatment"""
     import os
     if os.path.exists('/opt/invenio/var/log/invenio.err'):
         os.remove('/opt/invenio/var/log/invenio.err')
     result = urllib2.urlopen(self.req_oracle).read()
     self.failUnless("[INFO]" in result, '"%s" did not contained "[INFO]"' % result)
     current_task = get_last_taskid()
     run_shell_command("%s/bibupload %%s" % CFG_BINDIR, [str(current_task)])
     results = json.loads(open(self.callback_result_path).read())
     self.failUnless('results' in results, '"%s" did not contained "results" key' % results)
     self.assertEqual(len(results['results']), 1)
     self.failUnless(results['results'][0]['success'])
     self.failUnless(results['results'][0]['recid'] > 0)
     self.failUnless("""<subfield code="a">Doe, John</subfield>""" in results['results'][0]['marcxml'], results['results'][0]['marcxml'])
Example 21
 def test_insert_with_oracle(self):
     """batchuploader - robotupload insert with oracle special treatment"""
     import os
     if os.path.exists('/opt/invenio/var/log/invenio.err'):
         os.remove('/opt/invenio/var/log/invenio.err')
     result = urllib2.urlopen(self.req_oracle).read()
     self.failUnless("[INFO]" in result, '"%s" did not contained "[INFO]"' % result)
     current_task = get_last_taskid()
     run_shell_command("%s/bibupload %%s" % CFG_BINDIR, [str(current_task)])
     results = json.loads(open(self.callback_result_path).read())
     self.failUnless('results' in results, '"%s" did not contained "results" key' % results)
     self.assertEqual(len(results['results']), 1)
     self.failUnless(results['results'][0]['success'])
     self.failUnless(results['results'][0]['recid'] > 0)
     self.failUnless("""<subfield code="a">Doe, John</subfield>""" in results['results'][0]['marcxml'], results['results'][0]['marcxml'])
Example 22
 def test_insert_with_nonce(self):
     """batchuploader - robotupload insert with nonce"""
     result = urllib2.urlopen(self.req_nonce).read()
     self.failUnless("[INFO]" in result, '"%s" did not contained "[INFO]"' % result)
     current_task = get_last_taskid()
     run_shell_command("%s/bibupload %%s" % CFG_BINDIR, [str(current_task)])
     results = json.loads(open(self.callback_result_path).read())
     self.failUnless("results" in results, '"%s" did not contained "results" key' % results)
     self.assertEqual(len(results["results"]), 1)
     self.assertEqual(results["nonce"], "1234")
     self.failUnless(results["results"][0]["success"])
     self.failUnless(results["results"][0]["recid"] > 0)
     self.failUnless(
         """<subfield code="a">Doe, John</subfield>""" in results["results"][0]["marcxml"],
         results["results"][0]["marcxml"],
     )
Example 23
 def test_insert_via_curl(self):
     """batchuploader - robotupload insert via CLI curl"""
     curl_input_file = os.path.join(CFG_TMPDIR, 'curl_test.xml')
     open(curl_input_file, "w").write(self.marcxml)
     try:
         result = run_shell_command('/usr/bin/curl -T %s %s -A %s -H "Content-Type: application/marcxml+xml"', [curl_input_file, self.nonce_url, make_user_agent_string('BatchUploader')])[1]
         self.failUnless("[INFO]" in result)
         current_task = get_last_taskid()
         run_shell_command("%s/bibupload %%s" % CFG_BINDIR, [str(current_task)])
         results = json.loads(open(self.callback_result_path).read())
         self.failUnless('results' in results, '"%s" did not contained [INFO]' % result)
         self.assertEqual(len(results['results']), 1)
         self.assertEqual(results['nonce'], "1234")
         self.failUnless(results['results'][0]['success'])
         self.failUnless(results['results'][0]['recid'] > 0)
         self.failUnless("""<subfield code="a">Doe, John</subfield>""" in results['results'][0]['marcxml'], results['results'][0]['marcxml'])
     finally:
         os.remove(curl_input_file)
Example 24
 def __extract_attribute(self, req):
     """
     Load the given assertion from the request, extract all the attributes
     needed to properly log the user in, and verify that the data is
     actually both well formed and signed correctly.
     """
     from invenio.webinterface_handler import wash_urlargd
     args = wash_urlargd(req.form, {
         'assertion': (str, ''),
         'robot': (str, ''),
         'digest': (str, ''),
         'login_method': (str, '')})
     assertion = args['assertion']
     digest = args['digest']
     robot = args['robot']
     login_method = args['login_method']
     shared_key = load_robot_keys().get(login_method, {}).get(robot)
     if shared_key is None:
         raise InvenioWebAccessExternalAuthError("A key does not exist for robot: %s, login_method: %s" % (robot, login_method))
     if not self.verify(shared_key, assertion, digest):
         raise InvenioWebAccessExternalAuthError("The provided assertion does not validate against the digest %s for robot %s" % (repr(digest), repr(robot)))
     if self.use_zlib:
         try:
             ## Workaround to Perl implementation that does not add
             ## any padding to the base64 encoding.
             needed_pad = (4 - len(assertion) % 4) % 4
             assertion += needed_pad * '='
             assertion = decompress(base64.urlsafe_b64decode(assertion))
         except:
             raise InvenioWebAccessExternalAuthError("The provided assertion is corrupted")
     data = json_unicode_to_utf8(json.loads(assertion))
     if not isinstance(data, dict):
         raise InvenioWebAccessExternalAuthError("The provided assertion is invalid")
     timeout = data[self.timeout_attribute_name]
     if timeout < time.time():
         raise InvenioWebAccessExternalAuthError("The provided assertion is expired")
     userip = data.get(self.userip_attribute_name)
     if not self.check_user_ip or (normalize_ip(userip, self.check_user_ip) == normalize_ip(req.remote_ip, self.check_user_ip)):
         return data
     else:
         raise InvenioWebAccessExternalAuthError("The provided assertion has been issued for a different IP address (%s instead of %s)" % (userip, req.remote_ip))
Example 25
def get_kb_mappings_embedded_json(kb_name="", key="", value="", match_type="s", limit=None):
    """Get leftside/rightside mappings from kb kb_name formatted as json dict.
       The rightside is actually considered as a json string and hence embedded
       within the final result.

       If key given, give only those with left side (mapFrom) = key.
       If value given, give only those with right side (mapTo) = value.

       @param kb_name: the name of the kb
       @param key: include only lines matching this on left side in the results
       @param value: include only lines matching this on right side in the results
       @param match_type: s = substring match, e = exact match
       @param limit: maximum number of results to return (all results if set to None)
       @return a list of mappings
    """
    mappings = get_kb_mappings(kb_name, key, value, match_type)
    ret = []
    if limit is None:
        limit = len(mappings)
    for m in mappings[:limit]:
        label = m['value'] or m['key']
        value = m['key'] or m['value']
        ret.append({'label': label, 'value': json.loads(value)})
    return json.dumps(ret)
Example 26
    def index(self, req, form):
        """Handle all BibEdit requests.
        The responsibilities of this function are:
        * JSON decoding and encoding.
        * Redirection, if necessary.
        * Authorization.
        * Calling the appropriate function from the engine.

        """
        uid = getUid(req)
        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
        # Abort if the simplejson module isn't available
        if not CFG_JSON_AVAILABLE:
            title = 'Record Editor'
            body = '''Sorry, the record editor cannot operate when the
                `simplejson' module is not installed.  Please see the INSTALL
                file.'''
            return page(title       = title,
                        body        = body,
                        errors      = [],
                        warnings    = [],
                        uid         = uid,
                        language    = argd['ln'],
                        navtrail    = navtrail,
                        lastupdated = __lastupdated__,
                        req         = req,
                        body_css_classes = ['bibedit'])

        # If it is an Ajax request, extract any JSON data.
        ajax_request, recid = False, None
        if form.has_key('jsondata'):
            json_data = json.loads(str(form['jsondata']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            json_data = json_unicode_to_utf8(json_data)
            ajax_request = True
            if json_data.has_key('recID'):
                recid = json_data['recID']
            json_response = {'resultCode': 0, 'ID': json_data['ID']}

        # Authorization.
        user_info = collect_user_info(req)
        if user_info['email'] == 'guest':
            # User is not logged in.
            if not ajax_request:
                # Do not display the introductory recID selection box to guest
                # users (as it used to be with v0.99.0):
                dummy_auth_code, auth_message = acc_authorize_action(req,
                                                                     'runbibedit')
                referer = '/edit/'
                if self.recid:
                    referer = '/%s/%s/edit/' % (CFG_SITE_RECORD, self.recid)
                return page_not_authorized(req=req, referer=referer,
                                           text=auth_message, navtrail=navtrail)
            else:
                # Session has most likely timed out.
                json_response.update({'resultCode': 100})
                return json.dumps(json_response)

        elif self.recid:
            # Handle RESTful calls from logged in users by redirecting to
            # generic URL.
            redirect_to_url(req, '%s/%s/edit/#state=edit&recid=%s&recrev=%s' % (
                    CFG_SITE_SECURE_URL, CFG_SITE_RECORD, self.recid, ""))

        elif recid is not None:
            json_response.update({'recID': recid})
            if json_data['requestType'] == "getRecord":
                # Authorize access to record.
                if not user_can_edit_record_collection(req, recid):
                    json_response.update({'resultCode': 101})
                    return json.dumps(json_response)

        # Handle request.
        if not ajax_request:
            # Show BibEdit start page.
            body, errors, warnings = perform_request_init(uid, argd['ln'], req, __lastupdated__)
            title = 'Record Editor'
            return page(title       = title,
                        body        = body,
                        errors      = errors,
                        warnings    = warnings,
                        uid         = uid,
                        language    = argd['ln'],
                        navtrail    = navtrail,
                        lastupdated = __lastupdated__,
                        req         = req,
                        body_css_classes = ['bibedit'])
        else:
            # Handle AJAX request.
            json_response.update(perform_request_ajax(req, recid, uid,
                                                      json_data))
            return json.dumps(json_response)
Example 27
def task_submit_check_options():
    """ Checks the tasks arguments for validity
    """

    #----------------#
    # General Checks #
    #----------------#

    ## FFMPEG CONFIGURATION ##
    ## The status of ffmpeg should be checked before a task is submitted
    ## There is a minimum configuration that ffmpeg must be compiled with
    ## See bibencode_utils and bibencode_config
    config = check_ffmpeg_configuration()
    if config:
        ## Prints missing configuration
        string = ''
        for item in config:
            string += ('\t' + item + '\n')
        write_message(
            "FFmpeg options are missing. Please recompile and add:\n" + string
        )
        return False

    ## MODE ##
    ## Check if the mode is valid
    if _topt('mode') is None:
        write_message('You have to specify a mode using \'-m MODE\'')
        return False
    if _topt('mode') not in CFG_BIBENCODE_VALID_MODES:
        write_message('%s is not a valid mode. Use one of %s'
                      % (_topt('mode'), CFG_BIBENCODE_VALID_MODES))
        return False

    ## INPUT ##
    ## Check if the input file is given and if it exists
    ## You should always use an absolute path to the file
    if _topt('mode') in ('encode', 'extract', 'meta', 'batch'):
        if _topt('input') is None:
            write_message('You must specify an input file using \'-i FILE\'')
            return False
        else:
            if not os.path.exists(_topt('input')):
                print("The file %s does not exist" % _topt('input'))
                return False

    ## OUTPUT ##
    ## Check if the output file is given and if it exists
    ## You should always use an absolute path to the file
    if _topt('mode') in ('encode', 'extract', 'meta'):
        if _topt('output') is None:
            write_message('No output file is given. Please specify with'
                          ' \'-o NAME\''
                          )
            return False

    #---------------#
    # Encoding Mode #
    #---------------#
    if _topt('mode') == 'encode':

        ## PROFILE ## Check for a valid profile if this is given
        if _topt('profile_name') is not None:
            if _topt('profile_name') not in get_encoding_profiles():
                write_message('%s not found in %s' %
                              (_topt('profile_name'),
                               CFG_BIBENCODE_PROFILES_ENCODING)
                              )
                return False
            ## If the profile exists
            else:
                pass

        ## AUDIOCODEC ##
        ## Checks if the audiocodec is one of the predefined
        if _topt('acodec') is not None:
            if _topt('acodec') not in CFG_BIBENCODE_FFMPEG_VALID_ACODECS:
                write_message(
                    '%s is not a valid audiocodec.\nAvailable codecs: %s'
                    % (_topt('acodec'), CFG_BIBENCODE_FFMPEG_VALID_ACODECS)
                )
                return False

        ## VIDEOCODEC ## Checks if the videocodec is one of the predefined
        if _topt('vcodec') is not None:
            if _topt('vcodec') not in CFG_BIBENCODE_FFMPEG_VALID_VCODECS:
                write_message(
                    '%s is not a valid videocodec.\nAvailable codecs: %s'
                    % (_topt('vcodec'), CFG_BIBENCODE_FFMPEG_VALID_VCODECS)
                )
                return False

        ## SIZE ##
        ## Checks if the size is either WxH or an FFMPEG preset
        if _topt('size') is not None:
            if not CFG_BIBENCODE_FFMPEG_RE_VALID_SIZE.match(_topt('size')):
                if _topt('size') not in CFG_BIBENCODE_FFMPEG_VALID_SIZES:
                    write_message(
                        '%s is not a valid frame size.\nEither use the'
                        ' \'WxH\' notation or one of these values:\n%s'
                        % (_topt('size'), CFG_BIBENCODE_FFMPEG_VALID_SIZES)
                    )
                    return False
        ## Check if both a size and a vertical or horizontal resolution are given
        if (_topt('width') or _topt('height')) and _topt('size'):
            write_message('Options \'width\' and \'height\' can not be '
                          'combined with \'resolution\'')
            return False

        ## PASSES ##
        ## If a number of passes is given, it should be either 1 or 2.
        ## You could do an infinite number of passes with ffmpeg,
        ## But it will almost never make a difference above 2 passes.
        ## So, we currently only support 2 passes.
        if _topt('passes') is not None:
            if _topt('passes') not in (1, 2):
                write_message('The number of passes must be either 1 or 2')
                return False
        else:
            task_set_option('passes', 1)

        ## BITRATE ##
        ## Check if the given bitrate is either 1000 sth. or 1000k sth.
        if _topt('abitrate') is not None:
            pass
        if _topt('vbitrate') is not None:
            pass

    #-----------------#
    # Extraction Mode #
    #-----------------#
    elif _topt('mode') == 'extract':

        ## PROFILE ##
        ## If a profile is given, check its validity
        if _topt('profile_name') is not None:
            if _topt('profile_name') not in get_extract_profiles():
                write_message('%s not found in %s' %
                              (_topt('profile_name'),
                               CFG_BIBENCODE_PROFILES_EXTRACT)
                              )
                return False
            ## If the profile exists
            else:
                pass

        ## You cannot give both a number and specific positions
        ## !!! Think about allowing both -> First extract by number,
        ## !!! then additionally the specific positions
        if (
            ((_topt('numberof') is not None) and
            (_topt('positions') is not None))
            or
            ((_topt('numberof') is None) and
            (_topt('positions') is None))
            ):
            write_message('Please specify either a number of frames to '
                          'take or specific positions')
            return False

        ## SIZE ##
        ## Checks if the size is either WxH or an FFMPEG specific value
        if _topt('size') is not None:
            if not CFG_BIBENCODE_FFMPEG_RE_VALID_SIZE.match(_topt('size')):
                if _topt('size') not in CFG_BIBENCODE_FFMPEG_VALID_SIZES:
                    write_message(
                        '%s is not a valid frame size.\nEither use the'
                        ' \'WxH\' notation or one of these values:\n%s'
                        % (_topt('size'), CFG_BIBENCODE_FFMPEG_VALID_SIZES)
                    )
                    return False

    #---------------#
    # Metadata Mode #
    #---------------#
    elif _topt('mode') == 'meta':

        ## You have to give exactly one meta suboption
        if not _xor(_topt('meta_input'),
                   _topt('meta_dump')):
            write_message("You can either dump or write metadata")
            return False

        ## METADATA INPUT ##
        if _topt('meta_input') is not None:
            ## Check if this is either a filename (that should exist)
            ## or if this a jsonic metadata notation
            if os.path.exists(_topt('meta_input')):
                pass
            else:
                try:
                    metadict = json.loads(_topt('meta_input'))
                    task_set_option('meta_input', metadict)
                except ValueError:
                    write_message('The value %s of the \'--meta\' parameter is '
                                  'neither a valid filename nor a jsonic dict'
                                  % _topt('meta_input'))
                    return False

    #------------#
    # Batch Mode #
    #------------#
    elif _topt('mode') == 'batch':
        if _topt('collection') and _topt('search'):
            write_message('You can either use \'search\' or \'collection\'')
            return False
        elif _topt('collection'):
            template = json_decode_file(_topt('input'))
            print('\n')
            print("#---------------------------------------------#")
            print("# YOU ARE ABOUT TO UPDATE A WHOLE COLLECTION  #")
            print("#---------------------------------------------#")
            print('\n')
            print('The selected template file contains:')
            pprint(template)
            print('\n')
        elif _topt('search'):
            template = json_decode_file(_topt('input'))
            message = "# YOU ARE ABOUT TO UPDATE RECORDS MATCHING '%s'  #" % _topt('search')
            print('\n')
            print("#" + "-"*(len(message)-2) + "#")
            print(message)
            print("#" + "-"*(len(message)-2) + "#")
            print('\n')
            print('The selected template file contains:')
            pprint(template)
            print('\n')


    #-------------#
    # Daemon Mode #
    #-------------#
    elif _topt('mode') == 'daemon':
        task_set_task_param('task_specific_name', 'daemon')
        ## You can either give none or both folders, but not only one
        if _xor(_topt('new_job_folder'), _topt('old_job_folder')):
            write_message('When specifying folders for the daemon mode, you '
                          'have to specify both the folder for the new jobs '
                          'and the old ones')
            return False


    ## If every check went fine
    return True
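
_topt() and _xor() are used throughout the checks above but are not part of the snippet. Presumably they are a shorthand for the BibSched task option getter and a truthiness exclusive-or; minimal sketches under that assumption (the real helpers and import path may differ):

def _topt(name):
    # Assumed to be a thin wrapper around the BibSched task option accessor.
    from invenio.bibtask import task_get_option  # assumed import path
    return task_get_option(name)

def _xor(a, b):
    # True when exactly one of the two values is truthy, as used by the
    # meta and daemon mode checks above.
    return bool(a) != bool(b)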
Example 28
    def index(self, req, form):
        """Handle all requests"""

        uid = getUid(req)
        argd = wash_urlargd(form, {'ln' : (str, CFG_SITE_LANG),
                           'state' : (str, '')})
        ln = argd['ln']
        state = argd['state']
        _ = gettext_set_language(ln)

        # Abort if the simplejson module isn't available
        if not CFG_JSON_AVAILABLE:
            title = 'Authorlist Manager'
            body = '''Sorry, the record editor cannot operate when the
                `simplejson' module is not installed.  Please see the INSTALL
                file.'''
            return page(title       = title,
                        body        = body,
                        errors      = [],
                        warnings    = [],
                        uid         = uid,
                        language    = ln,
                        navtrail    = navtrail,
                        lastupdated = __lastupdated__,
                        req         = req)

        # Extract additional JSON data from form
        if 'options' in form:
            options = json.loads(str(form['options']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            options = json_unicode_to_utf8(options)

        # Authorization.
        not_authorized = authorlist_engine.user_authorization(req, ln)
        if not_authorized:
            return not_authorized

        # User is authorized, let's handle different states
        # if no state parameter, load the main page
        if state == '':

            return page(title         = _('Author List Manager'),
                        metaheaderadd = authorlist_templates.index_header(),
                        body          = authorlist_templates.body(),
                        errors        = [],
                        warnings      = [],
                        uid           = uid,
                        language      = ln,
                        navtrail      = navtrail,
                        lastupdated   = __lastupdated__,
                        req           = req)

        elif state == 'itemize':
            data = authorlist_db.itemize(uid)

            req.content_type = 'application/json'
            req.write(json.dumps(data))

        # open parameter set? initialize an Authorlist instance
        elif state == 'open':
            # if 'id' in url, check if user has right to modify this paper
            try:
                received = wash_urlargd(form, {'id': (str, None)})
                paper_id = received['id']

                if authorlist_engine.check_user_rights(uid, paper_id):
                    return page(title         = _('Author List Manager'),
                        metaheaderadd = authorlist_templates.list_header(),
                        body          = authorlist_templates.body(),
                        errors        = [],
                        warnings      = [],
                        uid           = uid,
                        language      = ln,
                        navtrail      = navtrail,
                        lastupdated   = __lastupdated__,
                        req           = req)
                else:
                    # no rights to modify this paper
                    redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
            except:
                # redirect to the main page if weird stuff happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
                        
        # On load state we will answer with the JSON encoded data of the passed 
        # paper id. Should usually not be directly surfed by the user.
        elif state == 'load':
            try:
                received = wash_urlargd(form, {'id': (str, None)})
                paper_id = received['id']
                data = authorlist_db.load(paper_id)
                
                req.content_type = 'application/json'
                req.write(json.dumps(data))
            except:
                # redirect to the main page if weird stuff happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        # The save state saves the send data in the database using the passed
        # paper id. Responds with a JSON object containing the id of the paper
        # as saved in the database. Should usually not be surfed directly by the
        # user
        elif state == 'save':
            try:
                received = wash_urlargd(form, {'id': (str, None),
                                               'data': (str, '')})
                paper_id = received['id']
                in_data = json.loads(received['data'])
                out_data = authorlist_db.save(paper_id, uid, in_data)

                req.content_type = 'application/json'
                req.write(json.dumps(out_data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        # Clones the paper with the given id in the database and responds with a
        # JSON object containing the id of the clone. Should usually not surfed
        # directly by the user.
        elif state == 'clone':
            try:
                received = wash_urlargd(form, {'id': (str, None)})
                paper_id = received['id']
                data = authorlist_db.clone(paper_id, uid)

                req.content_type = 'application/json'
                req.write(json.dumps(data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        # Transform the sent data into the format passed in the URL using a
        # authorlist_engine converter. Responds with the MIME type of the
        # converter and offers it as a download (content-disposition header).
        elif state == 'export':
            try:
                received = wash_urlargd(form, {'format': (str, None),
                                               'data': (str, '')})
                data_format = received['format']
                data = received['data']

                converter = authorlist_engine.Converters.get(data_format)

                attachement = 'attachement; filename="%s"' % converter.FILE_NAME
                req.headers_out['Content-Type'] = converter.CONTENT_TYPE
                req.headers_out['Content-Disposition'] = attachement
                #redirect_to_url(req, authorlist_engine.dumps(data, converter))
                req.write(authorlist_engine.dumps(data, converter))
            except:
                # throw exception if something weird happens
                return sys.exc_info()

        elif state == 'delete':
            try:
                received = wash_urlargd(form, {'id': (str, None)})
                paper_id = received['id']

                data = authorlist_db.delete(paper_id)

                req.content_type = 'application/json'
                req.write(json.dumps(data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        elif state == 'import':
            try:
                received = wash_urlargd(form, {'importid': (str, None)})
                recID = received['importid']
                data = authorlist_engine.retrieve_data_from_record(recID)
                req.content_type = 'application/json'
                req.write(json.dumps(data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        elif state == 'importxml':
            try:
                received = wash_urlargd(form, {'xmlfile': (Field, None)})
                xml_string = received['xmlfile'].value
                import_data = authorlist_engine.retrieve_data_from_xml(xml_string)
                req.content_type = 'application/json'
                req.write(json.dumps(import_data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
        # Unknown state, just go to the main page.
        else:
            redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
    def index(self, req, form):
        """Handle all BibMerge requests.
        The responsibilities of this function are:
        * JSON decoding and encoding.
        * Redirection, if necessary.
        * Authorization.
        * Calling the appropriate function from the engine.
        """
        # If it is an Ajax request, extract any JSON data.
        ajax_request, recid1, recid2 = False, None, None
        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
        if form.has_key('jsondata'):
            json_data = json.loads(str(form['jsondata']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            json_data = json_unicode_to_utf8(json_data)
            ajax_request = True
            json_response = {}
            if json_data.has_key('recID1'):
                recid1 = json_data['recID1']
            if json_data.has_key('recID2'):
                recid2 = json_data['recID2']

        # Authorization.
        user_info = collect_user_info(req)
        if user_info['email'] == 'guest':
            # User is not logged in.
            if not ajax_request:
                # Do not display the introductory recID selection box to guest
                # users (as it used to be with v0.99.0):
                auth_code, auth_message = acc_authorize_action(req, 'runbibmerge')
                referer = '/merge/'
                return page_not_authorized(req=req, referer=referer,
                                           text=auth_message, navtrail=navtrail)
            else:
                # Session has most likely timed out.
                json_response.update({'resultCode': 1,
                                      'resultText': 'Error: Not logged in'})
                return json.dumps(json_response)

        elif self.recid:
            # Handle RESTful call by storing recid and redirecting to
            # generic URL.
            redirect_to_url(req, '%s/%s/merge/' % (CFG_SITE_SECURE_URL, CFG_SITE_RECORD) )

        if recid1 is not None:
            # Authorize access to record 1.
            auth_code, auth_message = acc_authorize_action(req, 'runbibmerge',
                collection=guess_primary_collection_of_a_record(recid1))
            if auth_code != 0:
                json_response.update({'resultCode': 1, 'resultText': 'No access to record %s' % recid1})
                return json.dumps(json_response)
        if recid2 is not None:
            # Authorize access to record 2.
            auth_code, auth_message = acc_authorize_action(req, 'runbibmerge',
                collection=guess_primary_collection_of_a_record(recid2))
            if auth_code != 0:
                json_response.update({'resultCode': 1, 'resultText': 'No access to record %s' % recid2})
                return json.dumps(json_response)

        # Handle request.
        uid = getUid(req)
        if not ajax_request:
            # Show BibEdit start page.
            body, errors, warnings = perform_request_init()
            metaheaderadd = """<script type="text/javascript" src="%(site)s/js/json2.js"></script>
  <script type="text/javascript" src="%(site)s/js/bibmerge_engine.js"></script>""" % {'site': CFG_SITE_SECURE_URL}
            title = 'Record Merger'
            return page(title         = title,
                        metaheaderadd = metaheaderadd,
                        body          = body,
                        errors        = errors,
                        warnings      = warnings,
                        uid           = uid,
                        language      = argd['ln'],
                        navtrail      = navtrail,
                        lastupdated   = __lastupdated__,
                        req           = req)
        else:
            # Handle AJAX request.
            json_response = perform_request_ajax(req, uid, json_data)
            return json.dumps(json_response)
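
The 'jsondata' round trip used by this handler (json.loads on the form field, de-unicoding, then json.dumps of a response dict) can be exercised on its own. A minimal sketch, assuming a simplified stand-in for json_unicode_to_utf8 (the real Invenio helper is not shown here) and a made-up form payload:

import json

def to_utf8(data):
    # Simplified stand-in for Invenio's json_unicode_to_utf8: recursively
    # turn unicode strings into UTF-8 byte strings (Python 2 semantics).
    if isinstance(data, dict):
        return dict((to_utf8(k), to_utf8(v)) for k, v in data.iteritems())
    if isinstance(data, list):
        return [to_utf8(item) for item in data]
    if isinstance(data, unicode):
        return data.encode('utf-8')
    return data

jsondata = '{"recID1": "4", "recID2": "7"}'   # hypothetical form payload
json_data = to_utf8(json.loads(jsondata))
json_response = {'resultCode': 0}
print(json.dumps(json_response))
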
Esempio n. 30
0
def task_submit_elaborate_specific_parameter(key, value, opts, args):
    """ Given the string key it checks it's meaning, eventually using the
    value. Usually it fills some key in the options dict.
    It must return True if it has elaborated the key, False, if it doesn't
    know that key.
    eg:
    if key in ('-n', '--number'):
        self.options['number'] = value
        return True
    return False
    """
    ## A dictionary used for mapping CLI parameters to task_option keys
    parameter_mapping = {
        '-p': 'profile_name',
        '-i': 'input',
        '--input': 'input',
        '-o': 'output',
        '--output': 'output',
        '-m': 'mode',
        '--mode': 'mode',
        '--acodec': 'acodec',
        '--vcodec': 'vcodec',
        '--abitrate': 'abitrate',
        '--vbitrate': 'vbitrate',
        '--resolution': 'size',
        '--passes': 'passes',
        '--special': 'special',
        '--specialfirst': 'specialfirst',
        '--specialsecond': 'specialsecond',
        '--width': 'width',
        '--height': 'height',
        '--aspect': 'aspect',
        '--number': 'numberof',
        '--positions': 'positions',
        '-D': 'meta_dump',
        '-W': 'meta_input',
        '--dump': 'meta_dump',
        '--write': 'meta_input',
        '--newjobfolder': 'new_job_folder',
        '--oldjobfolder': 'old_job_folder',
        '--recid': 'recid',
        '--collection': 'collection',
        '--search': 'search'
    }

    ## PASSES ##
    ## Transform 'passes' to integer
    if key in ('--passes', ):
        try:
            value = int(value)
        except ValueError:
            write_message('Value of \'--passes\' must be an integer')
            return False

    ## HEIGHT, WIDTH ##
    if key in ('--height', '--width'):
        try:
            value = int(value)
        except ValueError:
            write_message('Value of \'--height\' or \'--width\''
                          ' must be an integer')
            return False

    ## META MODE ##
    ## Transform meta mode values to boolean
    if key in ('-D', '--dump'):
        if not value in ("ffprobe", "mediainfo", "pbcore"):
            write_message(
                "Unknown dumping format, must be 'ffprobe', 'mediainfo' or 'pbcore'"
            )
            return False
    if key in ('--substitute', ):
        value = True
    ## Transform the 'positions' parameter into a list
    if key in ('--positions', ):
        try:
            parsed = json.loads(value)
            if not isinstance(parsed, list):
                write_message('Value of \'--positions\' must be a json list')
                return False
            else:
                value = parsed
        except ValueError:
            write_message('Value of \'--positions\' must be a json list')
            return False

    ## NUMBEROF ##
    ## Transform 'number' to integer
    if key in ('--number', ):
        try:
            value = int(value)
        except ValueError:
            write_message('Value of \'--number\' must be an integer')
            return False
    ## ASPECT ##
    if key in ('--aspect', ):
        try:
            xasp, yasp = str(value).split(':')
            xasp = float(xasp)
            yasp = float(yasp)
            value = xasp / yasp
        except:
            write_message('Value of \'--aspect\' must be in \'4:3\' format')
            return False
    ## RECID ##
    if key in ('--recid', ):
        try:
            value = int(value)
        except ValueError:
            write_message('Value of \'--recid\' must be an integer')
            return False

    ## GENERAL MAPPING ##
    ## For all general or other parameters just use the mapping dictionary
    if key in parameter_mapping:
        task_set_option(parameter_mapping[key], value)
        return True
    return False
    def index(self, req, form):
        """Handle all requests"""

        uid = getUid(req)
        argd = wash_urlargd(form, {
            'ln': (str, CFG_SITE_LANG),
            'state': (str, '')
        })
        ln = argd['ln']
        state = argd['state']
        _ = gettext_set_language(ln)

        # Abort if the simplejson module isn't available
        if not CFG_JSON_AVAILABLE:
            title = 'Authorlist Manager'
            body = '''Sorry, the record editor cannot operate when the
                `simplejson' module is not installed.  Please see the INSTALL
                file.'''
            return page(title=title,
                        body=body,
                        errors=[],
                        warnings=[],
                        uid=uid,
                        language=ln,
                        navtrail=navtrail,
                        lastupdated=__lastupdated__,
                        req=req)

        # Extract additional JSON data from form
        if 'options' in form:
            options = json.loads(str(form['options']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            options = json_unicode_to_utf8(options)

        # Authorization.
        not_authorized = authorlist_engine.user_authorization(req, ln)
        if not_authorized:
            return not_authorized

        # User is authorized, let's handle different states
        # if no state parameter, load the main page
        if state == '':

            return page(title=_('Author List Manager'),
                        metaheaderadd=authorlist_templates.index_header(),
                        body=authorlist_templates.body(),
                        errors=[],
                        warnings=[],
                        uid=uid,
                        language=ln,
                        navtrail=navtrail,
                        lastupdated=__lastupdated__,
                        req=req)

        elif state == 'itemize':
            data = authorlist_db.itemize(uid)

            req.content_type = 'application/json'
            req.write(json.dumps(data))

        # open parameter set? initialize an Authorlist instance
        elif state == 'open':
            # if 'id' in url, check if user has right to modify this paper
            try:
                received = wash_urlargd(form, {'id': (str, None)})
                paper_id = received['id']

                if authorlist_engine.check_user_rights(uid, paper_id):
                    return page(
                        title=_('Author List Manager'),
                        metaheaderadd=authorlist_templates.list_header(),
                        body=authorlist_templates.body(),
                        errors=[],
                        warnings=[],
                        uid=uid,
                        language=ln,
                        navtrail=navtrail,
                        lastupdated=__lastupdated__,
                        req=req)
                else:
                    # no rights to modify this paper
                    redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
            except:
                # redirect to the main page if weird stuff happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        # On load state we will answer with the JSON encoded data of the passed
        # paper id. Should usually not be directly surfed by the user.
        elif state == 'load':
            try:
                received = wash_urlargd(form, {'id': (str, None)})
                paper_id = received['id']
                data = authorlist_db.load(paper_id)

                req.content_type = 'application/json'
                req.write(json.dumps(data))
            except:
                # redirect to the main page if weird stuff happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        # The save state saves the sent data in the database using the passed
        # paper id. Responds with a JSON object containing the id of the paper
        # as saved in the database. Should usually not be surfed directly by the
        # user.
        elif state == 'save':
            try:
                received = wash_urlargd(form, {
                    'id': (str, None),
                    'data': (str, '')
                })
                paper_id = received['id']
                in_data = json.loads(received['data'])
                out_data = authorlist_db.save(paper_id, uid, in_data)

                req.content_type = 'application/json'
                req.write(json.dumps(out_data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        # Clones the paper with the given id in the database and responds with a
        # JSON object containing the id of the clone. Should usually not be
        # surfed directly by the user.
        elif state == 'clone':
            try:
                received = wash_urlargd(form, {'id': (str, None)})
                paper_id = received['id']
                data = authorlist_db.clone(paper_id, uid)

                req.content_type = 'application/json'
                req.write(json.dumps(data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        # Transform the sent data into the format passed in the URL using an
        # authorlist_engine converter. Responds with the MIME type of the
        # converter and offers it as a download (content-disposition header).
        elif state == 'export':
            try:
                received = wash_urlargd(form, {
                    'format': (str, None),
                    'data': (str, '')
                })
                data_format = received['format']
                data = received['data']

                converter = authorlist_engine.Converters.get(data_format)

                attachment = 'attachment; filename="%s"' % converter.FILE_NAME
                req.headers_out['Content-Type'] = converter.CONTENT_TYPE
                req.headers_out['Content-Disposition'] = attachment
                #redirect_to_url(req, authorlist_engine.dumps(data, converter))
                req.write(authorlist_engine.dumps(data, converter))
            except:
                # return the exception info if something weird happens
                return sys.exc_info()

        elif state == 'delete':
            try:
                received = wash_urlargd(form, {'id': (str, None)})
                paper_id = received['id']

                data = authorlist_db.delete(paper_id)

                req.content_type = 'application/json'
                req.write(json.dumps(data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        elif state == 'import':
            try:
                received = wash_urlargd(form, {'importid': (str, None)})
                recID = received['importid']
                data = authorlist_engine.retrieve_data_from_record(recID)
                req.content_type = 'application/json'
                req.write(json.dumps(data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))

        elif state == 'importxml':
            try:
                received = wash_urlargd(form, {'xmlfile': (Field, None)})
                xml_string = received['xmlfile'].value
                import_data = authorlist_engine.retrieve_data_from_xml(
                    xml_string)
                req.content_type = 'application/json'
                req.write(json.dumps(import_data))
            except:
                # redirect to the main page if something weird happens
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
        # Unknown state, just go to the main page.
        else:
            redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
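
The 'export' state above only needs three things from a converter: a FILE_NAME, a CONTENT_TYPE and a dumps() step. A minimal sketch of that response pattern with a dummy converter; only the FILE_NAME/CONTENT_TYPE attribute names and the dumps(data, converter) call shape are taken from the code above, everything else is made up:

import json

class PlainTextConverter(object):
    # Dummy converter; the real ones come from authorlist_engine.Converters.
    FILE_NAME = 'authors.txt'
    CONTENT_TYPE = 'text/plain'

def dumps(data, converter):
    # Stand-in for authorlist_engine.dumps(data, converter).
    return json.dumps(data, indent=2)

converter = PlainTextConverter()
headers_out = {
    'Content-Type': converter.CONTENT_TYPE,
    'Content-Disposition': 'attachment; filename="%s"' % converter.FILE_NAME,
}
body = dumps({'authors': []}, converter)
print(headers_out['Content-Disposition'])
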
Esempio n. 32
0
    def _process_json_request(self, form, req):
        """Takes care about the json requests."""

        argd = wash_urlargd(form, {self._JSON_DATA_KEY: (str, "")})

        # load json data
        json_data_string = argd[self._JSON_DATA_KEY]
        json_data_unicode = json.loads(json_data_string)
        json_data = json_unicode_to_utf8(json_data_unicode)

        language = json_data["language"]
        search_criteria = json_data["searchCriteria"]
        output_tags = json_data["outputTags"]
        output_tags = output_tags.split(",")
        output_tags = [tag.strip() for tag in output_tags]
        action_type = json_data["actionType"]
        current_record_id = json_data["currentRecordID"]
        commands = json_data["commands"]
        output_format = json_data["outputFormat"]
        page_to_display = json_data["pageToDisplay"]
        collection = json_data["collection"]
        compute_modifications = json_data["compute_modifications"]
        checked_records = json_data["checked_records"]

        json_response = {}
        if action_type == self._action_types.test_search:
            json_response.update(
                multi_edit_engine.perform_request_test_search(
                    search_criteria,
                    [],
                    output_format,
                    page_to_display,
                    language,
                    output_tags,
                    collection,
                    req=req,
                    checked_records=checked_records,
                )
            )
            json_response["display_info_box"] = 1
            json_response["info_html"] = ""
            return json.dumps(json_response)

        elif action_type == self._action_types.display_detailed_record:
            json_response.update(
                multi_edit_engine.perform_request_detailed_record(current_record_id, [], output_format, language)
            )
            return json.dumps(json_response)

        elif action_type == self._action_types.preview_results:
            commands_list, upload_mode, tag_list = self._create_commands_list(commands)
            json_response = {}
            json_response.update(
                multi_edit_engine.perform_request_test_search(
                    search_criteria,
                    commands_list,
                    output_format,
                    page_to_display,
                    language,
                    output_tags,
                    collection,
                    compute_modifications,
                    upload_mode,
                    req,
                    checked_records,
                )
            )
            return json.dumps(json_response)

        elif action_type == self._action_types.display_detailed_result:
            commands_list, upload_mode, tag_list = self._create_commands_list(commands)
            json_response.update(
                multi_edit_engine.perform_request_detailed_record(
                    current_record_id, commands_list, output_format, language
                )
            )
            return json.dumps(json_response)

        elif action_type == self._action_types.submit_changes:
            commands_list, upload_mode, tag_list = self._create_commands_list(commands)
            json_response.update(
                multi_edit_engine.perform_request_submit_changes(
                    search_criteria, commands_list, language, upload_mode, tag_list, collection, req, checked_records
                )
            )
            return json.dumps(json_response)

        # In case we obtain a wrong action type, we return an empty page.
        return " "
Esempio n. 33
0
def task_submit_check_options():
    """ Checks the tasks arguments for validity
    """

    #----------------#
    # General Checks #
    #----------------#

    ## FFMPEG CONFIGURATION ##
    ## The status of ffmpeg should be checked before a task is submitted
    ## There is a minimum configuration that ffmpeg must be compiled with
    ## See bibencode_utils and bibencode_config
    config = check_ffmpeg_configuration()
    if config:
        ## Prints missing configuration
        string = ''
        for item in config:
            string += ('\t' + item + '\n')
        write_message(
            "FFmpeg options are missing. Please recompile and add:\n" + string)
        return False

    ## MODE ##
    ## Check if the mode is valid
    if _topt('mode') is None:
        write_message('You have to specify a mode using \'-m MODE\'')
        return False
    if _topt('mode') not in CFG_BIBENCODE_VALID_MODES:
        write_message('%s is not a valid mode. Use one of %s' %
                      (_topt('mode'), CFG_BIBENCODE_VALID_MODES))
        return False

    ## INPUT ##
    ## Check if the input file is given and if it exists
    ## You should always use an absolute path to the file
    if _topt('mode') in ('encode', 'extract', 'meta', 'batch'):
        if _topt('input') is None:
            write_message('You must specify an input file using \'-i FILE\'')
            return False
        else:
            if not os.path.exists(_topt('input')):
                print("The file %s does not exist" % _topt('input'))
                return False

    ## OUTPUT ##
    ## Check if the output file is given
    ## You should always use an absolute path to the file
    if _topt('mode') in ('encode', 'extract', 'meta'):
        if _topt('output') is None:
            write_message('No output file is given. Please specify with'
                          ' \'-o NAME\'')
            return False

    #---------------#
    # Encoding Mode #
    #---------------#
    if _topt('mode') == 'encode':

        ## PROFILE ## Check for a valid profile if this is given
        if _topt('profile_name') is not None:
            if _topt('profile_name') not in get_encoding_profiles():
                write_message(
                    '%s not found in %s' %
                    (_topt('profile_name'), CFG_BIBENCODE_PROFILES_ENCODING))
                return False
            ## If the profile exists
            else:
                pass

        ## AUDIOCODEC ##
        ## Checks if the audiocodec is one of the predefined
        if _topt('acodec') is not None:
            if _topt('acodec') not in CFG_BIBENCODE_FFMPEG_VALID_ACODECS:
                write_message(
                    '%s is not a valid audiocodec.\nAvailable codecs: %s' %
                    (_topt('acodec'), CFG_BIBENCODE_FFMPEG_VALID_ACODECS))
                return False

        ## VIDEOCODEC ## Checks if the videocodec is one of the predefined
        if _topt('vcodec') is not None:
            if _topt('vcodec') not in CFG_BIBENCODE_FFMPEG_VALID_VCODECS:
                write_message(
                    '%s is not a valid videocodec.\nAvailable codecs: %s' %
                    (_topt('vcodec'), CFG_BIBENCODE_FFMPEG_VALID_VCODECS))
                return False

        ## SIZE ##
        ## Checks if the size is either WxH or an FFMPEG preset
        if _topt('size') is not None:
            if not CFG_BIBENCODE_FFMPEG_RE_VALID_SIZE.match(_topt('size')):
                if _topt('size') not in CFG_BIBENCODE_FFMPEG_VALID_SIZES:
                    write_message(
                        '%s is not a valid frame size.\nEither use the'
                        ' \'WxH\' notation or one of these values:\n%s' %
                        (_topt('size'), CFG_BIBENCODE_FFMPEG_VALID_SIZES))
                    return False
        ## Check if both a size and a vertical or horizontal resolution are given
        if (_topt('width') or _topt('height')) and _topt('size'):
            write_message('Options \'width\' and \'height\' can not be '
                          'combined with \'resolution\'')
            return False

        ## PASSES ##
        ## If a number of passes is given, it should be either 1 or 2.
        ## You could do any number of passes with ffmpeg,
        ## but it will almost never make a difference above 2 passes.
        ## So, we currently only support 2 passes.
        if _topt('passes') is not None:
            if _topt('passes') not in (1, 2):
                write_message('The number of passes must be either 1 or 2')
                return False
        else:
            task_set_option('passes', 1)

        ## BITRATE ##
        ## Check if the given bitrate is either a plain number (e.g. 1000)
        ## or has a 'k' suffix (e.g. 1000k)
        if _topt('abitrate') is not None:
            pass
        if _topt('vbitrate') is not None:
            pass

    #-----------------#
    # Extraction Mode #
    #-----------------#
    elif _topt('mode') == 'extract':

        ## PROFILE ##
        ## If a profile is given, check its validity
        if _topt('profile_name') is not None:
            if _topt('profile_name') not in get_extract_profiles():
                write_message(
                    '%s not found in %s' %
                    (_topt('profile_name'), CFG_BIBENCODE_PROFILES_EXTRACT))
                return False
            ## If the profile exists
            else:
                pass

        ## You cannot give both a number and specific positions
        ## !!! Think about allowing both -> First extract by number,
        ## !!! then additionally the specific positions
        if (((_topt('numberof') is not None) and
             (_topt('positions') is not None)) or
            ((_topt('numberof') is None) and (_topt('positions') is None))):
            write_message('Please specify either a number of frames to '
                          'take or specific positions')
            return False

        ## SIZE ##
        ## Checks if the size is either WxH or an FFMPEG specific value
        if _topt('size') is not None:
            if not CFG_BIBENCODE_FFMPEG_RE_VALID_SIZE.match(_topt('size')):
                if _topt('size') not in CFG_BIBENCODE_FFMPEG_VALID_SIZES:
                    write_message(
                        '%s is not a valid frame size.\nEither use the'
                        ' \'WxH\' notation or one of these values:\n%s' %
                        (_topt('size'), CFG_BIBENCODE_FFMPEG_VALID_SIZES))
                    return False

    #---------------#
    # Metadata Mode #
    #---------------#
    elif _topt('mode') == 'meta':

        ## You have to give exactly one meta suboption
        if not _xor(_topt('meta_input'), _topt('meta_dump')):
            write_message("You can either dump or write metadata")
            return False

        ## METADATA INPUT ##
        if _topt('meta_input') is not None:
            ## Check if this is either a filename (that should exist)
            ## or if this is a JSON metadata notation
            if os.path.exists(_topt('meta_input')):
                pass
            else:
                try:
                    metadict = json.loads(_topt('meta_input'))
                    task_set_option('meta_input', metadict)
                except ValueError:
                    write_message(
                        'The value %s of the \'--meta\' parameter is '
                        'neither a valid filename nor a jsonic dict' %
                        _topt('meta_input'))
                    return False

    #------------#
    # Batch Mode #
    #------------#
    elif _topt('mode') == 'batch':
        if _topt('collection') and _topt('search'):
            write_message('You can either use \'search\' or \'collection\'')
            return False
        elif _topt('collection'):
            template = json_decode_file(_topt('input'))
            print('\n')
            print("#---------------------------------------------#")
            print("# YOU ARE ABOUT TO UPDATE A WHOLE COLLECTION  #")
            print("#---------------------------------------------#")
            print('\n')
            print('The selected template file contains:')
            pprint(template)
            print('\n')
        elif _topt('search'):
            template = json_decode_file(_topt('input'))
            message = "# YOU ARE ABOUT TO UPDATE RECORDS MATCHING '%s'  #" % _topt(
                'search')
            print('\n')
            print("#" + "-" * (len(message) - 2) + "#")
            print(message)
            print("#" + "-" * (len(message) - 2) + "#")
            print('\n')
            print('The selected template file contains:')
            pprint(template)
            print('\n')

    #-------------#
    # Daemon Mode #
    #-------------#
    elif _topt('mode') == 'daemon':
        task_set_task_param('task_specific_name', 'daemon')
        ## You can either give none or both folders, but not only one
        if _xor(_topt('new_job_folder'), _topt('old_job_folder')):
            write_message('When specifying folders for the daemon mode, you '
                          'have to specify both the folder for the new jobs '
                          'and the old ones')
            return False

    ## If every check went fine
    return True
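
The frame-size check above first tries a 'WxH' regular expression and only then falls back to a list of named presets. Since CFG_BIBENCODE_FFMPEG_RE_VALID_SIZE and CFG_BIBENCODE_FFMPEG_VALID_SIZES are not shown here, the pattern and the preset list in this sketch are assumptions for illustration only:

import re

RE_VALID_SIZE = re.compile(r'^\d+x\d+$')          # assumed 'WxH' pattern
VALID_SIZE_PRESETS = ('vga', 'hd720', 'hd1080')   # assumed preset subset

def is_valid_size(size):
    if RE_VALID_SIZE.match(size):
        return True
    return size in VALID_SIZE_PRESETS

print(is_valid_size('640x480'))   # True
print(is_valid_size('hd720'))     # True
print(is_valid_size('640*480'))   # False
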
Esempio n. 34
0
def get_json_parameters_from_cli(dummy_option, dummy_opt_str, value, parser):
    try:
        setattr(parser.values, 'parameters', json_unicode_to_utf8(json.loads(value)))
    except Exception, err:
        raise optparse.OptionValueError("Cannot parse as a valid JSON serialization the provided parameters: %s. %s" % (value, err))
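
get_json_parameters_from_cli has the standard optparse callback signature (option, opt_str, value, parser), so it can be registered with action='callback'. The sketch below wires up a local copy of the callback; json_unicode_to_utf8 is left out because it is an Invenio helper, and the option name '--parameters' is only an assumption:

import json
import optparse

def json_parameters_callback(dummy_option, dummy_opt_str, value, parser):
    # Same shape as get_json_parameters_from_cli, minus the Invenio helper.
    try:
        setattr(parser.values, 'parameters', json.loads(value))
    except Exception, err:
        raise optparse.OptionValueError(
            "Cannot parse as a valid JSON serialization the provided "
            "parameters: %s. %s" % (value, err))

parser = optparse.OptionParser()
parser.add_option('--parameters', type='string', action='callback',
                  callback=json_parameters_callback,
                  help='JSON serialized parameters')
options, dummy_args = parser.parse_args(['--parameters', '{"mode": "batch"}'])
print(options.parameters)   # {u'mode': u'batch'}
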
    def index(self, req, form):
        """Handle all BibMerge requests.
        The responsibilities of this function are:
        * JSON decoding and encoding.
        * Redirection, if necessary.
        * Authorization.
        * Calling the appropriate function from the engine.
        """
        # If it is an Ajax request, extract any JSON data.
        ajax_request, recid1, recid2 = False, None, None
        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
        if form.has_key('jsondata'):
            json_data = json.loads(str(form['jsondata']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            json_data = json_unicode_to_utf8(json_data)
            ajax_request = True
            json_response = {}
            try:
                if json_data.has_key('recID1'):
                    recid1 = int(json_data['recID1'])
                    json_data['recID1'] = recid1
                if json_data.has_key('recID2'):
                    if json_data.get('record2Mode') == "recid":
                        recid2 = int(json_data['recID2'])
                        json_data['recID2'] = recid2
            except ValueError:
                json_response.update({
                    'resultCode': 1,
                    'resultText': 'Invalid record ID!'
                })
                return json.dumps(json_response)
            if json_data.has_key("duplicate"):
                if json_data.get('record2Mode') == "recid":
                    json_data["duplicate"] = int(json_data["duplicate"])

        # Authorization.
        user_info = collect_user_info(req)
        if user_info['email'] == 'guest':
            # User is not logged in.
            if not ajax_request:
                # Do not display the introductory recID selection box to guest
                # users (as it used to be with v0.99.0):
                auth_code, auth_message = acc_authorize_action(
                    req, 'runbibmerge')
                referer = '/merge/'
                return page_not_authorized(req=req,
                                           referer=referer,
                                           text=auth_message,
                                           navtrail=navtrail)
            else:
                # Session has most likely timed out.
                json_response.update({
                    'resultCode': 1,
                    'resultText': 'Error: Not logged in'
                })
                return json.dumps(json_response)

        elif self.recid:
            # Handle RESTful call by storing recid and redirecting to
            # generic URL.
            redirect_to_url(
                req, '%s/%s/merge/' % (CFG_SITE_SECURE_URL, CFG_SITE_RECORD))

        if recid1 is not None:
            # Authorize access to record 1.
            auth_code, auth_message = acc_authorize_action(
                req,
                'runbibmerge',
                collection=guess_primary_collection_of_a_record(recid1))
            if auth_code != 0:
                json_response.update({
                    'resultCode': 1,
                    'resultText': 'No access to record %s' % recid1
                })
                return json.dumps(json_response)
        if recid2 is not None:
            # Authorize access to record 2.
            auth_code, auth_message = acc_authorize_action(
                req,
                'runbibmerge',
                collection=guess_primary_collection_of_a_record(recid2))
            if auth_code != 0:
                json_response.update({
                    'resultCode': 1,
                    'resultText': 'No access to record %s' % recid2
                })
                return json.dumps(json_response)

        # Handle request.
        uid = getUid(req)
        if not ajax_request:
            # Show BibEdit start page.
            body, errors, warnings = perform_request_init()

            scripts = ["json2.js", "bibmerge_engine.js"]
            metaheaderadd = ""
            for script in scripts:
                metaheaderadd += '<script type="text/javascript" src="%s/%s"></script>' % (
                    CFG_SITE_URL, auto_version_url("js/" + script))

            return page(title='Record Merger',
                        metaheaderadd=metaheaderadd,
                        body=body,
                        errors=errors,
                        warnings=warnings,
                        uid=uid,
                        language=argd['ln'],
                        navtrail=navtrail,
                        lastupdated=__lastupdated__,
                        req=req)
        else:
            # Handle AJAX request.
            json_response = perform_request_ajax(req, uid, json_data)
            return json.dumps(json_response)
Esempio n. 36
0
    def _process_json_request(self, form, req):
        """Takes care about the json requests."""

        argd = wash_urlargd(form, {
                           self._JSON_DATA_KEY: (str, ""),
                           })

        # load json data
        json_data_string = argd[self._JSON_DATA_KEY]
        json_data_unicode = json.loads(json_data_string)
        json_data = json_unicode_to_utf8(json_data_unicode)

        language = json_data["language"]
        search_criteria = json_data["searchCriteria"]
        output_tags = json_data["outputTags"]
        output_tags = output_tags.split(',')
        output_tags = [tag.strip() for tag in output_tags]
        action_type = json_data["actionType"]
        current_record_id = json_data["currentRecordID"]
        commands = json_data["commands"]
        output_format = json_data["outputFormat"]
        page_to_display = json_data["pageToDisplay"]
        collection = json_data["collection"]
        compute_modifications = json_data["compute_modifications"]
        checked_records = json_data["checked_records"]

        json_response = {}
        if action_type == self._action_types.test_search:
            json_response.update(multi_edit_engine.perform_request_test_search(
                                                    search_criteria,
                                                    [],
                                                    output_format,
                                                    page_to_display,
                                                    language,
                                                    output_tags,
                                                    collection,
                                                    checked_records))
            json_response['display_info_box'] = 1
            json_response['info_html'] = ""
            return json.dumps(json_response)

        elif action_type == self._action_types.display_detailed_record:
            json_response.update(multi_edit_engine.perform_request_detailed_record(
                                                    current_record_id,
                                                    [],
                                                    output_format,
                                                    language))
            return json.dumps(json_response)

        elif action_type == self._action_types.preview_results:
            commands_list, upload_mode, tag_list = self._create_commands_list(commands)
            json_response = {}
            json_response.update(multi_edit_engine.perform_request_test_search(
                                                    search_criteria,
                                                    commands_list,
                                                    output_format,
                                                    page_to_display,
                                                    language,
                                                    output_tags,
                                                    collection,
                                                    compute_modifications,
                                                    upload_mode,
                                                    checked_records))
            return json.dumps(json_response)

        elif action_type == self._action_types.display_detailed_result:
            commands_list, upload_mode, tag_list = self._create_commands_list(commands)
            json_response.update(multi_edit_engine.perform_request_detailed_record(
                                                    current_record_id,
                                                    commands_list,
                                                    output_format,
                                                    language))
            return json.dumps(json_response)

        elif action_type == self._action_types.submit_changes:
            commands_list, upload_mode, tag_list = self._create_commands_list(commands)
            json_response.update(multi_edit_engine.perform_request_submit_changes(search_criteria, commands_list, language, upload_mode, tag_list, collection, req, checked_records))
            return json.dumps(json_response)

        # In case we obtain a wrong action type, we return an empty page.
        return " "
Esempio n. 37
0
def parse_arg(argument_str, arg_name):
    try:
        return encode(json.loads(argument_str))
    except ValueError:
        raise RulesParseError(rule_name, "Invalid value in argument '%s'" %
                              arg_name)
Esempio n. 38
0
def parse_arg(argument_str, arg_name):
    try:
        return encode(json.loads(argument_str))
    except ValueError:
        raise RulesParseError(rule_name,
                              "Invalid value in argument '%s'" % arg_name)
Esempio n. 39
0
    def index(self, req, form):
        """Handle all BibEdit requests.
        The responsibilities of this function are:
        * JSON decoding and encoding.
        * Redirection, if necessary.
        * Authorization.
        * Calling the appropriate function from the engine.

        """
        uid = getUid(req)
        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})

        # If it is an Ajax request, extract any JSON data.
        ajax_request, recid = False, None
        if form.has_key('jsondata'):
            json_data = json.loads(str(form['jsondata']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            json_data = json_unicode_to_utf8(json_data)
            ajax_request = True
            if json_data.has_key('recID'):
                recid = json_data['recID']
            json_response = {'resultCode': 0, 'ID': json_data['ID']}

        # Authorization.
        user_info = collect_user_info(req)
        if user_info['email'] == 'guest':
            # User is not logged in.
            if not ajax_request:
                # Do not display the introductory recID selection box to guest
                # users (as it used to be with v0.99.0):
                auth_code, auth_message = acc_authorize_action(
                    req, 'runbibedit')
                referer = '/edit/'
                if self.recid:
                    referer = '/%s/%s/edit/' % (CFG_SITE_RECORD, self.recid)
                return page_not_authorized(req=req,
                                           referer=referer,
                                           text=auth_message,
                                           navtrail=navtrail)
            else:
                # Session has most likely timed out.
                json_response.update({'resultCode': 100})
                return json.dumps(json_response)

        elif self.recid:
            # Handle redirects from /record/<record id>/edit to the
            # generic URL.
            redirect_to_url(
                req, '%s/%s/edit/#state=edit&recid=%s&recrev=%s' %
                (CFG_SITE_SECURE_URL, CFG_SITE_RECORD, self.recid, ""))

        elif recid is not None:
            json_response.update({'recID': recid})
            if json_data['requestType'] == "getRecord":
                # Authorize access to record.
                if not user_can_edit_record_collection(req, recid):
                    json_response.update({'resultCode': 101})
                    return json.dumps(json_response)

        # Handle request.
        if not ajax_request:
            # Show BibEdit start page.
            body, errors, warnings = perform_request_init(
                uid, argd['ln'], req, __lastupdated__)
            title = 'Record Editor'
            return page(title=title,
                        body=body,
                        errors=errors,
                        warnings=warnings,
                        uid=uid,
                        language=argd['ln'],
                        navtrail=navtrail,
                        lastupdated=__lastupdated__,
                        req=req,
                        body_css_classes=['bibedit'])
        else:
            # Handle AJAX request.
            json_response.update(
                perform_request_ajax(req, recid, uid, json_data))
            return json.dumps(json_response)
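
The BibEdit handler reads 'ID', 'recID' and 'requestType' from the decoded jsondata before dispatching to perform_request_ajax. A hypothetical payload that passes the checks above (the record id and the ID value are made up; 'getRecord' is the request type tested in the code):

import json

jsondata = json.dumps({
    'ID': 1,                      # request identifier, echoed in the response
    'recID': 18,                  # record to edit
    'requestType': 'getRecord',
})
json_data = json.loads(jsondata)
json_response = {'resultCode': 0, 'ID': json_data['ID']}
json_response.update({'recID': json_data['recID']})
print(json.dumps(json_response))
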
Esempio n. 40
0
def task_submit_elaborate_specific_parameter(key, value, opts, args):
    """ Given the string key it checks it's meaning, eventually using the
    value. Usually it fills some key in the options dict.
    It must return True if it has elaborated the key, False, if it doesn't
    know that key.
    eg:
    if key in ('-n', '--number'):
        self.options['number'] = value
        return True
    return False
    """
    ## A dictionary used for mapping CLI parameters to task_option keys
    parameter_mapping = {
        '-p': 'profile_name',
        '-i': 'input',
        '--input': 'input',
        '-o': 'output',
        '--output': 'output',
        '-m': 'mode',
        '--mode': 'mode',
        '--acodec': 'acodec',
        '--vcodec': 'vcodec',
        '--abitrate': 'abitrate',
        '--vbitrate': 'vbitrate',
        '--resolution': 'size',
        '--passes': 'passes',
        '--special': 'special',
        '--specialfirst': 'specialfirst',
        '--specialsecond': 'specialsecond',
        '--width': 'width',
        '--height': 'height',
        '--aspect': 'aspect',
        '--number': 'numberof',
        '--positions': 'positions',
        '-D': 'meta_dump',
        '-W': 'meta_input',
        '--dump': 'meta_dump',
        '--write': 'meta_input',
        '--newjobfolder': 'new_job_folder',
        '--oldjobfolder': 'old_job_folder',
        '--recid': 'recid',
        '--collection': 'collection',
        '--search': 'search'
    }

    ## PASSES ##
    ## Transform 'passes' to integer
    if key in ('--passes', ):
        try:
            value = int(value)
        except ValueError:
            write_message('Value of \'--passes\' must be an integer')
            return False

    ## HEIGHT, WIDTH ##
    if key in ('--height', '--width'):
        try:
            value = int(value)
        except ValueError:
            write_message('Value of \'--height\' or \'--width\''
                          ' must be an integer')
            return False

    ## META MODE ##
    ## Transform meta mode values to boolean
    if key in ('-D', '--dump'):
        if not value in ("ffprobe", "mediainfo", "pbcore"):
            write_message("Unknown dumping format, must be 'ffprobe', 'mediainfo' or 'pbcore'")
            return False
    if key in ('--substitute', ):
        value = True
    ## Transform the 'positions' parameter into a list
    if key in ('--positions',):
        try:
            parsed = json.loads(value)
            if not isinstance(parsed, list):
                write_message('Value of \'--positions\' must be a json list')
                return False
            else:
                value = parsed
        except ValueError:
            write_message('Value of \'--positions\' must be a json list')
            return False

    ## NUMBEROF ##
    ## Transform 'number' to integer
    if key in ('--number', ):
        try:
            value = int(value)
        except ValueError:
            write_message('Value of \'--number\' must be an integer')
            return False
    ## ASPECT ##
    if key in ('--aspect', ):
        try:
            xasp, yasp = str(value).split(':')
            xasp = float(xasp)
            yasp = float(yasp)
            value = xasp / yasp
        except:
            write_message('Value of \'--aspect\' must be in \'4:3\' format')
            return False
    ## RECID ##
    if key in ('--recid', ):
        try:
            value = int(value)
        except ValueError:
            write_message('Value of \'--recid\' must be an integer')
            return False

    ## GENERAL MAPPING ##
    ## For all general or other parameters just use the mapping dictionary
    if key in parameter_mapping:
        task_set_option(parameter_mapping[key], value)
        return True
    return False
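
Two of the conversions above are easy to exercise in isolation: '--aspect' turns a 'W:H' string into a float ratio, and '--positions' expects a JSON list. A minimal standalone sketch of both:

import json

def parse_aspect(value):
    # 'W:H' -> float ratio, as in the '--aspect' branch above.
    xasp, yasp = str(value).split(':')
    return float(xasp) / float(yasp)

def parse_positions(value):
    # JSON list expected, as in the '--positions' branch above.
    parsed = json.loads(value)
    if not isinstance(parsed, list):
        raise ValueError("Value of '--positions' must be a json list")
    return parsed

print(parse_aspect('4:3'))               # 1.33333333333
print(parse_positions('[10, 25, 50]'))   # [10, 25, 50]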