def manage(self, req, form):
    """Web interface for the management of the info space.

    Plain requests render the manager page; Ajax requests (detected by a
    'jsondata' form field) are dispatched on the JSON "action" key and
    answered with a JSON string.
    """
    uid = getUid(req)
    argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
    # If it is an Ajax request, extract any JSON data.
    ajax_request = False
    if 'jsondata' in form:
        json_data = json.loads(str(form['jsondata']))
        # Normalise unicode strings to UTF-8 byte strings.
        json_data = json_unicode_to_utf8(json_data)
        ajax_request = True
        json_response = {}
    # Authorization.
    user_info = collect_user_info(req)
    if user_info['email'] == 'guest':
        # User is not logged in.
        if not ajax_request:
            # Do not display the introductory recID selection box to guest
            # users (as it used to be with v0.99.0):
            dummy_auth_code, auth_message = acc_authorize_action(
                req, 'runinfomanager')
            referer = '/info'
            return page_not_authorized(req=req,
                                       referer=referer,
                                       text=auth_message)
        else:
            # Session has most likely timed out.
            json_response.update({'status': "timeout"})
            return json.dumps(json_response)
    # Handle request.
    if not ajax_request:
        body, errors, warnings = perform_request_init_info_interface()
        title = 'Info Space Manager'
        return page(title=title,
                    body=body,
                    errors=errors,
                    warnings=warnings,
                    uid=uid,
                    language=argd['ln'],
                    req=req)
    else:
        # Handle AJAX request.
        if json_data["action"] == "listFiles":
            json_response.update(
                perform_request_edit_file(json_data["filename"]))
            try:
                return json.dumps(json_response)
            except UnicodeDecodeError:
                # Error decoding, the file can be a pdf, image or any kind
                # of file non-editable
                return json.dumps({"status": "error_file_not_readable"})
        if json_data["action"] == "saveContent":
            return json.dumps(
                perform_request_save_file(json_data["filename"],
                                          json_data["filecontent"]))
        # NOTE(review): unknown Ajax actions fall through and return None.
def manage(self, req, form):
    """Web interface for the management of the info space."""
    uid = getUid(req)
    argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})

    # An Ajax request carries its payload in the 'jsondata' form field.
    is_ajax = 'jsondata' in form
    if is_ajax:
        payload = json_unicode_to_utf8(json.loads(str(form['jsondata'])))
        reply = {}

    # Authorization: guests are rejected (page) or told their session
    # expired (Ajax).
    user_info = collect_user_info(req)
    if user_info['email'] == 'guest':
        if is_ajax:
            # Session has most likely timed out.
            reply.update({'status': "timeout"})
            return json.dumps(reply)
        # Do not display the introductory recID selection box to guest
        # users (as it used to be with v0.99.0):
        dummy_auth_code, auth_message = acc_authorize_action(req,
                                                            'runinfomanager')
        return page_not_authorized(req=req,
                                   referer='/info',
                                   text=auth_message)

    if not is_ajax:
        # Plain page request: render the manager start page.
        body, errors, warnings = perform_request_init_info_interface()
        return page(title='Info Space Manager',
                    body=body,
                    errors=errors,
                    warnings=warnings,
                    uid=uid,
                    language=argd['ln'],
                    req=req)

    # Ajax dispatch on the requested action.
    if payload["action"] == "listFiles":
        reply.update(perform_request_edit_file(payload["filename"]))
        try:
            return json.dumps(reply)
        except UnicodeDecodeError:
            # The file is not textual (e.g. a pdf or an image) and
            # therefore not editable.
            return json.dumps({"status": "error_file_not_readable"})
    if payload["action"] == "saveContent":
        return json.dumps(perform_request_save_file(payload["filename"],
                                                    payload["filecontent"]))
def dump_metadata(input_file, output_file, meta_type="ffprobe"):
    """Dump the metadata of a given video to the given file.

    The output is JSON for 'ffprobe' and 'mediainfo', or XML (PBCore)
    for 'pbcore'.

    @param input_file: Full path to the video
    @param output_file: Full path to the dump file
    @param meta_type: Metadata style/library to use, either ffprobe,
                      mediainfo or pbcore
    @raise ValueError: if meta_type is not one of the supported values
    """
    if meta_type not in ('ffprobe', 'mediainfo', 'pbcore'):
        raise ValueError("Type must be ffprobe, pbcore or mediainfo")
    if meta_type == 'pbcore':
        ## PBCore output is already a serialized (XML) string.
        output = pbcore_metadata(input_file)
    else:
        if meta_type == 'ffprobe':
            metadata_dict = ffprobe_metadata(input_file)
        else:
            metadata_dict = mediainfo_metadata(input_file)
        output = json.dumps(metadata_dict, sort_keys=True, indent=4)
    ## 'with' guarantees the handle is closed even if the write fails,
    ## and avoids shadowing the builtin 'file'.
    with open(output_file, "w") as dump_file:
        dump_file.write(output)
def doilookup(self, req, form):
    """
    Returns the metadata from the crossref website based on the DOI,
    flattened into a {"<tag><ind1><ind2><code>": [values]} JSON map.
    """
    args = wash_urlargd(form, {
        'doi': (str, '')})
    response = defaultdict(list)
    if args['doi']:
        doi = args['doi']
        try:
            marcxml_template = get_marcxml_for_doi(doi)
        except CrossrefError:
            # Just ignore Crossref errors: the empty map is returned.
            pass
        else:
            record = create_record(marcxml_template)[0]
            if record:
                # We need to convert this record structure to a simple dictionary
                for key, value in record.items():
                    # key, value = (773, [([('0', 'PER:64142'), ...], ' ', ' ', '', 47)])
                    for val in value:
                        # val = ([('0', 'PER:64142'), ...], ' ', ' ', '', 47)
                        # Blank MARC indicators are encoded as underscores.
                        ind1 = val[1].replace(" ", "_")
                        ind2 = val[2].replace(" ", "_")
                        for (k, v) in val[0]:
                            # k, v = ('0', 'PER:5409')
                            response[key+ind1+ind2+k].append(v)
    # The output dictionary is something like:
    # {"100__a": ['Smith, J.'],
    #  "700__a": ['Anderson, J.', 'Someoneelse, E.'],
    #  "700__u": ['University1', 'University2']}
    # return dictionary as JSON
    return json.dumps(response)
def update_redirection(label, plugin, parameters=None):
    """
    Update an existing redirection from /goto/<LABEL> to the URL returned by
    running the given plugin (as available in REDIRECT_METHODS), with the
    given parameters.

    @param label: the uniquely identifying label for this redirection
    @type label: string

    @param plugin: the algorithm that should resolve the redirection, usually:
        "goto_plugin_FOO"
    @type plugin: string

    @param parameters: further parameters that should be passed to the plugin.
        This should be a dictionary or None. Note that these parameters could
        be overridden by the query parameters.
    @type parameters: dict or None

    @raises: ValueError in case the label does not already exist.

    @note: parameters are going to be serialized to JSON before being stored
        in the DB. Hence only JSON-serializable values should be put there.
    """
    if not run_sql("SELECT label FROM goto WHERE label=%s", (label, )):
        raise ValueError("%s label does not already exist" % label)
    if plugin not in REDIRECT_METHODS:
        raise ValueError("%s plugin does not exist" % plugin)
    if parameters is None:
        parameters = {}
    try:
        parameters.items()  ## dummy test to see if it exposes the dict interface
        json_parameters = json.dumps(parameters)
    except (AttributeError, TypeError, ValueError) as err:
        ## AttributeError: not a mapping; TypeError/ValueError: not
        ## JSON-serializable. Narrower than a blanket Exception so real
        ## bugs are not masked as "invalid parameters".
        raise ValueError("The parameters %s do not specify a valid JSON map: %s" % (parameters, err))
    run_sql("UPDATE goto SET plugin=%s, parameters=%s, modification_date=NOW() WHERE label=%s",
            (plugin, json_parameters, label))
def doilookup(self, req, form):
    """
    Returns the metadata from the crossref website based on the DOI.
    """
    washed = wash_urlargd(form, {'doi': (str, '')})
    result = defaultdict(list)
    doi = washed['doi']
    if doi:
        try:
            marcxml = get_marcxml_for_doi(doi)
        except CrossrefError:
            # Ignore Crossref failures: the empty map is returned.
            pass
        else:
            rec = create_record(marcxml)[0]
            if rec:
                # Flatten the MARC record structure into a plain map of
                # "<tag><ind1><ind2><subfield-code>" -> list of values.
                for tag, fields in rec.items():
                    # tag, fields = (773, [([('0', 'PER:64142'), ...], ' ', ' ', '', 47)])
                    for field in fields:
                        # field = ([('0', 'PER:64142'), ...], ' ', ' ', '', 47)
                        ind1 = field[1].replace(" ", "_")
                        ind2 = field[2].replace(" ", "_")
                        for (code, value) in field[0]:
                            # code, value = ('0', 'PER:5409')
                            result[tag + ind1 + ind2 + code].append(value)
    # The produced dictionary looks like:
    # {"100__a": ['Smith, J.'],
    #  "700__a": ['Anderson, J.', 'Someoneelse, E.'],
    #  "700__u": ['University1', 'University2']}
    return json.dumps(result)
def read(label):
    """Print all information about a redirection, one "key: value" per line.

    The 'parameters' entry is rendered as JSON; every other value is
    printed as-is. Keys are printed in sorted order.
    """
    import json
    from .api import get_redirection_data
    data = get_redirection_data(label)
    for key in sorted(data):
        value = data[key]
        rendered = json.dumps(value) if key == 'parameters' else value
        print("{0}: {1}".format(key, rendered))
def test_create_example_url(self, email, login_method, robot, ip,
                            assertion=None, timeout=None, referer=None,
                            groups=None, nickname=None):
    """
    Create a test URL to test the robot login.

    @param email: email of the user we want to login as.
    @type email: string
    @param login_method: the login_method name as specified in
        CFG_EXTERNAL_AUTHENTICATION.
    @type login_method: string
    @param robot: the identifier of this robot.
    @type robot: string
    @param assertion: any further data we want to send to.
    @type: json serializable mapping
    @param ip: the IP of the user.
    @type: string
    @param timeout: timeout when the URL will expire (in seconds from the Epoch)
    @type timeout: float
    @param referer: the URL where to land after successful login.
    @type referer: string
    @param groups: the list of optional group of the user.
    @type groups: list of string
    @param nickname: the optional nickname of the user.
    @type nickname: string
    @return: the URL to login as the user.
    @rtype: string
    """
    from invenio.modules.access.local_config import CFG_EXTERNAL_AUTHENTICATION
    from invenio.utils.url import create_url
    if assertion is None:
        assertion = {}
    # Fill the assertion map with the identity attributes this robot
    # authentication scheme expects.
    assertion[self.email_attribute_name] = email
    if nickname:
        assertion[self.nickname_attribute_name] = nickname
    if groups:
        assertion[self.groups_attribute_name] = self.groups_separator.join(groups)
    if timeout is None:
        # Default expiration: now plus the configured timeout window.
        timeout = time.time() + CFG_ROBOT_URL_TIMEOUT
    assertion[self.timeout_attribute_name] = timeout
    if referer is None:
        referer = CFG_SITE_URL
    if login_method is None:
        # Pick the first external login method flagged as default
        # (details[2]) in the configuration.
        for a_login_method, details in iteritems(CFG_EXTERNAL_AUTHENTICATION):
            if details[2]:
                login_method = a_login_method
                break
    robot_keys = load_robot_keys()
    assertion[self.userip_attribute_name] = ip
    # Serialize (and optionally compress + base64-encode) the assertion
    # BEFORE signing: the digest must cover the exact transmitted bytes.
    assertion = json.dumps(assertion)
    if self.use_zlib:
        assertion = base64.urlsafe_b64encode(compress(assertion))
    shared_key = robot_keys[login_method][robot]
    digest = self.sign(shared_key, assertion)
    return create_url("%s%s" % (CFG_SITE_SECURE_URL, "/youraccount/robotlogin"), {
        'assertion': assertion,
        'robot': robot,
        'login_method': login_method,
        'digest': digest,
        'referer': referer})
def register_redirection(label, plugin, parameters=None, update_on_duplicate=False):
    """
    Register a redirection from /goto/<LABEL> to the URL returned by running
    the given plugin (as available in REDIRECT_METHODS), with the given
    parameters.

    @param label: the uniquely identifying label for this redirection
    @type label: string

    @param plugin: the algorithm that should resolve the redirection, usually:
        "goto_plugin_FOO"
    @type plugin: string

    @param parameters: further parameters that should be passed to the plugin.
        This should be a dictionary or None. Note that these parameters could
        be overridden by the query parameters.
    @type parameters: dict or None

    @param update_on_duplicate: if False (default), if the label already exist it
        L{register_redirection} will raise a ValueError exception. If True, it
        will implicitly call L{update_redirection}.
    @type update_on_duplicate: bool

    @raises: ValueError in case of duplicate label and L{update_on_duplicate} is
        set to False.

    @note: parameters are going to be serialized to JSON before being stored
        in the DB. Hence only JSON-serializable values should be put there.
    """
    if run_sql("SELECT label FROM goto WHERE label=%s", (label, )):
        ## BUGFIX: the old code raised ValueError here unconditionally,
        ## so update_on_duplicate=True never worked as documented.
        if update_on_duplicate:
            update_redirection(label=label, plugin=plugin, parameters=parameters)
            return
        raise ValueError("%s label already exists" % label)
    if plugin not in REDIRECT_METHODS:
        raise ValueError("%s plugin does not exist" % plugin)
    if parameters is None:
        parameters = {}
    try:
        parameters.items()  ## dummy test to see if it exposes the dict interface
        json_parameters = json.dumps(parameters)
    except (AttributeError, TypeError, ValueError) as err:
        ## AttributeError: not a mapping; TypeError/ValueError: not
        ## JSON-serializable (consistent with update_redirection).
        raise ValueError("The parameters %s do not specify a valid JSON map: %s" % (parameters, err))
    try:
        run_sql("INSERT INTO goto(label, plugin, parameters, creation_date, modification_date) VALUES(%s, %s, %s, NOW(), NOW())",
                (label, plugin, json_parameters))
    except IntegrityError:
        ## A concurrent request inserted the same label between our
        ## SELECT above and this INSERT.
        if run_sql("SELECT label FROM goto WHERE label=%s", (label,)):
            if update_on_duplicate:
                update_redirection(label=label, plugin=plugin, parameters=parameters)
            else:
                raise ValueError("%s label already exists" % label)
        else:
            ## This is due to some other issue
            raise
def print_rules():
    """Prints the valid rules to stdout"""
    # NOTE: Python 2 print statements below -- do not modernise without
    # porting the whole module.
    plugins = load_plugins()
    for rule_name, rule in load_rules(plugins).items():
        print "Rule %s:" % rule_name
        # Filters are optional; print only the ones the rule defines.
        if "filter_pattern" in rule:
            print " - Filter: %s" % rule["filter_pattern"]
        if "filter_collection" in rule:
            print " - Filter collection: %s" % rule["filter_collection"]
        print " - Checker: %s" % rule["check"]
        if len(rule["checker_params"]) > 0:
            print " Parameters:"
            for param, val in rule["checker_params"].items():
                # JSON-encode values so their types are unambiguous.
                print " %s = %s" % (param, json.dumps(val))
        # Blank line between rules.
        print
def getHoldingPenData(req, elementId):
    # Return a JSON object {"elementId": ..., "html": ...} describing one
    # node of the OAI-harvest holding-pen tree. elementId is an
    # underscore-separated path whose length selects the tree depth.
    try:
        getUid(req)
    except Error:
        return "unauthorised access !"
    auth = check_user(req, 'cfgoaiharvest')
    if auth[0]:
        return "unauthorised access !"

    elements = elementId.split("_")
    resultHtml = None
    # Dispatch on the number of path components.
    if len(elements) == 2:
        filter_key = elements[1]
        resultHtml = oha.perform_request_gethpyears(elements[0], filter_key)
    elif len(elements) == 3:
        # only the year is specified
        filter_key = elements[2]
        nodeYear = int(elements[1])
        resultHtml = oha.perform_request_gethpyear(elements[0], nodeYear,
                                                   filter_key)
    elif len(elements) == 4:
        # year and month specified
        nodeYear = int(elements[1])
        nodeMonth = int(elements[2])
        filter_key = elements[3]
        resultHtml = oha.perform_request_gethpmonth(elements[0], nodeYear,
                                                    nodeMonth, filter_key)
    elif len(elements) == 5:
        # year, month and day specified - returning the entries themselves
        nodeYear = int(elements[1])
        nodeMonth = int(elements[2])
        nodeDay = int(elements[3])
        filter_key = elements[4]
        daySize = get_holdingpen_day_size(nodeYear, nodeMonth, nodeDay,
                                          filter_key)
        # URL-quote the filter since it travels inside generated URLs.
        urlFilter = urllib.quote(filter_key)
        resultHtml = oha.perform_request_gethpdayfragment(
            nodeYear, nodeMonth, nodeDay, daySize, 0, urlFilter)
    else:
        # nothing of the above. error
        resultHtml = "Wrong request"
    return json.dumps({"elementId": elementId, "html": resultHtml})
def create_job_from_dictionary(job_dict,
                               job_filename=None,
                               job_directory=CFG_BIBENCODE_DAEMON_DIR_NEWJOBS):
    """Creates a job file from a given dictionary.

    The job description is serialized to pretty-printed JSON and written
    to <job_directory>/<job_filename>.job.

    @param job_dict: Dictionary that contains the job description
    @type job_dict: dict
    @param job_filename: Filename for the job; a random UUID is generated
        when not given
    @type job_filename: string
    @param job_directory: fullpath to the directory storing the job files
    @type job_directory: string
    """
    if not job_filename:
        ## No name given: generate a unique one.
        job_filename = str(uuid.uuid4())
    if not job_filename.endswith(".job"):
        job_filename += ".job"
    job_fullpath = os.path.join(job_directory, job_filename)
    job_string = json.dumps(job_dict, sort_keys=False, indent=4)
    ## 'with' closes the handle even if the write fails, and avoids
    ## shadowing the builtin 'file'.
    with open(job_fullpath, "w") as job_file:
        job_file.write(job_string)
def getHoldingPenData(req, elementId):
    # One node of the holding-pen tree, keyed by an underscore-separated
    # path; the number of components selects the tree depth.
    try:
        getUid(req)
    except Error:
        return "unauthorised access !"
    auth = check_user(req, 'cfgoaiharvest')
    if auth[0]:
        return "unauthorised access !"

    parts = elementId.split("_")
    depth = len(parts)
    if depth == 2:
        html = oha.perform_request_gethpyears(parts[0], parts[1])
    elif depth == 3:
        # Only the year is specified.
        html = oha.perform_request_gethpyear(parts[0], int(parts[1]),
                                             parts[2])
    elif depth == 4:
        # Year and month specified.
        html = oha.perform_request_gethpmonth(parts[0], int(parts[1]),
                                              int(parts[2]), parts[3])
    elif depth == 5:
        # Year, month and day specified: return the entries themselves.
        year = int(parts[1])
        month = int(parts[2])
        day = int(parts[3])
        filter_key = parts[4]
        day_size = get_holdingpen_day_size(year, month, day, filter_key)
        quoted_filter = urllib.quote(filter_key)
        html = oha.perform_request_gethpdayfragment(year, month, day,
                                                    day_size, 0,
                                                    quoted_filter)
    else:
        # Nothing of the above: malformed identifier.
        html = "Wrong request"
    return json.dumps({"elementId": elementId, "html": html})
def uploadfile(self, req, form):
    """
    Similar to /submit, but only consider files. Nice for
    asynchronous Javascript uploads. Should be used to upload a
    single file.

    Also try to create an icon, and return URL to file(s) + icon(s)

    Authentication is performed based on session ID passed as
    parameter instead of cookie-based authentication, due to the
    use of this URL by the Flash plugin (to upload multiple files
    at once), which does not route cookies.

    FIXME: consider adding /deletefile and /modifyfile functions +
    parsing of additional parameters to rename files, add
    comments, restrictions, etc.
    """
    argd = wash_urlargd(form, {
        'doctype': (str, ''),
        'access': (str, ''),
        'indir': (str, ''),
        'session_id': (str, ''),
        'rename': (str, ''),
    })
    curdir = None
    if "indir" not in form or \
            "doctype" not in form or \
            "access" not in form:
        raise apache.SERVER_RETURN(apache.HTTP_BAD_REQUEST)
    else:
        curdir = os.path.join(CFG_WEBSUBMIT_STORAGEDIR,
                              argd['indir'],
                              argd['doctype'],
                              argd['access'])
    user_info = collect_user_info(req)
    if "session_id" in form:
        # Are we uploading using Flash, which does not transmit
        # cookie? The expect to receive session_id as a form
        # parameter. First check that IP addresses do not
        # mismatch.
        uid = session.uid
        user_info = collect_user_info(uid)
    # NOTE(review): 'uid' is only bound in the "session_id" branch above;
    # requests without a session_id would reach acc_authorize_action()
    # below with 'uid' undefined -- confirm all callers send session_id.
    try:
        act_fd = file(os.path.join(curdir, 'act'))
        action = act_fd.read()
        act_fd.close()
    except:
        action = ""
    try:
        recid_fd = file(os.path.join(curdir, 'SN'))
        recid = recid_fd.read()
        recid_fd.close()
    except:
        recid = ''
    user_is_owner = False
    if recid:
        user_is_owner = is_user_owner_of_record(user_info, recid)
    try:
        categ_fd = file(os.path.join(curdir, 'combo%s' % argd['doctype']))
        categ = categ_fd.read()
        categ_fd.close()
    except IOError:
        # No category file for this doctype: accept any category.
        categ = '*'
    # Is user authorized to perform this action?
    (auth_code, auth_message) = acc_authorize_action(
        uid, "submit",
        authorized_if_no_roles=not isGuestUser(uid),
        verbose=0,
        doctype=argd['doctype'],
        act=action,
        categ=categ)
    if acc_is_role("submit", doctype=argd['doctype'], act=action) and auth_code != 0 and not user_is_owner:
        # User cannot submit
        raise apache.SERVER_RETURN(apache.HTTP_UNAUTHORIZED)
    else:
        # Process the upload and get the response
        added_files = {}
        for key, formfields in form.items():
            filename = key.replace("[]", "")
            file_to_open = os.path.join(curdir, filename)
            if hasattr(formfields, "filename") and formfields.filename:
                dir_to_open = os.path.abspath(os.path.join(curdir,
                                                           'files',
                                                           str(user_info['uid']),
                                                           key))
                try:
                    # Guard against path escapes out of the storage dir.
                    assert (dir_to_open.startswith(CFG_WEBSUBMIT_STORAGEDIR))
                except AssertionError:
                    register_exception(req=req,
                                       prefix='curdir="%s", key="%s"' % (curdir, key))
                    raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)
                if not os.path.exists(dir_to_open):
                    try:
                        os.makedirs(dir_to_open)
                    except OSError as e:
                        if e.errno != errno.EEXIST:
                            # If the issue is only that directory
                            # already exists, then continue, else
                            # report
                            register_exception(req=req, alert_admin=True)
                            raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)
                filename = formfields.filename
                ## Before saving the file to disc, wash the filename (in particular
                ## washing away UNIX and Windows (e.g. DFS) paths):
                filename = os.path.basename(filename.split('\\')[-1])
                filename = filename.strip()
                if filename != "":
                    # Check that file does not already exist
                    n = 1  # NOTE(review): unused counter, kept verbatim
                    while os.path.exists(os.path.join(dir_to_open, filename)):
                        basedir, name, extension = decompose_file(filename)
                        new_name = propose_next_docname(name)
                        filename = new_name + extension
                    # This may be dangerous if the file size is bigger
                    # than the available memory
                    fp = open(os.path.join(dir_to_open, filename), "w")
                    fp.write(formfields.file.read())
                    fp.close()
                    fp = open(os.path.join(curdir, "lastuploadedfile"), "w")
                    fp.write(filename)
                    fp.close()
                    fp = open(file_to_open, "w")
                    fp.write(filename)
                    fp.close()
                    try:
                        # Create icon
                        (icon_path, icon_name) = create_icon({
                            'input-file': os.path.join(dir_to_open, filename),
                            'icon-name': filename,  # extension stripped automatically
                            'icon-file-format': 'gif',
                            'multipage-icon': False,
                            'multipage-icon-delay': 100,
                            'icon-scale': "300>",  # Resize only if width > 300
                            'verbosity': 0,
                        })
                        icons_dir = os.path.join(os.path.join(curdir,
                                                              'icons',
                                                              str(user_info['uid']),
                                                              key))
                        if not os.path.exists(icons_dir):
                            # Create uid/icons dir if needed
                            try:
                                os.makedirs(icons_dir)
                            except OSError as e:
                                if e.errno != errno.EEXIST:
                                    # If the issue is only that
                                    # directory already exists,
                                    # then continue, else report
                                    register_exception(req=req, alert_admin=True)
                                    raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)
                        os.rename(os.path.join(icon_path, icon_name),
                                  os.path.join(icons_dir, icon_name))
                        added_files[key] = {'name': filename,
                                            'iconName': icon_name}
                    except InvenioWebSubmitIconCreatorError as e:
                        # We could not create the icon; the upload itself
                        # still succeeded.
                        added_files[key] = {'name': filename}
                        continue
                else:
                    raise apache.SERVER_RETURN(apache.HTTP_BAD_REQUEST)
    # Send our response
    if CFG_JSON_AVAILABLE:
        return json.dumps(added_files)
def upload_video(self, req, form):
    """
    A clone of uploadfile but for (large) videos.
    Does not copy the uploaded file to the websubmit directory.
    Instead, the path to the file is stored inside the submission directory.
    """

    def gcd(a, b):
        """The euclidean algorithm: greatest common divisor of a and b."""
        while a:
            a, b = b % a, a
        return b

    from invenio.modules.encoder.extract import extract_frames
    from invenio.modules.encoder.config import CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_DIR, CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_FNAME
    from invenio.modules.encoder.encode import determine_aspect
    from invenio.modules.encoder.utils import probe
    from invenio.modules.encoder.metadata import ffprobe_metadata
    from invenio.legacy.websubmit.config import CFG_WEBSUBMIT_TMP_VIDEO_PREFIX

    argd = wash_urlargd(form, {
        'doctype': (str, ''),
        'access': (str, ''),
        'indir': (str, ''),
        'session_id': (str, ''),
        'rename': (str, ''),
    })
    curdir = None
    if "indir" not in form or \
            "doctype" not in form or \
            "access" not in form:
        raise apache.SERVER_RETURN(apache.HTTP_BAD_REQUEST)
    else:
        curdir = os.path.join(CFG_WEBSUBMIT_STORAGEDIR,
                              argd['indir'],
                              argd['doctype'],
                              argd['access'])
    user_info = collect_user_info(req)
    if "session_id" in form:
        # Are we uploading using Flash, which does not transmit
        # cookie? The expect to receive session_id as a form
        # parameter. First check that IP addresses do not
        # mismatch.
        uid = session.uid
        user_info = collect_user_info(uid)
    try:
        act_fd = file(os.path.join(curdir, 'act'))
        action = act_fd.read()
        act_fd.close()
    except:
        ## BUGFIX: used to assign 'act' instead of 'action', leaving
        ## 'action' undefined below when the 'act' file is unreadable
        ## (the parallel code in uploadfile() assigns 'action').
        action = ""
    # Is user authorized to perform this action?
    (auth_code, auth_message) = acc_authorize_action(
        uid, "submit",
        authorized_if_no_roles=not isGuestUser(uid),
        verbose=0,
        doctype=argd['doctype'],
        act=action)
    if acc_is_role("submit", doctype=argd['doctype'], act=action) and auth_code != 0:
        # User cannot submit
        raise apache.SERVER_RETURN(apache.HTTP_UNAUTHORIZED)
    else:
        # Process the upload and get the response
        json_response = {}
        for key, formfields in form.items():
            filename = key.replace("[]", "")
            if hasattr(formfields, "filename") and formfields.filename:
                dir_to_open = os.path.abspath(os.path.join(curdir,
                                                           'files',
                                                           str(user_info['uid']),
                                                           key))
                try:
                    # Guard against path escapes out of the storage dir.
                    assert (dir_to_open.startswith(CFG_WEBSUBMIT_STORAGEDIR))
                except AssertionError:
                    register_exception(req=req,
                                       prefix='curdir="%s", key="%s"' % (curdir, key))
                    raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)
                if not os.path.exists(dir_to_open):
                    try:
                        os.makedirs(dir_to_open)
                    except OSError as e:
                        if e.errno != errno.EEXIST:
                            # If the issue is only that directory
                            # already exists, then continue, else
                            # report
                            register_exception(req=req, alert_admin=True)
                            raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)
                filename = formfields.filename
                ## Before saving the file to disc, wash the filename (in particular
                ## washing away UNIX and Windows (e.g. DFS) paths):
                filename = os.path.basename(filename.split('\\')[-1])
                filename = filename.strip()
                if filename != "":
                    # Check that file does not already exist
                    while os.path.exists(os.path.join(dir_to_open, filename)):
                        basedir, name, extension = decompose_file(filename)
                        new_name = propose_next_docname(name)
                        filename = new_name + extension

                    #-------------#
                    # VIDEO STUFF #
                    #-------------#

                    ## Remove all previous uploads
                    filelist = os.listdir(os.path.split(formfields.file.name)[0])
                    for afile in filelist:
                        if argd['access'] in afile:
                            os.remove(os.path.join(os.path.split(formfields.file.name)[0],
                                                   afile))

                    ## Check if the file is a readable video
                    ## We must exclude all image and audio formats that are readable by ffprobe
                    ## NOTE(review): os.path.splitext() keeps the leading dot
                    ## ('.jpg'), so this blacklist comparison never matches and
                    ## only the probe() check is effective. Left unchanged on
                    ## purpose: the list also contains 'mp4', so "fixing" the
                    ## comparison would start rejecting mp4 videos.
                    if (os.path.splitext(filename)[1] in ['jpg', 'jpeg', 'gif',
                            'tiff', 'bmp', 'png', 'tga', 'jp2', 'j2k', 'jpf',
                            'jpm', 'mj2', 'biff', 'cgm', 'exif', 'img', 'mng',
                            'pic', 'pict', 'raw', 'wmf', 'jpe', 'jif', 'jfif',
                            'jfi', 'tif', 'webp', 'svg', 'ai', 'ps', 'psd',
                            'wav', 'mp3', 'pcm', 'aiff', 'au', 'flac', 'wma',
                            'm4a', 'wv', 'oga', 'm4a', 'm4b', 'm4p', 'm4r',
                            'aac', 'mp4', 'vox', 'amr', 'snd']
                            or not probe(formfields.file.name)):
                        formfields.file.close()
                        raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)

                    ## We have no "delete" attribute in Python 2.4
                    if sys.hexversion < 0x2050000:
                        ## We need to rename first and create a dummy file
                        ## Rename the temporary file for the garbage collector
                        new_tmp_fullpath = os.path.split(formfields.file.name)[0] + "/" + CFG_WEBSUBMIT_TMP_VIDEO_PREFIX + argd['access'] + "_" + os.path.split(formfields.file.name)[1]
                        os.rename(formfields.file.name, new_tmp_fullpath)
                        dummy = open(formfields.file.name, "w")
                        dummy.close()
                        formfields.file.close()
                    else:
                        # Mark the NamedTemporatyFile as not to be deleted
                        formfields.file.delete = False
                        formfields.file.close()
                        ## Rename the temporary file for the garbage collector
                        new_tmp_fullpath = os.path.split(formfields.file.name)[0] + "/" + CFG_WEBSUBMIT_TMP_VIDEO_PREFIX + argd['access'] + "_" + os.path.split(formfields.file.name)[1]
                        os.rename(formfields.file.name, new_tmp_fullpath)

                    # Write the path to the temp file to a file in STORAGEDIR
                    fp = open(os.path.join(dir_to_open, "filepath"), "w")
                    fp.write(new_tmp_fullpath)
                    fp.close()
                    fp = open(os.path.join(dir_to_open, "filename"), "w")
                    fp.write(filename)
                    fp.close()

                    ## We are going to extract some thumbnails for websubmit ##
                    sample_dir = os.path.join(curdir,
                                              'files',
                                              str(user_info['uid']),
                                              CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_DIR)
                    try:
                        ## Remove old thumbnails
                        shutil.rmtree(sample_dir)
                    except OSError:
                        register_exception(req=req, alert_admin=False)
                    try:
                        os.makedirs(os.path.join(curdir,
                                                 'files',
                                                 str(user_info['uid']),
                                                 sample_dir))
                    except OSError:
                        register_exception(req=req, alert_admin=False)
                    try:
                        extract_frames(input_file=new_tmp_fullpath,
                                       output_file=os.path.join(sample_dir, CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_FNAME),
                                       size="600x600",
                                       numberof=5)
                        json_response['frames'] = []
                        for extracted_frame in os.listdir(sample_dir):
                            json_response['frames'].append(extracted_frame)
                    except:
                        ## If the frame extraction fails, something was bad with the video
                        os.remove(new_tmp_fullpath)
                        register_exception(req=req, alert_admin=False)
                        raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)

                    ## Try to detect the aspect. If this fails, the video is not readable
                    ## or a wrong file might have been uploaded
                    try:
                        (aspect, width, height) = determine_aspect(new_tmp_fullpath)
                        if aspect:
                            aspx, aspy = aspect.split(':')
                        else:
                            # No aspect string: reduce width/height by
                            # their greatest common divisor.
                            the_gcd = gcd(width, height)
                            aspx = str(width / the_gcd)
                            aspy = str(height / the_gcd)
                        json_response['aspx'] = aspx
                        json_response['aspy'] = aspy
                    except TypeError:
                        ## If the aspect detection completely fails
                        os.remove(new_tmp_fullpath)
                        register_exception(req=req, alert_admin=False)
                        raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)

                    ## Try to extract some metadata from the video container
                    metadata = ffprobe_metadata(new_tmp_fullpath)
                    json_response['meta_title'] = metadata['format'].get('TAG:title')
                    json_response['meta_description'] = metadata['format'].get('TAG:description')
                    json_response['meta_year'] = metadata['format'].get('TAG:year')
                    json_response['meta_author'] = metadata['format'].get('TAG:author')
                ## Empty file name
                else:
                    raise apache.SERVER_RETURN(apache.HTTP_BAD_REQUEST)
                ## We found our file, we can break the loop
                break

    # Send our response
    if CFG_JSON_AVAILABLE:
        dumped_response = json.dumps(json_response)
        # store the response in the websubmit directory
        # this is needed if the submission is not finished and continued later
        response_dir = os.path.join(curdir,
                                    'files',
                                    str(user_info['uid']),
                                    "response")
        try:
            os.makedirs(response_dir)
        except OSError:
            # register_exception(req=req, alert_admin=False)
            pass
        fp = open(os.path.join(response_dir, "response"), "w")
        fp.write(dumped_response)
        fp.close()
        return dumped_response
def templates(self, req, form):
    """Handle an edit/templates request.

    Renders the BibEdit template manager page, or answers an Ajax
    request (detected by a 'jsondata' form field) with a JSON reply.
    """
    uid = current_user.get_id()
    argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})

    # Abort if the simplejson module isn't available
    if not CFG_JSON_AVAILABLE:
        title = 'Record Editor Template Manager'
        body = '''Sorry, the record editor cannot operate when the `simplejson' module is not installed. Please see the INSTALL file.'''
        return page(title = title,
                    body = body,
                    errors = [],
                    warnings = [],
                    uid = uid,
                    language = argd['ln'],
                    navtrail = navtrail_bibedit,
                    lastupdated = __lastupdated__,
                    req = req,
                    body_css_classes = ['bibedit'])

    # If it is an Ajax request, extract any JSON data.
    ajax_request = False
    if 'jsondata' in form:
        json_data = json.loads(str(form['jsondata']))
        # Deunicode all strings (Invenio doesn't have unicode
        # support).
        json_data = json_unicode_to_utf8(json_data)
        ajax_request = True
        json_response = {'resultCode': 0}

    # Authorization.
    if current_user.is_guest:
        # User is not logged in.
        if not ajax_request:
            # Do not display the introductory recID selection box to guest
            # users (as it used to be with v0.99.0):
            dummy_auth_code, auth_message = acc_authorize_action(req,
                                                                'runbibedit')
            referer = '/edit'
            return page_not_authorized(req=req,
                                       referer=referer,
                                       text=auth_message,
                                       navtrail=navtrail)
        else:
            # Session has most likely timed out.
            json_response.update({'resultCode': 100})
            return json.dumps(json_response)

    # Handle request.
    if not ajax_request:
        # Show BibEdit template management start page.
        body, errors, warnings = perform_request_init_template_interface()
        title = 'Record Editor Template Manager'
        return page(title = title,
                    body = body,
                    errors = errors,
                    warnings = warnings,
                    uid = uid,
                    language = argd['ln'],
                    navtrail = navtrail_bibedit,
                    lastupdated = __lastupdated__,
                    req = req,
                    body_css_classes = ['bibedit'])
    else:
        # Handle AJAX request.
        json_response.update(perform_request_ajax_template_interface(json_data))
        return json.dumps(json_response)
def _process_json_request(self, form, req):
    """Dispatch a MultiEdit JSON request to the engine.

    Decodes the JSON payload found under ``self._JSON_DATA_KEY``,
    extracts the common request fields, then routes on ``actionType``
    to the matching ``multi_edit_engine`` call. Returns the engine's
    result serialised as a JSON string, or a single space for an
    unknown action type.
    """
    argd = wash_urlargd(form, {
        self._JSON_DATA_KEY: (str, ""),
    })
    # load json data
    json_data_string = argd[self._JSON_DATA_KEY]
    json_data_unicode = json.loads(json_data_string)
    # De-unicode all strings (Invenio doesn't have unicode support).
    json_data = json_unicode_to_utf8(json_data_unicode)
    # Common fields shared by all action types.
    language = json_data["language"]
    search_criteria = json_data["searchCriteria"]
    output_tags = json_data["outputTags"]
    # Comma-separated list of tags, whitespace-tolerant.
    output_tags = output_tags.split(',')
    output_tags = [tag.strip() for tag in output_tags]
    action_type = json_data["actionType"]
    current_record_id = json_data["currentRecordID"]
    commands = json_data["commands"]
    output_format = json_data["outputFormat"]
    page_to_display = json_data["pageToDisplay"]
    collection = json_data["collection"]
    compute_modifications = json_data["compute_modifications"]
    checked_records = json_data["checked_records"]
    json_response = {}
    if action_type == self._action_types.test_search:
        # Plain search preview: no commands applied yet (empty list).
        json_response.update(multi_edit_engine.perform_request_test_search(
            search_criteria,
            [],
            output_format,
            page_to_display,
            language,
            output_tags,
            collection,
            req=req,
            checked_records=checked_records))
        json_response['display_info_box'] = 1
        json_response['info_html'] = ""
        return json.dumps(json_response)
    elif action_type == self._action_types.display_detailed_record:
        # Show one record, unmodified (no commands).
        json_response.update(multi_edit_engine.perform_request_detailed_record(
            current_record_id,
            [],
            output_format,
            language))
        return json.dumps(json_response)
    elif action_type == self._action_types.preview_results:
        # Preview the search results with the edit commands applied.
        commands_list, upload_mode, tag_list = self._create_commands_list(commands)
        json_response = {}
        json_response.update(multi_edit_engine.perform_request_test_search(
            search_criteria,
            commands_list,
            output_format,
            page_to_display,
            language,
            output_tags,
            collection,
            compute_modifications,
            upload_mode,
            req,
            checked_records))
        return json.dumps(json_response)
    elif action_type == self._action_types.display_detailed_result:
        # Show one record with the edit commands applied.
        commands_list, upload_mode, tag_list = self._create_commands_list(commands)
        json_response.update(multi_edit_engine.perform_request_detailed_record(
            current_record_id,
            commands_list,
            output_format,
            language))
        return json.dumps(json_response)
    elif action_type == self._action_types.submit_changes:
        # Apply the commands for real and schedule the upload.
        commands_list, upload_mode, tag_list = self._create_commands_list(commands)
        json_response.update(multi_edit_engine.perform_request_submit_changes(search_criteria, commands_list, language, upload_mode, tag_list, collection, req, checked_records))
        return json.dumps(json_response)
    # In case we obtain a wrong action type we return an empty page.
    return " "
def uploadfile(self, req, form):
    """Handle a single asynchronous (Javascript/Flash) file upload.

    Similar to /submit, but only considers files. Saves the uploaded
    file into the websubmit storage directory, tries to create a GIF
    icon for it, and returns a JSON mapping of form key ->
    {'name': ..., 'iconName': ...}.

    Authentication is performed based on a session ID passed as a form
    parameter instead of cookie-based authentication, due to the use of
    this URL by the Flash plugin (to upload multiple files at once),
    which does not route cookies.

    FIXME: consider adding /deletefile and /modifyfile functions +
    parsing of additional parameters to rename files, add comments,
    restrictions, etc.
    """
    argd = wash_urlargd(form, {
        'doctype': (str, ''),
        'access': (str, ''),
        'indir': (str, ''),
        'session_id': (str, ''),
        'rename': (str, ''),
        })
    curdir = None
    # All three routing parameters are mandatory to locate the
    # submission directory.
    if "indir" not in form or \
            "doctype" not in form or \
            "access" not in form:
        raise apache.SERVER_RETURN(apache.HTTP_BAD_REQUEST)
    else:
        curdir = os.path.join(CFG_WEBSUBMIT_STORAGEDIR,
                              argd['indir'],
                              argd['doctype'],
                              argd['access'])
    user_info = collect_user_info(req)
    if "session_id" in form:
        # Are we uploading using Flash, which does not transmit
        # cookie? The expect to receive session_id as a form
        # parameter.  First check that IP addresses do not
        # mismatch.
        uid = session.uid
        user_info = collect_user_info(uid)
        try:
            # 'act' holds the current submission action code.
            # NOTE(review): py2-only builtin `file()`; bare except
            # swallows all errors, not just IOError — verify intent.
            act_fd = file(os.path.join(curdir, 'act'))
            action = act_fd.read()
            act_fd.close()
        except:
            action = ""
    # Is user authorized to perform this action?
    # NOTE(review): `uid` is only bound inside the session_id branch
    # above — a request without session_id would raise NameError here.
    (auth_code, auth_message) = acc_authorize_action(uid, "submit",
                                                     authorized_if_no_roles=not isGuestUser(uid),
                                                     verbose=0,
                                                     doctype=argd['doctype'],
                                                     act=action)
    if acc_is_role("submit", doctype=argd['doctype'], act=action) and auth_code != 0:
        # User cannot submit
        raise apache.SERVER_RETURN(apache.HTTP_UNAUTHORIZED)
    else:
        # Process the upload and get the response
        added_files = {}
        for key, formfields in form.items():
            # Flash appends "[]" to repeated field names.
            filename = key.replace("[]", "")
            file_to_open = os.path.join(curdir, filename)
            if hasattr(formfields, "filename") and formfields.filename:
                # Per-user, per-field target directory.
                dir_to_open = os.path.abspath(os.path.join(curdir,
                                                           'files',
                                                           str(user_info['uid']),
                                                           key))
                try:
                    # Guard against path escapes out of the storage dir.
                    assert(dir_to_open.startswith(CFG_WEBSUBMIT_STORAGEDIR))
                except AssertionError:
                    register_exception(req=req,
                                       prefix='curdir="%s", key="%s"' % (curdir, key))
                    raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)
                if not os.path.exists(dir_to_open):
                    try:
                        os.makedirs(dir_to_open)
                    except OSError as e:
                        if e.errno != errno.EEXIST:
                            # If the issue is only that directory
                            # already exists, then continue, else
                            # report
                            register_exception(req=req, alert_admin=True)
                            raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)
                filename = formfields.filename
                ## Before saving the file to disc, wash the filename (in particular
                ## washing away UNIX and Windows (e.g. DFS) paths):
                filename = os.path.basename(filename.split('\\')[-1])
                filename = filename.strip()
                if filename != "":
                    # Check that file does not already exist
                    # NOTE(review): `n` is unused — dead leftover.
                    n = 1
                    while os.path.exists(os.path.join(dir_to_open, filename)):
                        #dirname, basename, extension = decompose_file(new_destination_path)
                        basedir, name, extension = decompose_file(filename)
                        new_name = propose_next_docname(name)
                        filename = new_name + extension
                    # This may be dangerous if the file size is bigger than the available memory
                    fp = open(os.path.join(dir_to_open, filename), "w")
                    fp.write(formfields.file.read())
                    fp.close()
                    # Remember the last uploaded file name for the
                    # submission machinery.
                    fp = open(os.path.join(curdir, "lastuploadedfile"), "w")
                    fp.write(filename)
                    fp.close()
                    fp = open(file_to_open, "w")
                    fp.write(filename)
                    fp.close()
                    try:
                        # Create icon
                        (icon_path, icon_name) = create_icon(
                            { 'input-file'           : os.path.join(dir_to_open, filename),
                              'icon-name'            : filename, # extension stripped automatically
                              'icon-file-format'     : 'gif',
                              'multipage-icon'       : False,
                              'multipage-icon-delay' : 100,
                              'icon-scale'           : "300>", # Resize only if width > 300
                              'verbosity'            : 0,
                              })
                        icons_dir = os.path.join(os.path.join(curdir,
                                                              'icons',
                                                              str(user_info['uid']),
                                                              key))
                        if not os.path.exists(icons_dir):
                            # Create uid/icons dir if needed
                            try:
                                os.makedirs(icons_dir)
                            except OSError as e:
                                if e.errno != errno.EEXIST:
                                    # If the issue is only that
                                    # directory already exists,
                                    # then continue, else report
                                    register_exception(req=req, alert_admin=True)
                                    raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)
                        os.rename(os.path.join(icon_path, icon_name),
                                  os.path.join(icons_dir, icon_name))
                        added_files[key] = {'name': filename,
                                            'iconName': icon_name}
                    except InvenioWebSubmitIconCreatorError as e:
                        # We could not create the icon
                        added_files[key] = {'name': filename}
                        continue
                else:
                    # Empty (washed-away) file name.
                    raise apache.SERVER_RETURN(apache.HTTP_BAD_REQUEST)
        # Send our response
        # NOTE(review): returns None when CFG_JSON_AVAILABLE is false.
        if CFG_JSON_AVAILABLE:
            return json.dumps(added_files)
def upload_video(self, req, form):
    """Handle an asynchronous upload of a (large) video file.

    A clone of uploadfile but for (large) videos. Does not copy the
    uploaded file to the websubmit directory. Instead, the path to the
    temporary file is stored inside the submission directory, thumbnail
    frames are extracted, the aspect ratio is detected and container
    metadata is probed. Returns a JSON response that is also persisted
    under the submission's ``response`` directory so an interrupted
    submission can be resumed later.
    """
    def gcd(a, b):
        """ the euclidean algorithm """
        while a:
            a, b = b % a, a
        return b

    from invenio.modules.encoder.extract import extract_frames
    from invenio.modules.encoder.config import CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_DIR, CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_FNAME
    from invenio.modules.encoder.encode import determine_aspect
    from invenio.modules.encoder.utils import probe
    from invenio.modules.encoder.metadata import ffprobe_metadata
    from invenio.legacy.websubmit.config import CFG_WEBSUBMIT_TMP_VIDEO_PREFIX

    argd = wash_urlargd(form, {
        'doctype': (str, ''),
        'access': (str, ''),
        'indir': (str, ''),
        'session_id': (str, ''),
        'rename': (str, ''),
        })
    curdir = None
    # All three routing parameters are mandatory to locate the
    # submission directory.
    if "indir" not in form or \
            "doctype" not in form or \
            "access" not in form:
        raise apache.SERVER_RETURN(apache.HTTP_BAD_REQUEST)
    else:
        curdir = os.path.join(CFG_WEBSUBMIT_STORAGEDIR,
                              argd['indir'],
                              argd['doctype'],
                              argd['access'])
    user_info = collect_user_info(req)
    if "session_id" in form:
        # Are we uploading using Flash, which does not transmit
        # cookie? The expect to receive session_id as a form
        # parameter.  First check that IP addresses do not
        # mismatch.
        uid = session.uid
        user_info = collect_user_info(uid)
        try:
            # 'act' holds the current submission action code.
            act_fd = file(os.path.join(curdir, 'act'))
            action = act_fd.read()
            act_fd.close()
        except:
            # BUGFIX: previously assigned `act = ""`, leaving `action`
            # undefined and raising NameError in the authorization call
            # below whenever the 'act' file could not be read (the
            # sibling uploadfile() correctly assigns `action`).
            action = ""
    # Is user authorized to perform this action?
    # NOTE(review): `uid` is only bound inside the session_id branch
    # above — a request without session_id would raise NameError here.
    (auth_code, auth_message) = acc_authorize_action(uid, "submit",
                                                     authorized_if_no_roles=not isGuestUser(uid),
                                                     verbose=0,
                                                     doctype=argd['doctype'],
                                                     act=action)
    if acc_is_role("submit", doctype=argd['doctype'], act=action) and auth_code != 0:
        # User cannot submit
        raise apache.SERVER_RETURN(apache.HTTP_UNAUTHORIZED)
    else:
        # Process the upload and get the response
        json_response = {}
        for key, formfields in form.items():
            # Flash appends "[]" to repeated field names.
            filename = key.replace("[]", "")
            if hasattr(formfields, "filename") and formfields.filename:
                # Per-user, per-field target directory.
                dir_to_open = os.path.abspath(os.path.join(curdir,
                                                           'files',
                                                           str(user_info['uid']),
                                                           key))
                try:
                    # Guard against path escapes out of the storage dir.
                    assert(dir_to_open.startswith(CFG_WEBSUBMIT_STORAGEDIR))
                except AssertionError:
                    register_exception(req=req,
                                       prefix='curdir="%s", key="%s"' % (curdir, key))
                    raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)
                if not os.path.exists(dir_to_open):
                    try:
                        os.makedirs(dir_to_open)
                    except OSError as e:
                        if e.errno != errno.EEXIST:
                            # If the issue is only that directory
                            # already exists, then continue, else
                            # report
                            register_exception(req=req, alert_admin=True)
                            raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)
                filename = formfields.filename
                ## Before saving the file to disc, wash the filename (in particular
                ## washing away UNIX and Windows (e.g. DFS) paths):
                filename = os.path.basename(filename.split('\\')[-1])
                filename = filename.strip()
                if filename != "":
                    # Check that file does not already exist
                    while os.path.exists(os.path.join(dir_to_open, filename)):
                        #dirname, basename, extension = decompose_file(new_destination_path)
                        basedir, name, extension = decompose_file(filename)
                        new_name = propose_next_docname(name)
                        filename = new_name + extension

                    #-------------#
                    # VIDEO STUFF #
                    #-------------#

                    ## Remove all previous uploads for this access token.
                    filelist = os.listdir(os.path.split(formfields.file.name)[0])
                    for afile in filelist:
                        if argd['access'] in afile:
                            os.remove(os.path.join(os.path.split(formfields.file.name)[0], afile))

                    ## Check if the file is a readable video.
                    ## We must exclude all image and audio formats that are readable by ffprobe.
                    ## NOTE(review): os.path.splitext()[1] includes the leading
                    ## dot (".jpg"), so membership in this dot-less list can
                    ## never be true — only the probe() check is effective.
                    ## Kept as-is to preserve behavior; verify upstream intent.
                    if (os.path.splitext(filename)[1] in ['jpg', 'jpeg', 'gif', 'tiff', 'bmp',
                                                          'png', 'tga', 'jp2', 'j2k', 'jpf',
                                                          'jpm', 'mj2', 'biff', 'cgm', 'exif',
                                                          'img', 'mng', 'pic', 'pict', 'raw',
                                                          'wmf', 'jpe', 'jif', 'jfif', 'jfi',
                                                          'tif', 'webp', 'svg', 'ai', 'ps',
                                                          'psd', 'wav', 'mp3', 'pcm', 'aiff',
                                                          'au', 'flac', 'wma', 'm4a', 'wv',
                                                          'oga', 'm4a', 'm4b', 'm4p', 'm4r',
                                                          'aac', 'mp4', 'vox', 'amr', 'snd']
                            or not probe(formfields.file.name)):
                        formfields.file.close()
                        raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)

                    ## We have no "delete" attribute in Python 2.4
                    if sys.hexversion < 0x2050000:
                        ## We need to rename first and create a dummy file
                        ## Rename the temporary file for the garbage collector
                        new_tmp_fullpath = os.path.split(formfields.file.name)[0] + "/" + CFG_WEBSUBMIT_TMP_VIDEO_PREFIX + argd['access'] + "_" + os.path.split(formfields.file.name)[1]
                        os.rename(formfields.file.name, new_tmp_fullpath)
                        dummy = open(formfields.file.name, "w")
                        dummy.close()
                        formfields.file.close()
                    else:
                        # Mark the NamedTemporaryFile as not to be deleted
                        formfields.file.delete = False
                        formfields.file.close()
                        ## Rename the temporary file for the garbage collector
                        new_tmp_fullpath = os.path.split(formfields.file.name)[0] + "/" + CFG_WEBSUBMIT_TMP_VIDEO_PREFIX + argd['access'] + "_" + os.path.split(formfields.file.name)[1]
                        os.rename(formfields.file.name, new_tmp_fullpath)

                    # Write the path to the temp file to a file in STORAGEDIR
                    fp = open(os.path.join(dir_to_open, "filepath"), "w")
                    fp.write(new_tmp_fullpath)
                    fp.close()
                    fp = open(os.path.join(dir_to_open, "filename"), "w")
                    fp.write(filename)
                    fp.close()

                    ## We are going to extract some thumbnails for websubmit ##
                    sample_dir = os.path.join(curdir,
                                              'files',
                                              str(user_info['uid']),
                                              CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_DIR)
                    try:
                        ## Remove old thumbnails
                        shutil.rmtree(sample_dir)
                    except OSError:
                        register_exception(req=req, alert_admin=False)
                    try:
                        os.makedirs(os.path.join(curdir,
                                                 'files',
                                                 str(user_info['uid']),
                                                 sample_dir))
                    except OSError:
                        register_exception(req=req, alert_admin=False)
                    try:
                        extract_frames(input_file=new_tmp_fullpath,
                                       output_file=os.path.join(sample_dir, CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_FNAME),
                                       size="600x600",
                                       numberof=5)
                        json_response['frames'] = []
                        for extracted_frame in os.listdir(sample_dir):
                            json_response['frames'].append(extracted_frame)
                    except:
                        ## If the frame extraction fails, something was bad with the video
                        os.remove(new_tmp_fullpath)
                        register_exception(req=req, alert_admin=False)
                        raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)

                    ## Try to detect the aspect. if this fails, the video is not readable
                    ## or a wrong file might have been uploaded
                    try:
                        (aspect, width, height) = determine_aspect(new_tmp_fullpath)
                        if aspect:
                            aspx, aspy = aspect.split(':')
                        else:
                            the_gcd = gcd(width, height)
                            aspx = str(width / the_gcd)
                            aspy = str(height / the_gcd)
                        json_response['aspx'] = aspx
                        json_response['aspy'] = aspy
                    except TypeError:
                        ## If the aspect detection completely fails
                        os.remove(new_tmp_fullpath)
                        register_exception(req=req, alert_admin=False)
                        raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)

                    ## Try to extract some metadata from the video container
                    metadata = ffprobe_metadata(new_tmp_fullpath)
                    json_response['meta_title'] = metadata['format'].get('TAG:title')
                    json_response['meta_description'] = metadata['format'].get('TAG:description')
                    json_response['meta_year'] = metadata['format'].get('TAG:year')
                    json_response['meta_author'] = metadata['format'].get('TAG:author')
                ## Empty file name
                else:
                    raise apache.SERVER_RETURN(apache.HTTP_BAD_REQUEST)
                ## We found our file, we can break the loop
                break

        # Send our response
        if CFG_JSON_AVAILABLE:
            dumped_response = json.dumps(json_response)
            # store the response in the websubmit directory
            # this is needed if the submission is not finished and continued later
            response_dir = os.path.join(curdir, 'files', str(user_info['uid']), "response")
            try:
                os.makedirs(response_dir)
            except OSError:
                # Best effort: the directory may already exist.
                pass
            fp = open(os.path.join(response_dir, "response"), "w")
            fp.write(dumped_response)
            fp.close()
            return dumped_response
def index(self, req, form):
    """Handle all BibEdit requests.

    The responsibilities of this function are:
    * JSON decoding and encoding.
    * Redirection, if necessary.
    * Authorization.
    * Calling the appropriate function from the engine.
    """
    uid = current_user.get_id()
    argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
    # Abort if the simplejson module isn't available
    if not CFG_JSON_AVAILABLE:
        title = 'Record Editor'
        body = '''Sorry, the record editor cannot operate when the `simplejson' module is not installed. Please see the INSTALL file.'''
        return page(title = title,
                    body = body,
                    errors = [],
                    warnings = [],
                    uid = uid,
                    language = argd['ln'],
                    navtrail = navtrail,
                    lastupdated = __lastupdated__,
                    req = req,
                    body_css_classes = ['bibedit'])
    # If it is an Ajax request, extract any JSON data.
    ajax_request, recid = False, None
    if 'jsondata' in form:
        json_data = json.loads(str(form['jsondata']))
        # Deunicode all strings (Invenio doesn't have unicode
        # support).
        json_data = json_unicode_to_utf8(json_data)
        ajax_request = True
        if 'recID' in json_data:
            recid = json_data['recID']
        # Echo the request ID back so the client can pair responses.
        json_response = {'resultCode': 0, 'ID': json_data['ID']}
    # Authorization.
    if current_user.is_guest:
        # User is not logged in.
        if not ajax_request:
            # Do not display the introductory recID selection box to guest
            # users (as it used to be with v0.99.0):
            dummy_auth_code, auth_message = acc_authorize_action(req,
                                                                'runbibedit')
            referer = '/edit/'
            if self.recid:
                # Point the login redirect at the record-specific URL.
                referer = '/%s/%s/edit/' % (CFG_SITE_RECORD, self.recid)
            return page_not_authorized(req=req, referer=referer,
                                       text=auth_message, navtrail=navtrail)
        else:
            # Session has most likely timed out.
            json_response.update({'resultCode': 100})
            return json.dumps(json_response)
    elif self.recid:
        # Handle RESTful calls from logged in users by redirecting to
        # generic URL.
        redirect_to_url(req, '%s/%s/edit/#state=edit&recid=%s&recrev=%s' % (
            CFG_SITE_SECURE_URL, CFG_SITE_RECORD, self.recid, ""))
    elif recid is not None:
        json_response.update({'recID': recid})
        if json_data['requestType'] == "getRecord":
            # Authorize access to record.
            if not user_can_edit_record_collection(req, recid):
                json_response.update({'resultCode': 101})
                return json.dumps(json_response)
    # Handle request.
    if not ajax_request:
        # Show BibEdit start page.
        body, errors, warnings = perform_request_init(uid, argd['ln'], req, __lastupdated__)
        title = 'Record Editor'
        return page(title = title,
                    body = body,
                    errors = errors,
                    warnings = warnings,
                    uid = uid,
                    language = argd['ln'],
                    navtrail = navtrail,
                    lastupdated = __lastupdated__,
                    req = req,
                    body_css_classes = ['bibedit'])
    else:
        # Handle AJAX request.
        json_response.update(perform_request_ajax(req, recid, uid,
                                                  json_data))
        return json.dumps(json_response)
def test_create_example_url(self, email, login_method, robot, ip, assertion=None, timeout=None, referer=None, groups=None, nickname=None):
    """Build a signed robot-login URL for testing purposes.

    @param email: email of the user we want to login as.
    @type email: string
    @param login_method: the login_method name as specified in
        CFG_EXTERNAL_AUTHENTICATION (when None, the first method flagged
        as robot-capable is picked).
    @type login_method: string
    @param robot: the identifier of this robot.
    @type robot: string
    @param assertion: any further data we want to send to.
    @type: json serializable mapping
    @param ip: the IP of the user.
    @type: string
    @param timeout: timeout when the URL will expire (in seconds from the Epoch)
    @type timeout: float
    @param referer: the URL where to land after successful login.
    @type referer: string
    @param groups: the list of optional group of the user.
    @type groups: list of string
    @param nickname: the optional nickname of the user.
    @type nickname: string
    @return: the URL to login as the user.
    @rtype: string
    """
    from invenio.modules.access.local_config import CFG_EXTERNAL_AUTHENTICATION
    from invenio.utils.url import create_url

    # Build the assertion mapping, reusing the caller's dict when given.
    claims = {} if assertion is None else assertion
    claims[self.email_attribute_name] = email
    if nickname:
        claims[self.nickname_attribute_name] = nickname
    if groups:
        claims[self.groups_attribute_name] = self.groups_separator.join(groups)

    # Default expiry: now plus the configured timeout window.
    expiry = time.time() + CFG_ROBOT_URL_TIMEOUT if timeout is None else timeout
    claims[self.timeout_attribute_name] = expiry

    landing = CFG_SITE_URL if referer is None else referer

    # No explicit login method: pick the first robot-capable one.
    if login_method is None:
        for candidate, details in iteritems(CFG_EXTERNAL_AUTHENTICATION):
            if details[2]:
                login_method = candidate
                break

    robot_keys = load_robot_keys()
    claims[self.userip_attribute_name] = ip

    # Serialise (and optionally compress) the assertion, then sign it
    # with the shared key registered for this login method and robot.
    serialized = json.dumps(claims)
    if self.use_zlib:
        serialized = base64.urlsafe_b64encode(compress(serialized))
    signature = self.sign(robot_keys[login_method][robot], serialized)

    return create_url("%s%s" % (CFG_SITE_SECURE_URL, "/youraccount/robotlogin"), {
        'assertion': serialized,
        'robot': robot,
        'login_method': login_method,
        'digest': signature,
        'referer': landing,
    })
def index(self, req, form):
    """Handle all BibMerge requests.

    The responsibilities of this function are:
    * JSON decoding and encoding.
    * Redirection, if necessary.
    * Authorization.
    * Calling the appropriate function from the engine.
    """
    # If it is an Ajax request, extract any JSON data.
    ajax_request, recid1, recid2 = False, None, None
    argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
    if 'jsondata' in form:
        json_data = json.loads(str(form['jsondata']))
        # Deunicode all strings (Invenio doesn't have unicode
        # support).
        json_data = json_unicode_to_utf8(json_data)
        ajax_request = True
        json_response = {}
        try:
            # NOTE(review): dict.has_key is Python-2-only (removed in
            # Python 3); kept as-is for this legacy code base.
            if json_data.has_key('recID1'):
                # Normalise the record IDs to integers in-place.
                recid1 = int(json_data['recID1'])
                json_data['recID1'] = recid1
            if json_data.has_key('recID2'):
                # Record 2 may also be given by other modes (e.g.
                # revision); only coerce when it is a plain recid.
                if json_data.get('record2Mode') == "recid":
                    recid2 = int(json_data['recID2'])
                    json_data['recID2'] = recid2
        except ValueError:
            json_response.update({
                'resultCode': 1,
                'resultText': 'Invalid record ID!'
            })
            return json.dumps(json_response)
        if json_data.has_key("duplicate"):
            if json_data.get('record2Mode') == "recid":
                json_data["duplicate"] = int(json_data["duplicate"])
    # Authorization.
    user_info = collect_user_info(req)
    if user_info['email'] == 'guest':
        # User is not logged in.
        if not ajax_request:
            # Do not display the introductory recID selection box to guest
            # users (as it used to be with v0.99.0):
            auth_code, auth_message = acc_authorize_action(
                req, 'runbibmerge')
            referer = '/merge/'
            return page_not_authorized(req=req,
                                       referer=referer,
                                       text=auth_message,
                                       navtrail=navtrail)
        else:
            # Session has most likely timed out.
            json_response.update({
                'resultCode': 1,
                'resultText': 'Error: Not logged in'
            })
            return json.dumps(json_response)
    elif self.recid:
        # Handle RESTful call by storing recid and redirecting to
        # generic URL.
        redirect_to_url(
            req, '%s/%s/merge/' % (CFG_SITE_SECURE_URL, CFG_SITE_RECORD))
    if recid1 is not None:
        # Authorize access to record 1.
        auth_code, auth_message = acc_authorize_action(
            req,
            'runbibmerge',
            collection=guess_primary_collection_of_a_record(recid1))
        if auth_code != 0:
            json_response.update({
                'resultCode': 1,
                'resultText': 'No access to record %s' % recid1
            })
            return json.dumps(json_response)
    if recid2 is not None:
        # Authorize access to record 2.
        auth_code, auth_message = acc_authorize_action(
            req,
            'runbibmerge',
            collection=guess_primary_collection_of_a_record(recid2))
        if auth_code != 0:
            json_response.update({
                'resultCode': 1,
                'resultText': 'No access to record %s' % recid2
            })
            return json.dumps(json_response)
    # Handle request.
    uid = getUid(req)
    if not ajax_request:
        # Show BibEdit start page.
        body, errors, warnings = perform_request_init()
        # Inject the merger's client-side scripts into the page head.
        scripts = ["vendors/json2/json2.js", "js/merger/engine.js"]
        metaheaderadd = ""
        for script in scripts:
            metaheaderadd += '<script type="text/javascript" src="%s/%s"></script>' % (
                CFG_SITE_URL, auto_version_url(script))
        return page(title='Record Merger',
                    metaheaderadd=metaheaderadd,
                    body=body,
                    errors=errors,
                    warnings=warnings,
                    uid=uid,
                    language=argd['ln'],
                    navtrail=navtrail,
                    lastupdated=__lastupdated__,
                    req=req)
    else:
        # Handle AJAX request.
        json_response = perform_request_ajax(req, uid, json_data)
        return json.dumps(json_response)
def index(self, req, form):
    """Handle all Authorlist Manager requests.

    Dispatches on the ``state`` URL parameter:
    * ``''``        -- render the main manager page.
    * ``itemize``   -- JSON list of the user's papers.
    * ``open``      -- open one paper for editing (rights-checked).
    * ``load``      -- JSON-encoded data of a paper.
    * ``save``      -- persist posted data, answer with the saved id.
    * ``clone``     -- clone a paper, answer with the clone's id.
    * ``export``    -- convert posted data and offer it as a download.
    * ``delete``    -- delete a paper.
    * ``import``    -- import author data from a record id.
    * ``importxml`` -- import author data from an uploaded XML file.
    Anything else redirects to the manager's main page.
    """
    uid = getUid(req)
    argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
                               'state': (str, '')})
    ln = argd['ln']
    state = argd['state']
    _ = gettext_set_language(ln)
    # Abort if the simplejson module isn't available
    if not CFG_JSON_AVAILABLE:
        title = 'Authorlist Manager'
        body = '''Sorry, the record editor cannot operate when the `simplejson' module is not installed. Please see the INSTALL file.'''
        return page(title=title,
                    body=body,
                    errors=[],
                    warnings=[],
                    uid=uid,
                    language=ln,
                    navtrail=navtrail,
                    lastupdated=__lastupdated__,
                    req=req)
    # Extract additional JSON data from form
    # NOTE(review): `options` is decoded but never used below.
    if 'options' in form:
        options = json.loads(str(form['options']))
        # Deunicode all strings (Invenio doesn't have unicode
        # support).
        options = json_unicode_to_utf8(options)
    # Authorization.
    not_authorized = authorlist_engine.user_authorization(req, ln)
    if not_authorized:
        return not_authorized
    # User is authorized, let's handle different states
    # if no state parameter, load the main page
    if state == '':
        return page(title=_('Author List Manager'),
                    metaheaderadd=authorlist_templates.index_header(),
                    body=authorlist_templates.body(),
                    errors=[],
                    warnings=[],
                    uid=uid,
                    language=ln,
                    navtrail=navtrail,
                    lastupdated=__lastupdated__,
                    req=req)
    elif state == 'itemize':
        data = authorlist_db.itemize(uid)
        req.content_type = 'application/json'
        req.write(json.dumps(data))
    # open parameter set? initialize an Authorlist instance
    elif state == 'open':
        # if 'id' in url, check if user has right to modify this paper
        try:
            received = wash_urlargd(form, {'id': (str, None)})
            paper_id = received['id']
            if authorlist_engine.check_user_rights(uid, paper_id):
                return page(title=_('Author List Manager'),
                            metaheaderadd=authorlist_templates.list_header(),
                            body=authorlist_templates.body(),
                            errors=[],
                            warnings=[],
                            uid=uid,
                            language=ln,
                            navtrail=navtrail,
                            lastupdated=__lastupdated__,
                            req=req)
            else:
                # no rights to modify this paper
                redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
        except:
            # redirect to the main page if weird stuff happens
            redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
    # On load state we will answer with the JSON encoded data of the passed
    # paper id. Should usually not be directly surfed by the user.
    elif state == 'load':
        try:
            received = wash_urlargd(form, {'id': (str, None)})
            paper_id = received['id']
            data = authorlist_db.load(paper_id)
            req.content_type = 'application/json'
            req.write(json.dumps(data))
        except:
            # redirect to the main page if weird stuff happens
            redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
    # The save state saves the send data in the database using the passed
    # paper id. Responds with a JSON object containing the id of the paper
    # as saved in the database. Should usually not be surfed directly by the
    # user
    elif state == 'save':
        try:
            received = wash_urlargd(form, {'id': (str, None),
                                           'data': (str, '')})
            paper_id = received['id']
            in_data = json.loads(received['data'])
            out_data = authorlist_db.save(paper_id, uid, in_data)
            req.content_type = 'application/json'
            req.write(json.dumps(out_data))
        except:
            # redirect to the main page if something weird happens
            redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
    # Clones the paper with the given id in the database and responds with a
    # JSON object containing the id of the clone. Should usually not surfed
    # directly by the user.
    elif state == 'clone':
        try:
            received = wash_urlargd(form, {'id': (str, None)})
            paper_id = received['id']
            data = authorlist_db.clone(paper_id, uid)
            req.content_type = 'application/json'
            req.write(json.dumps(data))
        except:
            # redirect to the main page if something weird happens
            redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
    # Transform the sent data into the format passed in the URL using a
    # authorlist_engine converter. Responds with the MIME type of the
    # converter and offers it as a download (content-disposition header).
    elif state == 'export':
        try:
            received = wash_urlargd(form, {'format': (str, None),
                                           'data': (str, '')})
            data_format = received['format']
            data = received['data']
            converter = authorlist_engine.Converters.get(data_format)
            # BUGFIX: the disposition type was misspelled "attachement";
            # RFC 6266 defines the token as "attachment", so browsers
            # were not guaranteed to offer the file as a download.
            attachment = 'attachment; filename="%s"' % converter.FILE_NAME
            req.headers_out['Content-Type'] = converter.CONTENT_TYPE
            req.headers_out['Content-Disposition'] = attachment
            #redirect_to_url(req, authorlist_engine.dumps(data, converter))
            req.write(authorlist_engine.dumps(data, converter))
        except:
            # throw exception if something weird happens
            return sys.exc_info()
    elif state == 'delete':
        try:
            received = wash_urlargd(form, {'id': (str, None)})
            paper_id = received['id']
            data = authorlist_db.delete(paper_id)
            req.content_type = 'application/json'
            req.write(json.dumps(data))
        except:
            # redirect to the main page if something weird happens
            redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
    elif state == 'import':
        try:
            received = wash_urlargd(form, {'importid': (str, None)})
            recID = received['importid']
            data = authorlist_engine.retrieve_data_from_record(recID)
            req.content_type = 'application/json'
            req.write(json.dumps(data))
        except:
            # redirect to the main page if something weird happens
            redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
    elif state == 'importxml':
        try:
            received = wash_urlargd(form, {'xmlfile': (Field, None)})
            xml_string = received['xmlfile'].value
            import_data = authorlist_engine.retrieve_data_from_xml(xml_string)
            req.content_type = 'application/json'
            req.write(json.dumps(import_data))
        except:
            # redirect to the main page if something weird happens
            redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
    # Unrecognised state: just go to the main page.
    else:
        redirect_to_url(req, '%s/authorlist/' % (CFG_SITE_URL))
def index(self, req, form):
    """Handle all BibMerge requests.

    The responsibilities of this function are:
    * JSON decoding and encoding.
    * Redirection, if necessary.
    * Authorization.
    * Calling the appropriate function from the engine.
    """
    # If it is an Ajax request, extract any JSON data.
    ajax_request, recid1, recid2 = False, None, None
    argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
    if 'jsondata' in form:
        json_data = json.loads(str(form['jsondata']))
        # Deunicode all strings (Invenio doesn't have unicode
        # support).
        json_data = json_unicode_to_utf8(json_data)
        ajax_request = True
        json_response = {}
        if 'recID1' in json_data:
            recid1 = json_data['recID1']
        if 'recID2' in json_data:
            recid2 = json_data['recID2']
    # Authorization.
    user_info = collect_user_info(req)
    if user_info['email'] == 'guest':
        # User is not logged in.
        if not ajax_request:
            # Do not display the introductory recID selection box to guest
            # users (as it used to be with v0.99.0):
            auth_code, auth_message = acc_authorize_action(req,
                                                          'runbibmerge')
            referer = '/merge/'
            return page_not_authorized(req=req, referer=referer,
                                       text=auth_message, navtrail=navtrail)
        else:
            # Session has most likely timed out.
            json_response.update({'resultCode': 1,
                                  'resultText': 'Error: Not logged in'})
            return json.dumps(json_response)
    elif self.recid:
        # Handle RESTful call by storing recid and redirecting to
        # generic URL.
        redirect_to_url(req, '%s/%s/merge/' % (CFG_SITE_SECURE_URL,
                                               CFG_SITE_RECORD))
    if recid1 is not None:
        # Authorize access to record 1.
        auth_code, auth_message = acc_authorize_action(req, 'runbibmerge',
            collection=guess_primary_collection_of_a_record(recid1))
        if auth_code != 0:
            json_response.update({'resultCode': 1,
                                  'resultText': 'No access to record %s' % recid1})
            return json.dumps(json_response)
    if recid2 is not None:
        # Authorize access to record 2.
        auth_code, auth_message = acc_authorize_action(req, 'runbibmerge',
            collection=guess_primary_collection_of_a_record(recid2))
        if auth_code != 0:
            json_response.update({'resultCode': 1,
                                  'resultText': 'No access to record %s' % recid2})
            return json.dumps(json_response)
    # Handle request.
    uid = getUid(req)
    if not ajax_request:
        # Show BibEdit start page.
        body, errors, warnings = perform_request_init()
        # NOTE(review): the template references both %(site)s and
        # %(url)s but only 'site' is supplied — as written this raises
        # KeyError: 'url'. Looks like a lost mapping entry (probably
        # {'site': CFG_SITE_URL, 'url': url_for(...)}); verify against
        # upstream before relying on the non-AJAX path.
        metaheaderadd = """<script type="text/javascript" src="%(site)s/js/json2.js"></script> <script type="text/javascript" src="%(url)s"></script>""" % {'site': url_for('merger.static', filename='js/merger/engine.js')}
        title = 'Record Merger'
        return page(title = title,
                    metaheaderadd = metaheaderadd,
                    body = body,
                    errors = errors,
                    warnings = warnings,
                    uid = uid,
                    language = argd['ln'],
                    navtrail = navtrail,
                    lastupdated = __lastupdated__,
                    req = req)
    else:
        # Handle AJAX request.
        json_response = perform_request_ajax(req, uid, json_data)
        return json.dumps(json_response)