def _process(self):
    """Render the event permissions dialog for the principal posted by the client."""
    # The permissions tree for events is the second element returned by
    # get_permissions_info.
    tree = get_permissions_info(Event)[1]
    form = request.form
    return jsonify_template('events/management/event_permissions_dialog.html',
                            permissions_tree=tree,
                            permissions=form.getlist('permissions'),
                            principal=json.loads(form['principal']))
def _process(self):
    """Render the generic permissions dialog for the submitted principal.

    The object type whose permissions are shown is taken from the ``type``
    view arg and resolved through ``PermissionsField.type_mapping``.
    """
    target_type = PermissionsField.type_mapping[request.view_args['type']]
    tree = get_permissions_info(target_type)[1]
    return jsonify_template('events/management/permissions_dialog.html',
                            permissions_tree=tree,
                            permissions=request.form.getlist('permissions'),
                            principal=json.loads(request.form['principal']))
def storeTemplate(self, templateId, templateData):
    """Add or update a badge template of the conference.

    ``templateData`` is a string produced by converting the object
    "template" of the save() javascript function of
    WConfModifBadgeDesign.tpl into a JSON string.  The decoded value is a
    list composed of:
      - the name of the template
      - a dictionary with 2 keys: width and height of the template, in pixels
      - a number which is the number of pixels per cm; it is defined in
        WConfModifBadgeDesign.tpl (right now its value is 50)
      - a list of dictionaries, each with the attributes of one of the
        items of the template

    If the template had any temporary backgrounds, they are archived.
    """
    # Parse once instead of once per branch.
    data = loads(templateData)
    # dict.has_key() is deprecated (and gone in Python 3); use `in`.
    if templateId in self.__templates:
        self.__templates[templateId].setData(data)
        self.__templates[templateId].archiveTempBackgrounds(self.__conf)
    else:
        self.__templates[templateId] = BadgeTemplate(templateId, data)
    self.notifyModification()
def storeConvertedFile(requestIP, params):
    """Store a file converted by the external conversion server.

    Returns the path to the temp file used in the process so that it can
    be deleted at a later stage, or ``None`` when the request is rejected,
    the locator cannot be resolved or the conversion failed.
    """
    # extract the server name from the url and resolve it, so that only
    # callbacks coming from the conversion server are accepted
    serverURL = Config.getInstance().getFileConverterServerURL()
    up = urlparse.urlparse(serverURL)
    ip_addrs = resolve_host(up[1])
    # check that the request comes from the conversion server
    if requestIP not in ip_addrs:
        return
    if params["status"] == '1':
        # python dicts come with ' instead of " by default
        # using a json encoder on the server side would help...
        # (dead `locator = {}` pre-assignment removed: it was overwritten
        # unconditionally on the next line)
        locator = loads(params["directory"].replace('\'', '"'))
        mat = CDSConvFileConverter._getMaterial(locator)
        if mat is not None:
            filePath = CDSConvFileConverter._saveFileToTemp(params)
            fileName = params["filename"]
            for resource in mat.getResourceList():
                # if the pdf name is the same as any of the resources, and
                # the material does not have a PDF yet:
                if (isinstance(resource, conference.LocalFile)
                        and os.path.splitext(resource.fileName)[0] == os.path.splitext(fileName)[0]
                        and not mat.hasFile(fileName)):
                    resource.setPDFConversionRequestDate(None)
                    f = conference.LocalFile()
                    f.setName(fileName)
                    f.setFileName(fileName)
                    f.setFilePath(filePath)
                    mat.addResource(f)
            return filePath
        else:
            # TODO: log that the locator does not exist for this file
            pass
    else:
        # TODO: process the error received from the conversion server
        pass
def storeConvertedFile(requestIP, params):
    """Returns the path to the temp file used in the process so that it
    can be deleted at a later stage."""
    # Accept the callback only if it originates from the configured
    # conversion server (compare against the resolved host addresses).
    converter_url = Config.getInstance().getFileConverterServerURL()
    allowed_ips = resolve_host(urlparse.urlparse(converter_url)[1])
    if requestIP not in allowed_ips:
        return
    if params["status"] != '1':
        # Here it should be processed the received error from the conversion server.
        return
    # python dicts come with ' instead of " by default
    # using a json encoder on the server side would help...
    locator = {}
    locator = loads(params["directory"].replace('\'', '"'))
    material = CDSConvFileConverter._getMaterial(locator)
    if material is None:
        # Locator does not exist for this file.
        return
    temp_path = CDSConvFileConverter._saveFileToTemp(params)
    pdf_name = params["filename"]
    for res in material.getResourceList():
        # if the pdf name is the same as any of the resources, and the
        # material does not have a PDF yet:
        same_base = os.path.splitext(res.fileName)[0] == os.path.splitext(pdf_name)[0] \
            if isinstance(res, conference.LocalFile) else False
        if same_base and not material.hasFile(pdf_name):
            res.setPDFConversionRequestDate(None)
            new_file = conference.LocalFile()
            new_file.setName(pdf_name)
            new_file.setFileName(pdf_name)
            new_file.setFilePath(temp_path)
            material.addResource(new_file)
    return temp_path
def storeConvertedFile(requestIP, params):
    """Store a file converted by the external conversion server.

    Returns the path to the temp file used in the process so that it can
    be deleted at a later stage; returns ``None`` when the request is
    rejected, the locator cannot be resolved or the conversion failed
    (each case is logged).
    """
    # extract the server name from the url
    serverURL = Config.getInstance().getFileConverterServerURL()
    up = urlparse.urlparse(serverURL)
    ip_addrs = resolve_host(up[1])
    # check that the request comes from the conversion server
    if requestIP not in ip_addrs:
        logger.error('Request coming from {} not accepted (allowed IPs: {})'.format(requestIP, ip_addrs))
        return
    if params["status"] == '1':
        # python dicts come with ' instead of " by default
        # using a json encoder on the server side would help...
        # (dead `locator = {}` pre-assignment removed: it was overwritten
        # unconditionally on the next line)
        locator = loads(params["directory"].replace('\'', '"'))
        mat = CDSConvFileConverter._getMaterial(locator)
        if mat is not None:
            filePath = CDSConvFileConverter._saveFileToTemp(params)
            fileName = params["filename"]
            for resource in mat.getResourceList():
                # if the pdf name is the same as any of the resources, and
                # the material does not have a PDF yet:
                if (isinstance(resource, conference.LocalFile)
                        and os.path.splitext(resource.fileName)[0] == os.path.splitext(fileName)[0]
                        and not mat.hasFile(fileName)):
                    resource.setPDFConversionRequestDate(None)
                    f = conference.LocalFile()
                    f.setName(fileName)
                    f.setFileName(fileName)
                    f.setFilePath(filePath)
                    mat.addResource(f)
                    logger.info("File '{}' stored in {}".format(f.getName(), locator))
            return filePath
        else:
            # redundant `pass` after logger.error removed
            logger.error('Locator could not be resolved: {}'.format(params))
    else:
        logger.error('Error converting file: {}'.format(params))
def _checkParams(self, params):
    """Parse the upload request parameters into instance attributes.

    Handles two upload types: "file" (one or several uploaded files) and
    "link" (one or several URLs).  Files exceeding the configured total
    size limit are recorded with an empty path instead of being saved.
    """
    self._params = params
    self._action = ""
    self._overwrite = False
    #if request has already been handled (DB conflict), then we keep the existing files list
    self._files = []
    self._links = []
    self._topdf = "topdf" in params
    self._displayName = params.get("displayName", "").strip()
    self._uploadType = params.get("uploadType", "")
    self._materialId = params.get("materialId", "")
    self._description = params.get("description", "")
    self._statusSelection = int(params.get("statusSelection", 1))
    self._visibility = int(params.get("visibility", 0))
    self._password = params.get("password", "")
    # NOTE(review): the password field is excluded from sanitisation,
    # presumably so its value is preserved verbatim -- confirm.
    self._doNotSanitizeFields.append("password")
    self._userList = json.loads(params.get("userList", "[]"))
    # Limit is multiplied by BYTES_1MB below, i.e. it is configured in MB.
    maxUploadFilesTotalSize = float(self._cfg.getMaxUploadFilesTotalSize())
    if self._uploadType == "file":
        if isinstance(params["file"], list):
            files = params["file"]
            # several files uploaded at once: the shared name/description
            # fields are cleared
            self._displayName = ""
            self._description = ""
        else:
            files = [params["file"]]
        for fileUpload in files:
            # empty form fields arrive as plain strings; skip them
            if type(fileUpload) != str and fileUpload.filename.strip(
            ) != "":
                fDict = {}
                fDict["fileName"] = fileUpload.filename.encode("utf-8")
                # request.content_length is only an estimate of the upload size
                estimSize = request.content_length
                if maxUploadFilesTotalSize and estimSize > (
                        maxUploadFilesTotalSize * BYTES_1MB):
                    # if file is too big, do not save it in disk
                    fDict["filePath"] = ''
                    fDict["size"] = estimSize
                else:
                    fDict["filePath"] = self._saveFileToTemp(fileUpload)
                    fDict["size"] = os.path.getsize(fDict["filePath"])
                self._setErrorList(fDict)
                self._files.append(fDict)
    elif self._uploadType == "link":
        if isinstance(params["url"], list):
            urls = params["url"]
            self._displayName = ""
            self._description = ""
        else:
            urls = [params["url"]]
        matType = params.get("materialType", "")
        for url in urls:
            # ignore blank URL fields
            if not url.strip():
                continue
            link = {}
            link["url"] = url
            link["matType"] = matType
            self._links.append(link)
def decode(s):
    """Parse a JSON string and convert the resulting unicode structure to UTF-8."""
    parsed = loads(s)
    return unicode_struct_to_utf8(parsed)
def decode(s):
    """Parse a JSON string and convert the resulting unicode values to UTF-8.

    The parameter was renamed from ``str`` to ``s`` to stop shadowing the
    builtin; positional callers are unaffected.
    """
    return unicodeToUtf8(loads(s))
class RedisScript(object):
    """Wrapper for redis scripts.

    Makes passing arguments more comfortable and allows result conversion.
    """

    # Maps the `result` metadata value to a callable that converts the raw
    # value returned by redis.
    RESULT_PROCESSORS = {
        'json': json.loads,
        'json_odict': lambda x: OrderedDict(json.loads(x))
    }

    @classmethod
    def from_file(cls, client, filename, name=None, _broken=False):
        """Load script and metadata from a file.

        Metadata syntax: comma-separated key=value pairs in the first line.
        """
        if not name:
            # Default the script name to the file name without extension.
            name = os.path.splitext(os.path.basename(filename))[0]
        with open(filename) as f:
            metadata_line = f.readline().strip().lstrip('- \t')
            try:
                metadata = dict(re.search(r'(\S+)=(\S+)', item).groups()
                                for item in metadata_line.split(','))
            except AttributeError:
                # re.search returned None for a malformed item
                raise ValueError('Invalid metadata line: %s' % metadata_line)
            return cls(client, name, metadata, f.read(), _broken)

    @classmethod
    def load_directory(cls, client, path='.'):
        """Load scripts from the given directory.

        Scripts must have a .lua extension and will be named like the file."""
        scripts = {}
        failed = False
        for filename in glob.iglob(os.path.join(path, '*.lua')):
            # If one script fails to load because of a ConnectionError we don't even try
            # to load other scripts to avoid long timeouts
            script = cls.from_file(client, filename, _broken=failed)
            failed = script.broken
            scripts[script.name] = script
        return scripts

    def __init__(self, client, name, metadata, code, _broken=False):
        self.broken = False
        self.name = name
        self._check_metadata(metadata)
        if _broken:
            # If we already know that redis is broken we can keep things fast
            # by not even trying to send the script to redis
            self._script = _BrokenScript(name, client)
            self.broken = True
            return
        from indico.util.redis import ConnectionError
        try:
            self._script = client.register_script(code)
        except ConnectionError:
            Logger.get('redis').exception('Could not load script %s' % name)
            self._script = _BrokenScript(name, client)
            self.broken = True

    def _check_metadata(self, metadata):
        """Validate metadata and store the result processor and arg count."""
        result_type = metadata.get('result')
        self._process_result = self.RESULT_PROCESSORS[result_type] if result_type else None
        self._args = int(metadata.get('args', 0))
        if self._args < 0:
            raise ValueError('Argument count cannot be negative')

    def __call__(self, *args, **kwargs):
        """Execute the script"""
        if len(args) != self._args:
            raise TypeError('Script takes exactly %d argument (%d given)' % (self._args, len(args)))
        client = kwargs.get('client', self._script.registered_client)
        # redis-py checks if the client is an instance of redis-py.client.BasePipeline.
        # And if it's wrapped in a LocalProxy, we need to get it out first.
        if isinstance(client, LocalProxy):
            client = client._get_current_object()
        import redis
        if isinstance(client, redis.client.BasePipeline) and self._process_result:
            raise ValueError('Script with result conversion cannot be called on a pipeline')
        try:
            res = self._script(args=args, client=client)
        # `except E, e` is Python-2-only syntax and the binding was unused;
        # `except E:` is valid on both 2.6+ and 3.
        except redis.RedisError:
            # If we are not on a pipeline and the execution fails, log it with arguments
            Logger.get('redis').exception('Executing %s(%r) failed', self.name, args)
            return None
        if isinstance(self._script, _BrokenScript):
            # If we "called" a broken script it logged itself being broken but we need to bail out early
            return None
        return self._process_result(res) if self._process_result else res
def _checkParams(self, params):
    """Parse the upload request parameters into instance attributes.

    Handles two upload types: "file" (one or several uploaded files) and
    "link" (one or several URLs).  Files exceeding the configured total
    size limit are recorded with an empty path instead of being saved.
    """
    self._params = params
    self._action = ""
    self._overwrite = False
    # if request has already been handled (DB conflict), then we keep the existing files list
    self._files = []
    self._links = []
    self._topdf = "topdf" in params
    self._displayName = params.get("displayName", "").strip()
    self._uploadType = params.get("uploadType", "")
    self._materialId = params.get("materialId", "")
    self._description = params.get("description", "")
    self._statusSelection = int(params.get("statusSelection", 1))
    self._visibility = int(params.get("visibility", 0))
    self._password = params.get("password", "")
    # NOTE(review): the password field is excluded from sanitisation,
    # presumably so its value is preserved verbatim -- confirm.
    self._doNotSanitizeFields.append("password")
    self._userList = json.loads(params.get("userList", "[]"))
    # Limit is multiplied by BYTES_1MB below, i.e. it is configured in MB.
    maxUploadFilesTotalSize = float(self._cfg.getMaxUploadFilesTotalSize())
    if self._uploadType == "file":
        if isinstance(params["file"], list):
            files = params["file"]
            # several files uploaded at once: the shared name/description
            # fields are cleared
            self._displayName = ""
            self._description = ""
        else:
            files = [params["file"]]
        for fileUpload in files:
            # empty form fields arrive as plain strings; skip them
            if type(fileUpload) != str and fileUpload.filename.strip() != "":
                fDict = {}
                fDict["fileName"] = fileUpload.filename.encode("utf-8")
                # request.content_length is only an estimate of the upload size
                estimSize = request.content_length
                if maxUploadFilesTotalSize and estimSize > (maxUploadFilesTotalSize * BYTES_1MB):
                    # if file is too big, do not save it in disk
                    fDict["filePath"] = ""
                    fDict["size"] = estimSize
                else:
                    fDict["filePath"] = self._saveFileToTemp(fileUpload)
                    fDict["size"] = os.path.getsize(fDict["filePath"])
                self._setErrorList(fDict)
                self._files.append(fDict)
    elif self._uploadType == "link":
        if isinstance(params["url"], list):
            urls = params["url"]
            self._displayName = ""
            self._description = ""
        else:
            urls = [params["url"]]
        matType = params.get("materialType", "")
        for url in urls:
            # ignore blank URL fields
            if not url.strip():
                continue
            link = {}
            link["url"] = url
            link["matType"] = matType
            self._links.append(link)
def _process(self):
    """Show the permissions dialog for the principal submitted by the client."""
    form = request.form
    obj_type = PermissionsField.type_mapping[request.view_args['type']]
    return jsonify_template('events/management/permissions_dialog.html',
                            permissions_tree=get_permissions_info(obj_type)[1],
                            permissions=form.getlist('permissions'),
                            principal=json.loads(form['principal']))