Exemple #1
0
class CsvHandler(handler.ContentHandler):
    """SAX content handler that flattens XML records into CSV rows.

    Every element named *main_tag* starts a new record.  The character
    data of each tag listed in *tags* is captured (subject to *deps*)
    and one double-quoted CSV row is written to *out_fid* when the
    record's closing tag is reached.
    """

    def __init__(self, main_tag, tags, out_fid, header=True, deps=None):
        """
        main_tag -- element name that delimits one record
        tags     -- element names whose text is stored as CSV columns
        out_fid  -- writable file-like object receiving the CSV output
        header   -- write a header line with the column names first
        deps     -- optional mapping tag -> list of tags that must be
                    open for the tag to be captured
        """
        # BUG FIX: `deps` used to default to a shared mutable {} -- use
        # None as the sentinel so instances never share state.
        deps = {} if deps is None else deps
        self.main_tag = main_tag
        self.store = tags
        # All tags we must track: stored tags plus every dependency tag.
        self.tags = list(set(tags + list(itertools.chain(*deps.values()))))
        self.deps = deps
        self.out_fid = out_fid
        self.header = header

        # Per-record templates, copied in startElement(main_tag).
        self.in_flags_0 = OrderedDict(zip(self.tags, len(self.tags) * [False]))
        self.content_0 = OrderedDict(zip(self.store, len(self.store) * ['']))

    def startDocument(self):
        """Reset counters and optionally emit the CSV header line."""
        self.num_recs = 0
        self.last_tag = None

        if self.header:
            self.out_fid.write(','.join(self.content_0) + '\n')

    def endDocument(self):
        pass

    def startElement(self, name, attrs):
        """Open a new record on main_tag; start capturing known tags."""
        if name == self.main_tag:
            self.in_flags = self.in_flags_0.copy()
            self.content = self.content_0.copy()

        if name in self.tags:
            # Capture only when every dependency tag is currently open.
            if and_reduce(
                [self.in_flags[nm] for nm in self.deps.get(name, [])]):
                self.in_flags[name] = True
                if name in self.store:
                    self.last_tag = name
                    self.content[name] = ''
        else:
            self.last_tag = None

    def endElement(self, name):
        """Close a tag; emit the CSV row when the record ends."""
        self.last_tag = None
        if name == self.main_tag:
            self.num_recs += 1
            self.writeRecord()
        elif name in self.tags:
            self.in_flags[name] = False

    def characters(self, content):
        # Character data may arrive in several chunks; append them all.
        if self.last_tag:
            self.content[self.last_tag] += content

    def writeRecord(self):
        """Write the current record as one double-quoted CSV line."""
        self.out_fid.write('\"' + '\",\"'.join(self.content.values()) + '\"\n')
Exemple #2
0
class CsvHandler(handler.ContentHandler):
    """SAX handler writing selected XML tag contents as quoted CSV rows."""

    def __init__(self, main_tag, tags, out_fid, header=True, deps=None):
        """
        main_tag -- record-delimiting element name
        tags     -- element names stored as CSV columns
        out_fid  -- writable file-like target
        header   -- emit a column-name header line on startDocument
        deps     -- mapping tag -> tags that must be open to capture it
        """
        # BUG FIX 1: the original body mixed tabs and spaces (a TabError
        # under Python 3); re-indented consistently with spaces.
        # BUG FIX 2: mutable default argument deps={} replaced with a
        # None sentinel so instances do not share one dict.
        if deps is None:
            deps = {}
        self.main_tag = main_tag
        self.store = tags
        # Stored tags plus every tag referenced by the dependency map.
        self.tags = list(set(tags + list(itertools.chain(*deps.values()))))
        self.deps = deps
        self.out_fid = out_fid
        self.header = header

        # Templates copied at the start of each record.
        self.in_flags_0 = OrderedDict(zip(self.tags, len(self.tags) * [False]))
        self.content_0 = OrderedDict(zip(self.store, len(self.store) * ['']))

    def startDocument(self):
        """Reset the record counter and write the header if requested."""
        self.num_recs = 0
        self.last_tag = None

        if self.header:
            self.out_fid.write(','.join(self.content_0) + '\n')

    def endDocument(self):
        pass

    def startElement(self, name, attrs):
        """Begin a record on main_tag; arm capture for tracked tags."""
        if name == self.main_tag:
            # New record: fresh flag and content tables.
            self.in_flags = self.in_flags_0.copy()
            self.content = self.content_0.copy()

        if name in self.tags:
            # Only capture when all dependency tags are currently open.
            if and_reduce([self.in_flags[nm] for nm in self.deps.get(name, [])]):
                self.in_flags[name] = True
                if name in self.store:
                    self.last_tag = name
                    self.content[name] = ''
        else:
            self.last_tag = None

    def endElement(self, name):
        """Close a tag; flush the record when main_tag closes."""
        self.last_tag = None
        if name == self.main_tag:
            self.num_recs += 1
            self.writeRecord()
        elif name in self.tags:
            self.in_flags[name] = False

    def characters(self, content):
        # SAX may deliver text in chunks -- accumulate them.
        if self.last_tag:
            self.content[self.last_tag] += content

    def writeRecord(self):
        """Emit the current record as one double-quoted CSV line."""
        self.out_fid.write('"' + '","'.join(self.content.values()) + '"\n')
Exemple #3
0
    def __new__(cls, name, bases, attrs):
        """Collect Base-typed attributes into an ordered `named_types` map.

        Attributes that are Base instances are removed from the class
        namespace and recorded in `named_types` (an OrderedDict keyed by
        attribute name).  Construction arguments are remembered in
        `args`/`kwargs` so the type can be re-created.
        """
        fields = OrderedDict()
        # BUG FIX: the loop variable used to be called `name`, shadowing
        # the class-name parameter, so the class handed to type.__new__
        # was named after the last attribute iterated.
        for attr_name, attr in list(attrs.items()):  # explicit copy
            # isinstance(attr, type) == attr is a class
            if isinstance(attr, Base):
                fields[attr_name] = attr
                del attrs[attr_name]
        attrs['named_types'] = fields
        attrs['required'] = attrs.get('required', True)
        attrs['default_data'] = {}

        attrs['args'] = [fields.copy()]
        attrs['kwargs'] = {'required': attrs['required']}

        return type.__new__(cls, name, bases, attrs)
    def getlvl0data(self, raw, query=None):
        '''Level zero presents each year with total file sizes
            raw data for this method should be the master list of files'''

        if query != None:
            selection = raw[query]
            # save the raw data for this level for reverse navigation
            self.selectionhistory[self.level] = selection[0:]
            self.queryhistory[self.level] = query
        else:
            selection = raw

        # Iterate through the master list and group files by year
        newraw = OrderedDict({})
        print len(selection), type(selection)
        for file in selection:

            year = file[5]
            try:
                newraw[year].append(file)
            except:
                newraw[year] = [file]

        # Get an empty copy of the years for that data dictionary
        data = newraw.copy()
        for year in data:
            data[year] = 0
        temp = data.items()
        temp.sort()
        temp.reverse()
        data = OrderedDict(temp)

        # Iterate through the raw list and group the data by year
        for pair in newraw.iteritems():
            year = pair[0]
            files = pair[1]
            for file in files:
                data[year] += file[4]

        # Save the raw that level 1 will use to sort itself out
        self.updateRaw(newraw)
        return data
 def test_copying(self):
     # Check that ordered dicts are copyable, deepcopyable, picklable,
     # and have a repr/eval round-trip
     pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
     od = OrderedDict(pairs)
     update_test = OrderedDict()
     update_test.update(od)
     for i, dup in enumerate([
                 od.copy(),
                 copy.copy(od),
                 copy.deepcopy(od),
                 pickle.loads(pickle.dumps(od, 0)),
                 pickle.loads(pickle.dumps(od, 1)),
                 pickle.loads(pickle.dumps(od, 2)),
                 pickle.loads(pickle.dumps(od, -1)),
                 eval(repr(od)),
                 update_test,
                 OrderedDict(od),
                 ]):
         self.assert_(dup is not od)
         self.assertEquals(dup, od)
         self.assertEquals(list(dup.items()), list(od.items()))
         self.assertEquals(len(dup), len(od))
         self.assertEquals(type(dup), type(od))
 def test_copying(self):
     # Check that ordered dicts are copyable, deepcopyable, picklable,
     # and have a repr/eval round-trip
     pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
     od = OrderedDict(pairs)
     update_test = OrderedDict()
     update_test.update(od)
     for i, dup in enumerate([
             od.copy(),
             copy.copy(od),
             copy.deepcopy(od),
             pickle.loads(pickle.dumps(od, 0)),
             pickle.loads(pickle.dumps(od, 1)),
             pickle.loads(pickle.dumps(od, 2)),
             pickle.loads(pickle.dumps(od, -1)),
             eval(repr(od)),
             update_test,
             OrderedDict(od),
     ]):
         self.assert_(dup is not od)
         self.assertEquals(dup, od)
         self.assertEquals(list(dup.items()), list(od.items()))
         self.assertEquals(len(dup), len(od))
         self.assertEquals(type(dup), type(od))
    def calc_gradient(self):
        """Calculates the gradient vectors for all outputs in this Driver's
        workflow.

        First-order finite differencing (central, forward or backward,
        selected by ``self.form``) around the parameters' current values.
        Results land in ``self.gradient[param][output]``; the baseline
        point and its outputs are kept on the instance for the
        on-diagonal Hessian terms.
        """

        # Each component runs its calc_derivatives method.
        # We used to do this in the driver instead, but we've moved it in
        # here to make the interface more uniform.
        self._parent.calc_derivatives(first=True)

        self.setup()

        # Create our 2D dictionary the first time we execute.
        if not self.gradient:
            for name in self.param_names:
                self.gradient[name] = {}

        # Pull initial state and stepsizes from driver's parameters
        base_param = OrderedDict()
        stepsize = {}
        for key, item in self._parent.get_parameters().iteritems():
            base_param[key] = item.evaluate()

            # A per-parameter fd_step overrides the driver-wide default.
            if item.fd_step:
                stepsize[key] = item.fd_step
            else:
                stepsize[key] = self.default_stepsize

        # For Forward or Backward diff, we want to save the baseline
        # objective and constraints. These are also needed for the
        # on-diagonal Hessian terms, so we will save them in the class
        # later.
        base_data = self._run_point(base_param)

        # Set up problem based on Finite Difference type
        if self.form == 'central':
            deltas = [1, -1]
            func = diff_1st_central
        elif self.form == 'forward':
            deltas = [1, 0]
            func = diff_1st_fwrdbwrd
        else:
            deltas = [0, -1]
            func = diff_1st_fwrdbwrd

        self.gradient_case = OrderedDict()

        # Assemble input data: for each parameter, one perturbed point
        # per delta (a delta of 0 reuses the baseline run below).
        for param in self.param_names:

            pcase = []
            for j_step, delta in enumerate(deltas):

                case = base_param.copy()
                case[param] += delta*stepsize[param]
                pcase.append({ 'param': case })

            self.gradient_case[param] = pcase

        # Run all "cases".
        # TODO - Integrate OpenMDAO's concurrent processing capability once it
        # is formalized. This operation is inherently paralellizable.
        for key, case in self.gradient_case.iteritems():
            for ipcase, pcase in enumerate(case):
                if deltas[ipcase]:
                    pcase['data'] = self._run_point(pcase['param'])
                else:
                    # Zero delta: the unperturbed baseline already holds
                    # this point's outputs.
                    pcase['data'] = base_data

        # Calculate gradients
        for key, case in self.gradient_case.iteritems():

            eps = stepsize[key]

            for name in list(self.objective_names + \
                             self.eqconst_names + \
                             self.ineqconst_names):
                self.gradient[key][name] = \
                    func(case[0]['data'][name],
                         case[1]['data'][name], eps)

        # Save these for Hessian calculation
        self.base_param = base_param
        self.base_data = base_data
# Column definitions for the sequence table, keyed by the UniProt-style
# line codes they are parsed from.
categories['AC'] = "varchar(30) NOT NULL"  # accession number
categories['SQ'] = "text(45000) NOT NULL"  # SQ   SEQUENCE XXXX AA; XXXXX MW; XXXXXXXXXXXXXXXX CRC64;
categories['LENGTH'] = "varchar(200) NOT NULL"  # SQ   SEQUENCE XXXX AA; XXXXX MW; XXXXXXXXXXXXXXXX CRC64;
categories['ORG'] = "text(500) NOT NULL"  # organism
categories['OC'] = "varchar(30) NOT NULL"  # organism classification; only the domain is kept
categories['OX'] = "varchar(200) NOT NULL"  # taxonomic ID
categories['HO'] = "text(500)"  # host organism
categories['inumber'] = "varchar(200) NOT NULL"
# categories['CC'] = "varchar(200)"  # comments section; the "PTM" field is the interesting one
# categories['SQi'] = "varchar(200)"  # SQ   SEQUENCE XXXX AA; XXXXX MW; XXXXXXXXXXXXXXXX CRC64;

# Template record: same keys as `categories`, every value an empty
# string (empty, not NULL).
empty_data = OrderedDict()
for cat in categories:
    empty_data[cat] = ''
data = empty_data.copy()  # working record dictionary, starts out empty

# BUG FIX: a plain DROP TABLE errors out when the table does not exist
# yet (e.g. on a fresh database); IF EXISTS makes the reset idempotent,
# matching the CREATE ... IF NOT EXISTS below.
cur.execute("DROP TABLE IF EXISTS " + tabla_sequence + ";")

# Create the sequence table from the column definitions above.
table_def_items = []  # "name type" fragments, one per column
for cat, value in categories.items():
    table_def_items.append(cat + ' ' + value)
table_def_2 = ', '.join(table_def_items)  # full column list for the DDL
cur.execute("CREATE TABLE IF NOT EXISTS " + tabla_sequence + " (" + table_def_2 + ") ENGINE=InnoDB;")
con.commit()

# Parser loop state.
i = 0
j = 0
ptm = ''
Exemple #9
0
 def copy(self):
    """Return a shallow copy of this reader as a new ParamReader."""
    data = OrderedDict.copy(self)
    return ParamReader(D=data)
Exemple #10
0
 def copy(self):
     """Return a shallow copy wrapped in a new PuPyDictionary."""
     shallow = OrderedDict.copy(self)
     return PuPyDictionary(shallow)
Exemple #11
0
    def getlvl0data(self, raw, query=False):
        '''Level zero presents each year with total file sizes
            raw data for this method should be the master list of files.

        Buckets the selected files into 15 size ranges and returns an
        OrderedDict mapping "low-high" range labels to file counts; the
        per-range file lists are handed to the next level through
        self.updateRaw().

        NOTE(review): the default query=False indexes raw[False], i.e.
        raw[0] for sequences -- presumably callers always pass a real
        slice/index; confirm against call sites.
        '''

        selection = raw[query]
        if len(selection) <= 1:
            #print 'Length of selection is:', len(selection)
            #print 'Selection data is:', selection
            # Can't go further - Rollback
            print 'Cannot go further.... Please select another range'
            pop = self.queryhistory.pop(self.level)
            print 'Popped %s. History now: %s' % (str(pop),
                                                  str(self.queryhistory))

            # Step back one navigation level and redo the selection there.
            self.level -= 1
            self.query = self.queryhistory[self.level]
            raw = self.selectionhistory[self.level]
            selection = raw[self.query]

        # Iterate through the master list and get max/min sizes
        # (file size lives at index 4 of each record).
        newraw = OrderedDict({})
        smax, smin = selection[0][4], selection[0][4]

        for file in selection:
            size = file[4]
            if size > smax:
                smax = size
            elif size < smin:
                smin = size

        smax += 10  #Hack to ensure biggest file falls into the mix
        #print 'Length of selection is:', len(selection)
        #if self.level > 0: print 'Selection data is:', selection
        #print 'Min = %f, Max = %f' %(smin, smax), '\n\n'

        # Create the bins with size ranges
        bincount = 15.00
        binsize = (smax - smin) / bincount
        low = smin
        bindata = OrderedDict({})  # captures the number of files per bin
        binlimits = OrderedDict({})  # holds the upper size limit for each bin

        # Label each bin "low-high" and remember its upper bound.
        for bin in range(int(bincount)):
            high = low + binsize
            label = '%f-%f' % (low, high)
            binlimits[label] = high
            low = high

        # Reiterate and get the no of files per bin
        newraw = binlimits.copy()  # raw data for use in next level
        bindata = binlimits.copy()
        for bin in newraw:
            newraw[bin] = []  # empty the copy
        for bin in bindata:
            bindata[bin] = 0  # empty the copy

        #print 'Binlimits: %s' %str(binlimits)
        # Place each file into the first bin whose upper limit fits it.
        for file in selection:
            size = file[4]
            for bin in binlimits:
                if size <= binlimits[bin]:
                    newraw[bin].append(file)
                    bindata[bin] += 1
                    break  # sue me

        # Iterate through the raw list and group the data by year
        data = bindata.copy()

        # save the raw data for this level for reverse navigation
        self.selectionhistory[self.level] = raw.copy()

        # Save the raw that level 1 will use to sort itself out
        self.updateRaw(newraw)
        #print '\n\nData for this run: ,', data
        return data
Exemple #12
0
class Application(object):
    """Poor WSGI application which is called by WSGI server.

    Working of is describe in PEP 0333. This object store route dispatch table,
    and have methods for it's using and of course __call__ method for use
    as WSGI application.
    """

    __instances = []

    def __init__(self, name="__main__"):
        """Application class is per name singleton.

        That means, there could be exist only one instance with same name;
        a second attempt raises RuntimeError.
        """

        if Application.__instances.count(name):
            raise RuntimeError('Application with name %s exist yet.' % name)
        Application.__instances.append(name)

        # Application name
        self.__name = name

        # list of pre and post process handlers
        self.__pre = []
        self.__post = []

        # dhandlers table for default handers on methods {METHOD_GET: handler}
        self.__dhandlers = {}

        # handlers table of simple paths: {'/path': {METHOD_GET: handler}}
        self.__handlers = {}

        # route group filters: name -> (regex, convert function)
        self.__filters = {
            ':int': (r'-?\d+', int),
            ':float': (r'-?\d+(\.\d+)?', float),
            ':word': (r'\w+', uni),
            ':hex': (r'[0-9a-fA-F]+', str),
            ':re:': (None, uni),
            'none': (r'[^/]+', uni)
        }

        # handlers of regex paths: {r'/user/([a-z]?)': {METHOD_GET: handler}}
        self.__rhandlers = OrderedDict()

        # http state handlers: {HTTP_NOT_FOUND: {METHOD_GET: my_404_handler}}
        self.__shandlers = {}

        # -- Application variable
        self.__config = {
            'auto_args': True,
            'auto_form': True,
            'auto_json': True,
            'keep_blank_values': 0,
            'strict_parsing': 0,
            'json_content_types': [
                'application/json',
                'application/javascript',
                'application/merge-patch+json'],
            'form_content_types': [
                'application/x-www-form-urlencoded',
                'multipart/form-data'
            ],
            'auto_cookies': True,
            'debug': 'Off',
            'document_root': '',
            'document_index': 'Off',
            'secret_key': '%s%s%s%s' %
                          (__version__, version, getcwd(),
                           ''.join(str(x) for x in uname()))
        }

        try:
            self.__log_level = levels[environ.get('poor_LogLevel',
                                                  'warn').lower()]
        # BUG FIX: was a bare `except:` that also swallowed unrelated
        # errors (even KeyboardInterrupt); only the unknown-level KeyError
        # is expected from the lookup above.
        except KeyError:
            self.__log_level = LOG_WARNING
            self.log_error('Bad poor_LogLevel, default is warn.', LOG_WARNING)

    def __regex(self, match):
        """re.sub callback converting one <name:filter> group to regex.

        *match* carries two groups: the group name and the filter name.
        Returns a named-group pattern '(?P<name>regex)'.

        Raises RuntimeError for an unknown filter.
        """
        groups = match.groups()
        _filter = str(groups[1]).lower()

        if _filter in self.__filters:
            regex = self.__filters[_filter][0]
        elif _filter[:4] == ':re:':     # :re: filter have user defined regex
            regex = _filter[4:]
        else:
            # FIX: the original retried the table lookup here inside a
            # try/except, but the filter is already known to be absent,
            # so the lookup always raised; raise directly instead.
            raise RuntimeError("Undefined route group filter '%s'" %
                               _filter)

        return "(?P<%s>%s)" % (groups[0], regex)

    def __convertor(self, _filter):
        """Return the convert function registered for *_filter*.

        All user-defined ':re:...' filters share the generic ':re:'
        convertor.  Raises RuntimeError for an unknown filter.
        """
        key = str(_filter).lower()
        if key[:4] == ':re:':
            key = ':re:'
        try:
            return self.__filters[key][1]
        except KeyError:
            raise RuntimeError("Undefined route group filter '%s'" % key)

    @property
    def name(self):
        """Application name (read-only)."""
        return self.__name

    @property
    def filters(self):
        """Copy of the filter table.

        The filter table maps filter names to (regular expression,
        convert function) pairs; see Application.set_filter and
        Application.route.

        Default filters are:
            :int - match number and convert it to int
            :float - match number and convert it to float
            :word - match one unicode word
            :hex - match hexadecimal value and convert it to str
            :re: - match user defined regular expression
            none - match any string without '/' character

        The {/debug-info} page of your application lists every filter
        with its regular expression definition.
        """
        return self.__filters.copy()

    @property
    def pre(self):
        """Tuple of pre-process handlers (see Application.pre_process)."""
        return tuple(self.__pre)

    @property
    def post(self):
        """Tuple of post-process handlers (see Application.post_process)."""
        return tuple(self.__post)

    @property
    def dhandlers(self):
        """Copy of the default-handler table (see Application.set_default)."""
        return self.__dhandlers.copy()

    @property
    def handlers(self):
        """Copy of the static-route handler table (see Application.route)."""
        return self.__handlers.copy()

    @property
    def rhandlers(self):
        """Copy of the regular-expression handler table.

        See Application.route and Application.rroute.
        """
        return self.__rhandlers.copy()

    @property
    def shandlers(self):
        """Copy of the http-state (error) handler table.

        See Application.http_state.
        """
        return self.__shandlers.copy()

    @property
    def auto_args(self):
        """Automatic parsing of request arguments from the uri.

        When True (default) the Request object parses the request uri
        into its args variable.
        """
        return self.__config['auto_args']

    @auto_args.setter
    def auto_args(self, value):
        self.__config['auto_args'] = bool(value)

    @property
    def auto_form(self):
        """Automatic parsing of form arguments from the request body.

        When True (default) and the method is POST, PUT or PATCH with a
        content type listed in form_content_types, the Request object
        parses the body into its form variable.
        """
        return self.__config['auto_form']

    @auto_form.setter
    def auto_form(self, value):
        self.__config['auto_form'] = bool(value)

    @property
    def auto_json(self):
        """Automatic parsing of JSON from the request body.

        When True (default) and the method is POST, PUT or PATCH with a
        content type listed in json_content_types, the Request object
        parses the body into its json variable.
        """
        return self.__config['auto_json']

    @auto_json.setter
    def auto_json(self, value):
        self.__config['auto_json'] = bool(value)

    @property
    def auto_cookies(self):
        """Automatic parsing of cookies from the request headers.

        When True (default) and a Cookie request header is present, a
        SimpleCookie object is parsed into the Request cookies property.
        """
        return self.__config['auto_cookies']

    @auto_cookies.setter
    def auto_cookies(self, value):
        self.__config['auto_cookies'] = bool(value)

    @property
    def debug(self):
        """Application debug flag; another way to set poor_Debug.

        This setting is overridden by the poor_Debug environment
        variable.
        """
        return self.__config['debug'] == 'On'

    @debug.setter
    def debug(self, value):
        self.__config['debug'] = 'On' if bool(value) else 'Off'

    @property
    def document_root(self):
        """Application document_root; another way to set poor_DocumentRoot.

        This setting is overridden by the poor_DocumentRoot environment
        variable.
        """
        return self.__config['document_root']

    @document_root.setter
    def document_root(self, value):
        self.__config['document_root'] = value

    @property
    def document_index(self):
        """Application document_index flag.

        NOTE(review): the original docstring was copied from
        document_root; presumably this maps to a poor_DocumentIndex
        environment variable -- confirm against the request handling
        code.
        """
        return self.__config['document_index'] == 'On'

    @document_index.setter
    def document_index(self, value):
        self.__config['document_index'] = 'On' if bool(value) else 'Off'

    @property
    def secret_key(self):
        """Application secret key; may be replaced by poor_SecretKey.

        The secret key is used by the PoorSession class.  It is generated
        from some server variables, but the best practice is to set your
        own long key.
        """
        return self.__config['secret_key']

    @secret_key.setter
    def secret_key(self, value):
        self.__config['secret_key'] = value

    @property
    def keep_blank_values(self):
        """Keep blank values in request arguments.

        When 1 (0 is default), automatic parsing of the request uri or
        body keeps blank values as empty strings.
        """
        return self.__config['keep_blank_values']

    @keep_blank_values.setter
    def keep_blank_values(self, value):
        self.__config['keep_blank_values'] = int(value)

    @property
    def strict_parsing(self):
        """Strict parsing of request arguments.

        When 1 (0 is default), automatic parsing of the request uri or
        body raises an exception on parsing errors.
        """
        return self.__config['strict_parsing']

    @strict_parsing.setter
    def strict_parsing(self, value):
        self.__config['strict_parsing'] = int(value)

    @property
    def json_content_types(self):
        """Copy of the json content-type list.

        List of content-type strings for which an automatic Json object
        is created from the request body.

        BUG FIX: the docstring always promised a copy, but the live list
        was returned, letting callers mutate the configuration; return a
        copy, consistent with the other accessor properties.
        """
        return list(self.__config['json_content_types'])

    @property
    def form_content_types(self):
        """Copy of the form content-type list.

        List of content-type strings for which an automatic Form object
        is created from the request body.  Returns a copy so callers
        cannot mutate the configuration.
        """
        return list(self.__config['form_content_types'])

    def set_filter(self, name, regex, convertor=uni):
        """Create a new route filter or overwrite a builtin one.

        Arguments:
            name      - filter name as used in route or set_route (a ':'
                        is prepended automatically when missing).
            regex     - regular expression the filter matches.
            convertor - function or class converting the matched unicode
                        text; defaults to uni.

            app.set_filter('uint', r'\d+', int)
        """
        if name[0] != ':':
            name = ':' + name
        self.__filters[name] = (regex, convertor)

    def pre_process(self):
        """Decorator appending a pre-process handler.

        The wrapped function is called before each request.

            @app.pre_process()
            def before_each_request(req):
                ...
        """
        def decorator(fn):
            self.__pre.append(fn)
            return fn
        return decorator

    def add_pre_process(self, fn):
        """Append *fn* to the functions called before each request.

            app.add_pre_process(before_each_request)
        """
        self.__pre.append(fn)

    def post_process(self):
        """Append post process handler.

        This decorator appends a function to be called after each
        request; redefine all outputs if you want to use it for
        rewriting responses.

            @app.post_process()
            def after_each_request(req):
                ...

        (The original docstring example wrongly showed
        ``@app.pre_process()``.)
        """
        def wrapper(fn):
            self.__post.append(fn)
            return fn
        return wrapper

    def add_post_process(self, fn):
        """Append *fn* to the functions called after each request.

            app.add_post_process(after_each_request)
        """
        self.__post.append(fn)

    def default(self, method=METHOD_HEAD | METHOD_GET):
        """Set default handler.

        Decorator for the default handler for an http method, called
        before error_not_found when no uri in the internal tables
        matches -- similar to a not_found error, but without the error.

            @app.default(METHOD_GET_POST)
            def default_get_post(req):
                ...
        """
        def wrapper(fn):
            self.set_default(fn, method)
            # BUG FIX: the wrapper did not return fn, so the decorated
            # name was rebound to None (route/rroute wrappers return fn).
            return fn
        return wrapper

    def set_default(self, fn, method=METHOD_HEAD | METHOD_GET):
        """Register *fn* as default handler for every method in *method*.

        The default handler runs before error_not_found when no uri
        matches.

            app.set_default(default_get_post, METHOD_GET_POST)
        """
        for m in methods.values():
            if m & method:
                self.__dhandlers[m] = fn

    def pop_default(self, method):
        """Pop and return the default handler for *method*.

        Raises KeyError when no default handler is set for *method*.
        """
        # BUG FIX: the original did `self.__dhandlers(method)`, calling
        # the dict (always a TypeError); pop the entry instead.
        return self.__dhandlers.pop(method)

    def route(self, uri, method=METHOD_HEAD | METHOD_GET):
        """Decorator registering a handler for *uri* and *method*.

        The uri may be a static path or contain groups, which are handed
        to the handler as extra parameters.

            # static uri
            @app.route('/user/post', method=METHOD_POST)
            def user_create(req):
                ...

            # group regular expression
            @app.route('/user/<name>')
            def user_detail(req, name):
                ...

            # group regular expression with filter
            @app.route('/<surname:word>/<age:int>')
            def surnames_by_age(req, surname, age):
                ...

            # group with own regular expression filter
            @app.route('/<car:re:\w+>/<color:re:#[\da-fA-F]+>')
            def car(req, car, color):
                ...

        Group names that collide with a python keyword, like class, can
        be reached through **kwargs syntax:

            @app.route('/<class>/<len:int>')
            def classes(req, **kwargs):
                return "'%s' class is %d lenght." % \
                    (kwargs['class'], kwargs['len'])

        Ordering matters: when groups are detected the uri is converted
        to a normal regular expression and appended to a second internal
        table, and regex routes are checked in insertion order -- the
        first match stops any other searching.
        """
        def decorator(fn):
            self.set_route(uri, fn, method)
            return fn
        return decorator

    def set_route(self, uri, fn, method=METHOD_HEAD | METHOD_GET):
        """Register *fn* as handler for *uri* and *method*.

        Counterpart of the Application.route decorator; see its
        documentation for details.

            app.set_route('/use/post', user_create, METHOD_POST)
        """
        uri = uni(uri)

        if re_filter.search(uri):
            # Group syntax found: convert to a regular-expression route.
            r_uri = re_filter.sub(self.__regex, uri) + '$'
            group_pairs = (m.groups() for m in re_filter.finditer(uri))
            convertors = tuple((grp[0], self.__convertor(grp[1]))
                               for grp in group_pairs)
            self.set_rroute(r_uri, fn, method, convertors)
            return

        # Plain static path: file the handler under each matching method.
        table = self.__handlers.setdefault(uri, {})
        for m in methods.values():
            if method & m:
                table[m] = fn

    def pop_route(self, uri, method):
        """Remove and return the handler for *uri* and *method*.

        *method* must identify a single http method (combined masks such
        as METHOD_GET_POST are not supported); call once per method to
        remove a handler registered for several of them.
        """
        uri = uni(uri)

        if re_filter.search(uri):
            # Routes with group syntax live in the regex table.
            return self.pop_rroute(re_filter.sub(self.__regex, uri) + '$',
                                   method)

        handlers = self.__handlers.get(uri, {})
        rv = handlers.pop(method)
        if not handlers:
            # Last method for this uri -- drop the whole entry.
            self.__handlers.pop(uri, None)
        return rv

    def is_route(self, uri):
        """Return True when *uri* has any registered handler."""
        uri = uni(uri)
        if re_filter.search(uri):
            # Group syntax: the route, if any, lives in the regex table.
            return self.is_rroute(re_filter.sub(self.__regex, uri) + '$')
        return uri in self.__handlers

    def rroute(self, ruri, method=METHOD_HEAD | METHOD_GET):
        """Decorator registering a handler for a regular-expression uri.

        rroute and set_rroute store their routes in a dedicated internal
        table, separate from the static-route table.

            @app.rroute(r'/user/\w+')               # simple regular expression
            def any_user(req):
                ...

            @app.rroute(r'/user/(?P<user>\w+)')     # regular expression with
            def user_detail(req, user):             # groups
                ...

        Ordering matters: regex routes are checked in the order they
        were added and the first match stops any other searching.
        """
        def decorator(fn):
            self.set_rroute(ruri, fn, method)
            return fn
        return decorator

    def set_rroute(self, r_uri, fn, method=METHOD_HEAD | METHOD_GET,
                   convertors=()):
        r"""Set handler for uri defined by regular expression.

        Another way to add fn as handler for uri defined by regular expression.
        See Application.rroute documentation for details.

            app.set_rroute('/use/\w+/post', user_create, METHOD_POST)

        This method is internally used when groups are found in a static
        route added by the route or set_route method.
        """
        pattern = re.compile(r_uri, re.U)
        table = self.__rhandlers.setdefault(pattern, {})
        for flag in methods.values():
            if method & flag:
                table[flag] = (fn, convertors)
    # enddef

    def pop_rroute(self, r_uri, method):
        """Pop handler and convertors for uri and method from handlers table.

        For more details see Application.pop_route.
        """
        pattern = re.compile(r_uri, re.U)
        pattern_handlers = self.__rhandlers.get(pattern, {})
        rv = pattern_handlers.pop(method)
        if not pattern_handlers:        # drop the now-empty per-uri table
            self.__rhandlers.pop(pattern, None)
        return rv

    def is_rroute(self, r_uri):
        """Check if regular expression uri have any registered record."""
        return re.compile(r_uri, re.U) in self.__rhandlers

    def http_state(self, code, method=METHOD_HEAD | METHOD_GET | METHOD_POST):
        """Wrap function to handle http status codes like http errors.

        Bug fix: the decorator now returns the wrapped function, so the
        decorated name is no longer rebound to None (consistent with
        Application.rroute).
        """
        def wrapper(fn):
            self.set_http_state(code, fn, method)
            return fn
        return wrapper
    # enddef

    def set_http_state(self, code, fn,
                       method=METHOD_HEAD | METHOD_GET | METHOD_POST):
        """Set fn as handler for http state code and method."""
        table = self.__shandlers.setdefault(code, {})
        for flag in methods.values():
            if method & flag:
                table[flag] = fn
    # enddef

    def pop_http_state(self, code, method):
        """Pop handler for http state and method.

        As Application.pop_route, for pop multimethod handler, you must call
        pop_http_state for each method.

        Bug fix: the handlers table was being *called* as a function
        (``self.__shandlers(code, {})`` -> TypeError); it is a dict and must
        be read with ``.get``.  The now-empty per-code table is also removed,
        consistent with pop_route and pop_rroute.
        """
        handlers = self.__shandlers.get(code, {})
        rv = handlers.pop(method)
        if not handlers:                # drop the now-empty per-code table
            self.__shandlers.pop(code, None)
        return rv

    def error_from_table(self, req, code):
        """Internal method, called when an error status code has occurred.

        If a handler is registered for this status code and request method
        (filled via http_state / set_http_state), it is called.  Otherwise
        the built-in handler for the code is used, or not_implemented as a
        last resort.
        """
        if code in self.__shandlers \
                and req.method_number in self.__shandlers[code]:
            try:
                handler = self.__shandlers[code][req.method_number]
                # expose the error handler on the request only when no
                # regular uri handler was set earlier in the request cycle
                if 'uri_handler' not in req.__dict__:
                    req.uri_rule = '_%d_error_handler_' % code
                    req.uri_handler = handler
                self.handler_from_pre(req)       # call pre handlers now
                handler(req)
            except:     # noqa: E722 - the error handler itself failed; fall back
                internal_server_error(req)
        elif code in default_shandlers:
            # no user handler - use the built-in handler for this code
            handler = default_shandlers[code][METHOD_GET]
            handler(req)
        else:
            not_implemented(req, code)
    # enddef

    def handler_from_default(self, req):
        """Internal method, which is called if no handler is found.

        Runs the default handler registered for the request method, if any.
        A non-DECLINED return value terminates the request via SERVER_RETURN.
        """
        if req.method_number not in self.__dhandlers:
            return
        default_handler = self.__dhandlers[req.method_number]
        req.uri_rule = '_default_handler_'
        req.uri_handler = default_handler
        self.handler_from_pre(req)       # call pre handlers now
        retval = default_handler(req)
        if retval != DECLINED:
            raise SERVER_RETURN(retval)
    # enddef

    def handler_from_pre(self, req):
        """Run every registered pre-process handler on req.

        Called just before the end-point route handler.
        """
        for pre_handler in self.__pre:
            pre_handler(req)

    def handler_from_table(self, req):
        """Call right handler from handlers table (fill with route function).

        If no handler is found, try to find directory or file if Document
        Root, resp. Document Index is set.  Then try to call default handler
        for right method or call handler for status code 404 - not found.

        Handlers terminate the request by raising SERVER_RETURN; a DECLINED
        return value lets the search continue with the next candidate.
        """

        # static routes: exact uri match first
        if req.uri in self.__handlers:
            if req.method_number in self.__handlers[req.uri]:
                handler = self.__handlers[req.uri][req.method_number]
                req.uri_rule = req.uri      # nice variable for pre handlers
                req.uri_handler = handler
                self.handler_from_pre(req)  # call pre handlers now
                retval = handler(req)       # call right handler now
                # return text is allowed
                if isinstance(retval, str) \
                        or (_unicode_exist and isinstance(retval, unicode)):
                    req.write(retval, 1)    # write data and flush
                    retval = DONE
                if retval != DECLINED:
                    raise SERVER_RETURN(retval or DONE)  # could be state.DONE
            else:
                # uri is known, but not for this method
                raise SERVER_RETURN(HTTP_METHOD_NOT_ALLOWED)
            # endif
        # endif

        # regular expression routes - checked in insertion order; the first
        # match that does not DECLINE wins
        for ruri in self.__rhandlers.keys():
            match = ruri.match(req.uri)
            if match and req.method_number in self.__rhandlers[ruri]:
                handler, convertors = self.__rhandlers[ruri][req.method_number]
                req.uri_rule = ruri.pattern  # nice variable for pre handlers
                req.uri_handler = handler
                self.handler_from_pre(req)   # call pre handlers now
                if len(convertors):
                    # create OrderedDict from match instead of dict for
                    # convertors applying
                    req.groups = OrderedDict(
                        (g, c(v))for ((g, c), v) in zip(convertors,
                                                        match.groups()))
                    retval = handler(req, *req.groups.values())
                else:
                    req.groups = match.groupdict()
                    retval = handler(req, *match.groups())
                # return text is allowed
                if isinstance(retval, str) \
                        or (_unicode_exist and isinstance(retval, unicode)):
                    req.write(retval, 1)    # write data and flush
                    retval = DONE
                if retval != DECLINED:
                    raise SERVER_RETURN(retval or DONE)  # could be state.DONE
            # endif - no METHOD_NOT_ALLOWED here
        # endfor

        # try file or index
        if req.document_root():
            rfile = "%s%s" % (uni(req.document_root()),
                              path.normpath("%s" % uni(req.uri)))

            if not path.exists(rfile):
                if req.debug and req.uri == '/debug-info':      # work if debug
                    req.uri_rule = '_debug_info_'
                    req.uri_handler = debug_info
                    self.handler_from_pre(req)  # call pre handlers now
                    raise SERVER_RETURN(debug_info(req, self))
                self.handler_from_default(req)                  # try default
                raise SERVER_RETURN(HTTP_NOT_FOUND)             # not found

            # return file
            if path.isfile(rfile) and access(rfile, R_OK):
                req.uri_rule = '_send_file_'
                req.uri_handler = send_file
                self.handler_from_pre(req)      # call pre handlers now
                req.log_error("Return file: %s" % req.uri, LOG_INFO)
                raise SERVER_RETURN(send_file(req, rfile))

            # return directory index
            if req.document_index and path.isdir(rfile) \
                    and access(rfile, R_OK):
                req.log_error("Return directory: %s" % req.uri, LOG_INFO)
                req.uri_rule = '_directory_index_'
                req.uri_handler = directory_index
                self.handler_from_pre(req)      # call pre handlers now
                raise SERVER_RETURN(directory_index(req, rfile))

            # path exists but is neither readable file nor listable directory
            raise SERVER_RETURN(HTTP_FORBIDDEN)
        # endif

        if req.debug and req.uri == '/debug-info':
            req.uri_rule = '_debug_info_'
            req.uri_handler = debug_info
            self.handler_from_pre(req)          # call pre handlers now
            raise SERVER_RETURN(debug_info(req, self))

        # last chance: default handler, then 404
        self.handler_from_default(req)

        req.log_error("404 Not Found: %s" % req.uri, LOG_ERR)
        raise SERVER_RETURN(HTTP_NOT_FOUND)
    # enddef

    def __request__(self, environ, start_response):
        """Create Request instance and return wsgi response.

        This method create Request object, call handlers from
        Application.__pre (Application.handler_from_pre),
        uri handler (handler_from_table), default handler
        (Application.handler_from_default) or error handler
        (Application.error_from_table), and handlers from
        Application.__post.
        """
        req = Request(environ, start_response, self.__config)

        try:
            self.handler_from_table(req)
        except SERVER_RETURN as e:
            # handlers signal the final status by raising SERVER_RETURN
            code = e.args[0]
            if code in (OK, HTTP_OK, DONE):
                pass
            # XXX: elif code in (HTTP_MOVED_PERMANENTLY,
            #                    HTTP_MOVED_TEMPORARILY):
            else:
                req.status = code
                self.error_from_table(req, code)
        except (BrokenClientConnection, SystemExit) as e:
            # client went away or server is shutting down - log and return
            # an empty iterable instead of rendering an error page
            req.log_error(str(e), LOG_ERR)
            req.log_error('   ***   You shoud ignore next error   ***',
                          LOG_ERR)
            return ()
        except:     # noqa: E722 - any other failure becomes a 500 page
            self.error_from_table(req, 500)
        # endtry

        try:    # call post_process handler
            for fn in self.__post:
                fn(req)
        except:     # noqa: E722 - post handlers must not break the response
            self.error_from_table(req, 500)
        # endtry

        return req.__end_of_request__()    # private call of request
    # enddef

    def __call__(self, environ, start_response):
        """Callable define for Application instance.

        WSGI entry point; delegates to the __request__ method (which may be
        the profiling wrapper installed by set_profile).
        """
        if self.__name == '__poorwsgi__':
            # warn when the implicit module-level instance is used
            stderr.write("[W] Using deprecated instance of Application.\n")
            stderr.write("    Please, create your own instance\n")
            stderr.flush()
        return self.__request__(environ, start_response)

    def __profile_request__(self, environ, start_response):
        """Profiling variant of __request__, installed by set_profile.

        Runs the original __request__ under the configured profiler and
        writes one .profile dump per request path.
        """
        rv = []

        def wrapper(rv):
            rv.append(self.__original_request__(environ, start_response))

        # one dump file per uri: slashes become underscores in the name
        uri_dump = '%s%s.profile' % (
            self._dump, environ.get('PATH_INFO').replace('/', '_'))
        self.log_error('Generate %s' % uri_dump, LOG_INFO)
        self._runctx('wrapper(rv)', globals(), locals(), filename=uri_dump)
        return rv[0]
    # enddef

    def __repr__(self):
        """Human readable identification of this Application instance."""
        return '%s - callable Application class instance' % self.__name

    def set_profile(self, runctx, dump):
        """Set profiler for __call__ function.

        Arguments:
            runctx - function from profiler module
            dump - path and prefix for .profile files

        Typical usage:

            import cProfile

            cProfile.runctx('from simple import *', globals(), locals(),
                            filename="log/init.profile")
            app.set_profile(cProfile.runctx, 'log/req')
        """
        self._dump = dump
        self._runctx = runctx

        # keep the real handler around, then route requests through the
        # profiling wrapper
        self.__original_request__ = self.__request__
        self.__request__ = self.__profile_request__
    # enddef

    def del_profile(self):
        """Remove profiler from application.

        Restores the original __request__ saved by set_profile; calling this
        without a prior set_profile raises AttributeError.
        """
        self.__request__ = self.__original_request__

    def get_options(self):
        """Returns dictionary with application variables from system environment.

        Application variables start with {app_} prefix,
        but in returned dictionary is set without this prefix.

            #!ini
            poor_LogLevel = warn        # Poor WSGI variable
            app_db_server = localhost   # application variable db_server
            app_templates = app/templ   # application variable templates

        This method works like Request.get_options, but work with
        os.environ, so it works only with wsgi servers, which set not only
        request environ, but os.environ too. Apaches mod_wsgi don't do that,
        uWsgi and PoorHTTP do that.
        """
        return {
            key.strip()[4:].lower(): val.strip()
            for key, val in environ.items()
            if key.strip()[:4].lower() == 'app_'
        }
    # enddef

    def log_error(self, message, level=LOG_ERR):
        """Logging method with the same functionality like in Request object.

        But as get_options read configuration from os.environ which could
        not work in same wsgi servers like Apaches mod_wsgi.

        This method writes to stderr, so messages may not appear in the
        server's error log!
        """
        # only emit messages at or above the configured level
        if self.__log_level[0] >= level[0]:
            # Python 2: encode unicode messages to utf-8 before writing
            if _unicode_exist and isinstance(message, unicode):
                message = message.encode('utf-8')
            try:
                stderr.write("<%s> [%s] %s\n" % (level[1], self.__name,
                                                 message))
            except UnicodeEncodeError:
                # stderr encoding cannot represent the message; degrade to
                # ascii with backslash escapes (py2 vs py3 branches)
                if _unicode_exist:
                    message = message.decode('utf-8').encode(
                        'ascii', 'backslashreplace')
                else:
                    message = message.encode(
                        'ascii', 'backslashreplace').decode('ascii')

                stderr.write("<%s> [%s] %s\n" % (level[1], self.__name,
                                                 message))
            stderr.flush()
    # enddef

    def log_info(self, message):
        """Log message at LOG_INFO level (see Application.log_error)."""
        self.log_error(message, LOG_INFO)

    def log_debug(self, message):
        """Log message at LOG_DEBUG level (see Application.log_error)."""
        self.log_error(message, LOG_DEBUG)

    def log_warning(self, message):
        """Log message at LOG_WARNING level (see Application.log_error)."""
        self.log_error(message, LOG_WARNING)
    def getlvl0data(self, raw, query=False):
        '''Level zero presents each year with total file sizes.

        raw should map query keys to master lists of file records; the size
        of a record is taken from column 4.  Returns an OrderedDict mapping
        a "low-high" size-range label to the number of files in that range.

        Fixes relative to the original:
         * Python 3 compatible print() calls (the old print statements were
           a SyntaxError on Python 3)
         * a file whose size equals the top bin limit is now counted in the
           last bin instead of being silently dropped
        '''
        selection = raw[query]

        # min/max file size over the selection (size is column 4)
        sizes = [record[4] for record in selection]
        smin, smax = min(sizes), max(sizes)

        # Create the bins with size ranges
        bincount = 10
        binsize = (smax - smin) / float(bincount)

        binlimits = OrderedDict()   # holds the upper size limit for each bin
        low = smin
        for _ in range(bincount):
            high = low + binsize
            binlimits['%f-%f' % (low, high)] = high
            low = high

        # raw data for the next level and the per-bin file counts
        newraw = OrderedDict((label, []) for label in binlimits)
        bindata = OrderedDict((label, 0) for label in binlimits)

        labels = list(binlimits)
        for record in selection:
            size = record[4]
            for label in labels:
                if size < binlimits[label]:
                    break
            else:
                # size >= every limit (e.g. the maximum): count it in the
                # last bin instead of losing it
                label = labels[-1]
            newraw[label].append(record)
            bindata[label] += 1

        print(smin, smax, '\n')
        for item in binlimits:
            print(item)
        print('\n')
        if self.level > 0:
            print(selection)

        data = bindata.copy()

        # save the raw data for this level for reverse navigation
        self.rawhistory[self.level] = raw.copy()

        # Save the raw that level 1 will use to sort itself out
        self.updateRaw(newraw)
        return data
Exemple #14
0
class Atoms:
    "Class to deal with a single frame of an xyz movie"

    def __init__(self, filename=None, *allocargs, **allockwargs):
        """Allocate storage and optionally read an initial frame.

        Extra positional/keyword arguments are forwarded to alloc().
        """
        self._atomsptr = None
        self.alloc(*allocargs, **allockwargs)
        if filename is not None:
            self.read(filename)

    def alloc(self, n=0, n_int=0, n_real=3, n_str=1, n_logical=0,
              use_libatoms=False, atomsptr=None, properties=None,
              lattice=None, params=None, element='Si'):
        """Allocate (or attach) storage for n atoms.

        lattice defaults to a fresh 100-unit cubic cell and params to a
        fresh ParamReader.  Both used to be mutable default arguments
        evaluated once at definition time and shared between calls - now a
        new object is created per call, which is behaviour-compatible for
        callers that never mutated the shared defaults.

        element is unused here; kept for interface compatibility.
        """
        if lattice is None:
            lattice = numpy.array([[100., 0., 0.],
                                   [0., 100., 0.],
                                   [0., 0., 100.]])
        if params is None:
            params = ParamReader()

        if use_libatoms or atomsptr is not None:
            if atomsptr is None:
                self.attach(libatoms.atoms_initialise(n, lattice))
            else:
                self.attach(atomsptr)
        else:
            self.n = n
            self.lattice = lattice
            self.g = numpy.linalg.inv(self.lattice)
            self.params = params

            # per-kind storage arrays: one row per atom
            self.real = numpy.zeros((self.n, n_real), dtype=float)
            self.int = numpy.zeros((self.n, n_int), dtype=int)
            self.str = numpy.zeros((self.n, n_str), dtype='S10')
            self.logical = numpy.zeros((self.n, n_logical), dtype=bool)

            if properties is None:
                # default layout: chemical symbol + 3 position columns
                self.properties = OrderedDict({
                    'species': ('S', slice(0, 1)),
                    'pos': ('R', slice(0, 3))
                })
            else:
                self.properties = properties

            self.repoint()

    def attach(self, atomsptr):
        """Attach this object to an existing libatoms Atoms pointer.

        Wraps the native data arrays with numpy views and rebuilds the
        properties table from the libatoms side.
        """
        self.finalise()
        self._atomsptr = atomsptr

        self.n, n_int, n_real, n_str, n_logical, iloc, rloc, sloc, lloc, latticeloc, gloc = \
                   libatoms.atoms_get_data(self._atomsptr)

        self.int = arraydata((self.n, n_int), int, iloc)
        self.real = arraydata((self.n, n_real), float, rloc)
        self.str = arraydata((self.n, n_str), 'S10', sloc)
        # bug fix: logical data lives at lloc; it was wrongly wrapped at
        # sloc, the *string* data location
        self.logical = arraydata((self.n, n_logical), bool, lloc)

        self.lattice = arraydata((3, 3), float, latticeloc)
        self.g = arraydata((3, 3), float, gloc)

        self.params = {}

        # libatoms property codes -> our one-letter type codes
        property_code_map = {1: 'I', 2: 'R', 3: 'S', 4: 'L'}
        self.properties = OrderedDict()
        for i in range(libatoms.atoms_n_properties(self._atomsptr)):
            key, (code, startcol, stopcol) = libatoms.atoms_nth_property(
                self._atomsptr, i + 1)
            # libatoms columns are 1-based inclusive; ours are 0-based slices
            self.properties[key.strip()] = (property_code_map[code],
                                            slice(startcol - 1, stopcol))

        self.repoint()

    def finalise(self):
        """Release the attached libatoms object, if any (idempotent)."""
        if self._atomsptr is None:
            return
        libatoms.atoms_finalise(self._atomsptr)
        self._atomsptr = None

    def __repr__(self):
        """Debug representation showing size, properties, params and lattice."""
        return 'Atoms(n=%d, properties=%s, params=%s, lattice=%s)' % (
            self.n, repr(self.properties), repr(self.params),
            repr(self.lattice))

    def __cmp__(self, other):
        """Three-way comparison: 0 when frames are equal, 1 otherwise.

        NOTE(review): __cmp__ is a Python 2 protocol and is ignored on
        Python 3; rich comparisons (__eq__/__ne__) would be needed there.
        """
        if other is None:
            return 1

        # Quick checks
        if (self.n != other.n) or (self.comment() != other.comment()):
            return 1

        # Check if arrays match one by one
        for this, that in \
            (self.lattice, other.lattice), \
            (self.real, other.real), (self.int, other.int), \
            (self.str, other.str), (self.logical, other.logical):

            if (not numpy.all(this == that)):
                return 1

        return 0

    def update(self, other):
        """Overwrite contents of this Atoms object with a copy of an other.

        Bug fix: the data arrays were previously taken with ``other.real[:]``
        etc. - for numpy arrays slicing returns a *view*, so both objects
        shared storage, contradicting the documented copy semantics.  Use
        explicit .copy() instead.
        """
        self.n = other.n
        self.lattice = other.lattice.copy()
        self.g = other.g.copy()
        self.params = other.params.copy()
        self.properties = other.properties.copy()

        self.real = other.real.copy()
        self.int = other.int.copy()
        self.str = other.str.copy()
        self.logical = other.logical.copy()

        self.repoint()

    def add_property(self, name, value, ncols=1):
        """Add a new property to this Atoms object.

        value can be a scalar int/float/str/bool, a homogeneous list of one
        of those (filling ncols new columns), or a numpy array with self.n
        rows (column count taken from its shape).

        Bug fixes relative to the original:
         * bool numpy arrays are now registered with type code 'L'
           (they were registered as 'S', the string code)
         * list detection works on Python 3: map() returns an iterator
           there, so numpy.array(map(type, value)) no longer performed an
           element-wise type test
        """
        def _is_list_of(kind):
            # exact type match, as in the original (a bool is NOT an int here)
            return (type(value) is list
                    and all(type(item) is kind for item in value))

        def _grow(attr, dtype, code, width):
            # Append `width` new columns holding `value` to self.<attr>,
            # register the property slice and refresh attribute pointers.
            old = getattr(self, attr)
            n_old = old.shape[1]
            grown = numpy.zeros((self.n, n_old + width), dtype=dtype)
            grown[:, :n_old] = old
            if width == 1:
                grown[:, n_old] = value
            else:
                grown[:, n_old:n_old + width] = value
            setattr(self, attr, grown)
            self.properties[name] = (code, slice(n_old, n_old + width))
            self.repoint()

        if type(value) is int or _is_list_of(int):
            _grow('int', int, 'I', ncols)
        elif type(value) is float or _is_list_of(float):
            _grow('real', float, 'R', ncols)
        elif type(value) is str or _is_list_of(str):
            _grow('str', 'S10', 'S', ncols)
        elif type(value) is bool or _is_list_of(bool):
            _grow('logical', bool, 'L', ncols)
        elif type(value) is numpy.ndarray:
            if value.shape[0] != self.n:
                raise ValueError(
                    'length of value array (%d) != number of atoms (%d)' %
                    (value.shape[0], self.n))
            # column count comes from the array itself, not from ncols
            try:
                width = value.shape[1]
            except IndexError:
                width = 1
            if value.dtype.kind == 'f':
                _grow('real', float, 'R', width)
            elif value.dtype.kind == 'i':
                _grow('int', int, 'I', width)
            elif value.dtype.kind == 'S':
                _grow('str', 'S10', 'S', width)
            elif value.dtype == numpy.dtype('bool'):
                _grow('logical', numpy.dtype('bool'), 'L', width)
            else:
                raise ValueError(
                    "Don't know how to add array property of type %r" %
                    value.dtype)
        else:
            raise ValueError("Don't know how to add property of type %r" %
                             type(value))

    def repoint(self):
        "Make pointers to columns in real and int"
        # type code -> name of the backing storage attribute
        storage_attr = {'R': 'real', 'I': 'int', 'S': 'str', 'L': 'logical'}
        for prop, (ptype, cols) in self.properties.items():
            if ptype not in storage_attr:
                raise ValueError('Bad property type :' +
                                 str(self.properties[prop]))
            backing = getattr(self, storage_attr[ptype])
            if cols.stop - cols.start == 1:
                # single column: expose it as a 1-D view
                setattr(self, prop, backing[:, cols.start])
            else:
                setattr(self, prop, backing[:, cols])

    def comment(self, properties=None):
        "Return the comment line for this Atoms object"
        if properties is None:
            props = list(self.properties.keys())
        else:
            props = properties

        flat_lattice = numpy.reshape(self.lattice, 9)
        lattice_str = 'Lattice="%s"' % ' '.join(str(x) for x in flat_lattice)

        # each property contributes name:typecode:width, all colon-joined
        fields = []
        for key in props:
            code, cols = self.properties[key]
            fields.append('%s:%s:%s' % (key, code, cols.stop - cols.start))
        props_str = 'Properties=' + ':'.join(fields)

        return lattice_str + ' ' + props_str + ' ' + str(self.params)

    def _props_dtype(self, props=None):
        "Return a record array dtype for the specified properties (default all)"
        if props is None:
            props = self.properties.keys()

        # one-letter property code -> numpy format string
        fmt_map = {'R': 'd', 'I': 'i', 'S': 'S10', 'L': 'bool'}

        fields = []
        for prop in props:
            ptype, cols = self.properties[prop]
            width = cols.stop - cols.start
            if width == 1:
                fields.append((prop, fmt_map[ptype]))
            else:
                # multi-column property becomes prop0, prop1, ... fields
                fields.extend((prop + str(c), fmt_map[ptype])
                              for c in range(width))

        return numpy.dtype(fields)

    def to_recarray(self, props=None):
        "Return a record array contains specified properties in order (defaults to all properties)"
        if props is None:
            props = self.properties.keys()

        # Create empty record array with correct dtype
        data = numpy.zeros(self.n, self._props_dtype(props))

        # type code -> name of the backing storage attribute
        storage_attr = {'R': 'real', 'I': 'int', 'S': 'str', 'L': 'logical'}

        # copy columns from the backing arrays into the record array
        for prop in props:
            ptype, cols = self.properties[prop]
            if ptype not in storage_attr:
                raise ValueError('Bad property type :' +
                                 str(self.properties[prop][1]))
            source = getattr(self, storage_attr[ptype])
            if cols.start == cols.stop - 1:
                data[prop] = source[:, cols.start]
            else:
                for c in range(cols.stop - cols.start):
                    data[prop + str(c)] = source[:, cols.start + c]

        return data

    def update_from_recarray(self, data, props=None):
        """Update Atoms data from a record array. By default all properties
      are updated; use the props argument to update only a subset"""
        if props is None:
            props = self.properties.keys()

        # the record array must exactly match our layout and atom count
        if data.dtype != self._props_dtype(props) or data.shape != (self.n, ):
            raise ValueError('Data shape is incorrect')

        # type code -> name of the backing storage attribute
        storage_attr = {'R': 'real', 'I': 'int', 'S': 'str', 'L': 'logical'}

        # copy columns from the record array back into the backing arrays
        for prop in props:
            ptype, cols = self.properties[prop]
            if ptype not in storage_attr:
                raise ValueError('Bad property type :' +
                                 str(self.properties[prop][1]))
            target = getattr(self, storage_attr[ptype])
            if cols.start == cols.stop - 1:
                target[:, cols.start] = data[prop]
            else:
                for c in range(cols.stop - cols.start):
                    target[:, cols.start + c] = data[prop + str(c)]

    def read_xyz(self, xyz):
        """Read one frame from an extended XYZ filename or open file.

        Returns True on success, False at end of file or on a partial
        (truncated) frame.  NOTE: relies on the Python 2 file ``.next()``
        protocol.
        """

        opened = False
        if type(xyz) == type(''):
            xyz = open(xyz, 'r')
            opened = True

        line = xyz.next()
        if not line: return False

        # First line is the atom count, second the key=value comment string
        n = int(line.strip())
        comment = (xyz.next()).strip()

        # Parse comment line
        params = ParamReader(comment)

        if not 'Properties' in params:
            raise ValueError('Properties missing from comment line')

        properties, n_int, n_real, n_str, n_logical = _parse_properties(
            params['Properties'])
        del params['Properties']

        # Get lattice (3x3 matrix flattened in the comment line)
        if not 'Lattice' in params:
            raise ValueError('No lattice found in xyz file')

        lattice = numpy.reshape(params['Lattice'], (3, 3))
        del params['Lattice']

        # Resize internal arrays to fit the incoming frame
        self.alloc(n=n,lattice=lattice,properties=properties,params=params,\
                   n_int=n_int,n_real=n_real,n_str=n_str,n_logical=n_logical)

        props_dtype = self._props_dtype()

        # One converter callable (int/float/str/...) per output column
        converters = [_getconv(props_dtype.fields[name][0]) \
                      for name in props_dtype.names]

        X = []
        for i, line in enumerate(xyz):
            vals = line.split()
            row = tuple([converters[j](val) for j, val in enumerate(vals)])
            X.append(row)
            if i == self.n - 1: break  # Only read self.n lines

        try:
            data = numpy.array(X, props_dtype)
        except TypeError:
            raise IOError('End of file reached before end of frame')

        if opened: xyz.close()

        try:
            self.update_from_recarray(data)
        except ValueError:
            # got a partial frame, must be end of file
            return False
        else:
            return True

    def read_netcdf(self, fname, frame=0):
        """Read a single frame from a NetCDF trajectory file.

        Parameters: fname -- NetCDF file name; frame -- frame index to
        load.  Replaces lattice, params and all per-atom properties with
        the file contents.
        """
        from pupynere import netcdf_file

        nc = netcdf_file(fname)

        self.n = nc.dimensions['atom']
        self.lattice = make_lattice(nc.variables['cell_lengths'][frame],
                                    nc.variables['cell_angles'][frame])
        self.g = numpy.linalg.inv(self.lattice)
        self.params = OrderedDict()
        self.properties = OrderedDict()

        # Start from zero-width per-atom stores; add_property() grows them
        self.real = numpy.zeros((self.n, 0), dtype=float)
        self.int = numpy.zeros((self.n, 0), dtype=int)
        self.str = numpy.zeros((self.n, 0), dtype='S10')
        self.logical = numpy.zeros((self.n, 0), dtype=bool)

        # Renamed from `vars` to avoid shadowing the builtin
        var_names = [v for v in nc.variables.keys()
                     if v not in ('cell_angles', 'cell_lengths')]

        # ensure first var is species and second positions
        sp = var_names.index('species')
        if sp != 0:
            var_names[sp], var_names[0] = var_names[0], var_names[sp]
        pos = var_names.index('coordinates')
        if pos != 1:
            var_names[pos], var_names[1] = var_names[1], var_names[pos]

        for v in var_names:
            d = nc.variables[v].dimensions

            if d[0] != 'frame': continue

            value = nc.variables[v][frame]
            if value.dtype == numpy.dtype('|S1'):
                # Character data arrives one char per cell: join each row
                value = [''.join(x).strip() for x in value]

            if len(d) == 1 or (len(d) == 2 and d[1] in ('label', 'string')):
                if (len(d) == 2 and d[1] in ('label', 'string')):
                    value = ''.join(value)
                self.params[v] = value
            else:
                # Name mangling: NetCDF names -> internal property names
                if v == 'coordinates':
                    p = 'pos'
                elif v == 'velocities':
                    p = 'velo'
                else:
                    p = v
                # `value` was already extracted/joined above -- the original
                # redundantly re-read it from the file here
                self.add_property(p, value)

    def write_xyz(self, xyz=sys.stdout, properties=None):
        """Write atoms in extended XYZ format.

        xyz may be a filename or an open file; properties defaults to all
        properties in their original order.
        """

        props = self.properties.keys() if properties is None else properties

        # Sanity checks: column 0 must be species, column 1 positions
        species = getattr(self, props[0])
        if len(species.shape) != 1 or species.dtype.kind != 'S':
            raise ValueError('First property must be species like')

        pos = getattr(self, props[1])
        if pos.shape[1] != 3 or pos.dtype.kind != 'f':
            raise ValueError('Second property must be position like')

        data = self.to_recarray(props)
        # Build one printf-style conversion per output column
        col_fmts = [_getfmt(data.dtype.fields[name][0])
                    for name in data.dtype.names]
        format = ''.join(col_fmts) + '\n'

        opened = False
        if type(xyz) == type(''):
            xyz = open(xyz, 'w')
            opened = True

        # Header: atom count, then the key=value comment line
        xyz.write('%d\n' % self.n)
        xyz.write(self.comment(properties) + '\n')

        # One formatted line per atom
        for row in range(self.n):
            xyz.write(format % tuple(data[row]))

        if opened: xyz.close()

    def read_cell(self, cell):
        "Read atoms from a CastepCell object or file"

        source = cell
        # A file-like argument (anything exposing .next) gets wrapped first
        if hasattr(source, 'next'):
            source = castep.CastepCell(source)
        self.update(source.to_atoms())

    def write_cell(self, fname):
        "Write Atoms to a cell file"

        # Build a cell object from our current state, then serialise it
        out_cell = castep.CastepCell()
        out_cell.update_from_atoms(self)
        out_cell.write(fname)

    def read_geom(self, geom):
        "Read from a CASTEP .geom file"
        parsed = castep.read_geom(geom)
        self.update(parsed)

    def read_castep(self, castepfile):
        "Read from a .castep output file"

        # Only hand our current contents to the parser when we already
        # hold some atoms; an empty object starts from scratch.
        if self.n == 0:
            result = castep.read_castep_output(castepfile, abort=False)
        else:
            result = castep.read_castep_output(castepfile, self, abort=False)
        self.update(result)

    def read(self, fname, filetype=None):
        """Guess the file type from the extension and call the matching
        read method.

        fname may be a filename (optionally gzipped) or an open file
        object; filetype, when given, overrides extension guessing.
        """

        opened = False
        if type(fname) == type(''):
            if fname.endswith('.gz'):
                import gzip
                fh = gzip.open(fname)
                fname = fname[:-3]  # remove .gz
                # BUGFIX: the gzip handle was never closed because opened
                # stayed False on this branch (write() closes it correctly)
                opened = True
            elif fname.endswith('.nc'):
                # The NetCDF reader wants the filename, not a file object
                fh = fname
            else:
                fh = open(fname, 'r')
                opened = True

            # Guess file type from extension
            if filetype is None:
                root, filetype = os.path.splitext(fname)
                filetype = filetype[1:]  # remove '.'
        else:
            fh = fname

        # Default to xyz format
        if not filetype in ['cell', 'geom', 'xyz', 'castep', 'nc']:
            filetype = 'xyz'

        if filetype == 'xyz':
            self.read_xyz(fh)
        elif filetype == 'cell':
            self.read_cell(fh)
        elif filetype == 'geom':
            self.read_geom(fh)
        elif filetype == 'castep':
            self.read_castep(fh)
        elif filetype == 'nc':
            self.read_netcdf(fh)

        if opened: fh.close()

    def write(self, fname, filetype=None):
        """Write to a file; the format is guessed from the extension
        unless filetype is given.  fname may also be an open file."""
        opened = False
        if type(fname) == type(''):
            if fname.endswith('.gz'):
                import gzip
                fh = gzip.open(fname, 'w')
                fname = fname[:-3]  # strip .gz before guessing the type
            else:
                fh = open(fname, 'w')

            if filetype is None:
                # Extension of the (possibly de-gzipped) name, minus the dot
                filetype = os.path.splitext(fname)[1][1:]
            opened = True
        else:
            fh = fname

        # Anything unrecognised is written as extended XYZ
        if filetype not in ['xyz', 'cfg', 'cell']:
            filetype = 'xyz'

        writers = {'xyz': self.write_xyz,
                   'cfg': self.write_cfg,
                   'cell': self.write_cell}
        writers[filetype](fh)

        if opened: fh.close()

    def write_cfg(self,
                  cfg=sys.stdout,
                  shift=numpy.array([0., 0., 0.]),
                  properties=None):
        """Write atoms in AtomEye extended CFG format.

        cfg may be a filename or open file; shift is added to the
        fractional positions.  Returns the list of column names actually
        written, which may be abbreviated compared to those requested
        since AtomEye has a maximum of 32 aux props.
        """

        opened = False
        if type(cfg) == type(''):
            cfg = open(cfg, 'w')
            opened = True

        if properties is None:
            properties = self.properties.keys()

        # Header line
        cfg.write('Number of particles = %d\n' % self.n)
        cfg.write('# ' + self.comment(properties) + '\n')

        # Lattice vectors
        for i in 0, 1, 2:
            for j in 0, 1, 2:
                cfg.write('H0(%d,%d) = %16.8f\n' %
                          (i + 1, j + 1, self.lattice[i, j]))

        cfg.write('.NO_VELOCITY.\n')

        # Check first property is species-like, second position-like
        species = getattr(self, properties[0])
        if len(species.shape) != 1 or species.dtype.kind != 'S':
            raise ValueError('First property must be species like')

        pos = getattr(self, properties[1])
        if pos.shape[1] != 3 or pos.dtype.kind != 'f':
            raise ValueError('Second property must be position like')

        # CFG stores fractional coordinates: pos . g, plus optional shift
        if not self.properties.has_key('frac_pos'):
            self.add_property('frac_pos', 0.0, ncols=3)
        self.frac_pos[:] = numpy.array(
            [numpy.dot(pos[i, :], self.g) + shift for i in range(self.n)])

        if not self.properties.has_key('mass'):
            self.add_property('mass', map(ElementMass.get, self.species))

        # species/mass/frac_pos are written specially, not as aux props
        properties = filter(
            lambda p: p not in ('pos', 'frac_pos', 'mass', 'species'),
            properties)

        # AtomEye can handle a maximum of 32 columns, so we might have to
        # throw away some of the less interesting properties

        def count_cols():
            # Total aux columns the current property list would produce
            n_aux = 0
            for p in properties:
                s = getattr(self, p).shape
                if len(s) == 1: n_aux += 1
                else: n_aux += s[1]
            return n_aux

        boring_properties = ['travel', 'avgpos', 'oldpos', 'acc', 'velo']
        while count_cols() > 32:
            if len(boring_properties) == 0:
                raise ValueError('No boring properties left!')
            next_most_boring = boring_properties.pop(0)
            try:
                # BUGFIX: list.index raises ValueError (not IndexError) when
                # the item is absent; the original caught the wrong type and
                # would crash instead of skipping the missing property.
                del properties[properties.index(next_most_boring)]
            except ValueError:
                pass  # this boring property isn't in the list: move on to next

        properties = ['species', 'mass', 'frac_pos'] + properties
        data = self.to_recarray(properties)

        # entry_count excludes the species and mass columns
        cfg.write('entry_count = %d\n' % (len(data.dtype.names) - 2))

        # 3 lines per atom: element name, mass and other data.
        # names[2:] starts with the three frac_pos columns (i = 0..2);
        # genuine aux props start at i == 3, hence auxiliary[i - 3].
        format = '%s\n%12.4f\n'
        for i, name in enumerate(data.dtype.names[2:]):
            if i > 2: cfg.write('auxiliary[%d] = %s\n' % (i - 3, name))
            format = format + _getfmt(data.dtype.fields[name][0])
        format = format + '\n'

        for i in range(self.n):
            cfg.write(format % tuple(data[i]))

        if opened: cfg.close()

        # Return column names as a list
        return list(data.dtype.names)

    def filter(self, mask):
        """Return a smaller Atoms containing only the entries where mask
        is true.  mask may be None, meaning keep everything (used by
        copy())."""

        other = Atoms()

        if mask is None:
            # Keep every atom.  Uses the builtin bool dtype: the
            # numpy.bool alias was deprecated in NumPy 1.20 and removed
            # in 1.24, and ones() replaces the zeros-then-fill two-step.
            mask = numpy.ones((self.n, ), dtype=bool)

        other.n = count(mask)
        other.lattice = self.lattice.copy()
        other.g = self.g.copy()
        other.params = self.params.copy()
        other.properties = self.properties.copy()

        # Boolean fancy indexing copies the selected rows of each store
        other.real = self.real[mask]
        other.int = self.int[mask]
        other.str = self.str[mask]
        other.logical = self.logical[mask]

        other.repoint()

        return other

    def copy(self):
        """Return a copy of this Atoms; an empty one when n == 0."""
        return Atoms() if self.n == 0 else self.filter(mask=None)

    def add(self, newpos, newspecies):
        """Append one or more atoms.

        newpos     -- a single position (3,) or an (m, 3) array of
                      positions; any array-like is accepted.
        newspecies -- species value(s) for the new atoms.
        """

        # asarray accepts lists, tuples and arrays alike (the original
        # only converted lists); ndarrays pass through unchanged.
        newpos = numpy.asarray(newpos)

        # A 1-D position means one atom; 2-D means one row per atom
        if len(newpos.shape) == 1:
            n_new = 1
        else:
            n_new = newpos.shape[0]

        oldn = self.n
        self.n = self.n + n_new

        # Grow every backing store to the new atom count
        self.real = numpy.resize(self.real, (self.n, self.real.shape[1]))
        self.int = numpy.resize(self.int, (self.n, self.int.shape[1]))
        self.str = numpy.resize(self.str, (self.n, self.str.shape[1]))
        self.logical = numpy.resize(self.logical,
                                    (self.n, self.logical.shape[1]))

        # Re-establish the property views into the resized arrays
        self.repoint()

        self.pos[oldn:self.n] = newpos
        self.species[oldn:self.n] = newspecies

    def remove(self, discard):
        """Delete the atoms whose indices appear in discard."""
        doomed = set(discard)
        keep = [idx for idx in range(self.n) if idx not in doomed]

        self.n = len(keep)
        # Fancy indexing with the kept rows compacts every store
        self.real = self.real[keep]
        self.int = self.int[keep]
        self.str = self.str[keep]
        self.logical = self.logical[keep]
        self.repoint()

    def supercell(self, n1, n2, n3):
        """Return a new Atoms replicated n1 x n2 x n3 times along the
        three lattice vectors."""

        other = Atoms(n=self.n*n1*n2*n3,n_int=self.int.shape[1],\
                      n_real=self.real.shape[1], \
                      properties=self.properties.copy())

        # Scale each lattice vector by its repeat count
        other.lattice[0, :] = self.lattice[0, :] * n1
        other.lattice[1, :] = self.lattice[1, :] * n2
        other.lattice[2, :] = self.lattice[2, :] * n3
        other.g = numpy.linalg.inv(other.lattice)

        for i in range(n1):
            for j in range(n2):
                for k in range(n3):
                    # Translation vector for this periodic image
                    p = numpy.dot(self.lattice, numpy.array([i, j, k]))
                    for n in range(self.n):
                        # Destination row for atom n in image (i, j, k)
                        nn = ((i * n2 + j) * n3 + k) * self.n + n
                        other.int[nn, :] = self.int[n, :]
                        other.real[nn, :] = self.real[n, :]
                        other.logical[nn, :] = self.logical[n, :]
                        other.str[nn, :] = self.str[n, :]
                        # NOTE(review): pos is written after real on
                        # purpose -- pos appears to alias columns of real,
                        # so the shifted positions must overwrite the raw
                        # copy above; confirm before reordering.
                        other.pos[nn, :] = self.pos[n, :] + p

        other.repoint()
        return other

    def cell_volume(self):
        """Return the (positive) unit-cell volume |a . (b x c)|."""
        a = self.lattice[0, :]
        b = self.lattice[1, :]
        c = self.lattice[2, :]
        return abs(numpy.dot(numpy.cross(a, b), c))
# Exemple #15
# 0
i = 0

# Create the table
table_def_items = []  # list of "<column> <type>" fragments
for cat, value in categories.items():  # concatenate each key and value
    table_def_items.append(cat + ' ' + value)  # stored in the list
table_def = ', '.join(table_def_items)  # the table definition
#output.write("CREATE TABLE IF NOT EXISTS ptm_table (" + table_def + "); \n")  # save the CREATE into output
# NOTE(review): the DDL is built by string concatenation from `categories`;
# safe only if `categories` is trusted, never user input.
cur.execute("CREATE TABLE IF NOT EXISTS ptm_table (" + table_def + ") ENGINE=InnoDB")
con.commit()

# Template dictionary for the values extracted from the list
empty_record = OrderedDict()
for gato in categories:  # keys come from categories; default value is 'null'
    empty_record[gato] = 'null'
record = empty_record.copy()  # the empty record dictionary we will reuse
# the copy() is so the two dictionaries are not linked to each other

line = ptmlist.readline()  # start reading; the first line goes into "line"
while line != '':  # until the "last" line, i.e. end of file
    if line[:2] == '//':  # a "//" separator ends one PTM record: do an INSERT
        # output.write(str(record.items()))
        sql_insert_values = '\'' + '\', \''.join(record.itervalues()) + '\''  # join the values with commas
        tgs = (((sql_insert_values.replace("'", "").replace(".", "")).split(", "))[3])
        tgs = tgs.split("-")
        #print(len(tgs))

        #output.write(("INSERT INTO ptm_table VALUES (%r);"
        #              % sql_insert_values + '\n').replace("\"", '').replace('.', ''))
        # NOTE(review): `line` is never advanced in the visible loop body;
        # the remainder of this loop appears to be truncated in this paste.
        cur.execute(("INSERT INTO ptm_table VALUES (%r);"
                     % sql_insert_values + '\n').replace("\"", '').replace('.', ''))
class sizeNavMgr(object):
    """ A parent class that generates master data for use in size navigation.
    This object can only work after a database manager has been created.
    The main activity here is to group the master data by size (ten
    equal-width bins) and then navigate the data based on queries from
    the visualization window.
    """
    def __init__(self, mgr):

        # Initialize variables
        self.initializevars(mgr)

    # ------------------------------------------------------
    # Methods

    def respond(self, query=None):
        '''accepts input from the parent parent and responds by
            returning the appropriate data'''

        # Choose the appropriate method and get new data
        if query == 'back' or query == 'fwd' or query == 'rst':
            # History navigation: restore the level recorded for this
            # history point (index 1 is this navigator's slot)
            self.level = self.mgr.navshistory[self.mgr.historypoint][1]
            self.query = None

        elif query == 'update':
            self.query = None
            if self.level < 0: self.level += 1  # hand first pass update

            # For SizeNavigator only: perform some pre-processing
            self.preprocess()

        else:
            # A real size-range query: descend one level
            self.level += 1
            query = self.labelmap[query]
            self.queryhistory[
                self.
                level] = query  # TODO: duplicate write -- queryhistory is also set in updatemgr(); remove that one, keep this
            self.query = query

            # check the local raw data and select the data for this query
            selection = self.getselection(query)

            # for SizeNavigator only: perform some pre-processing
            self.preprocess()

            # Pass the selection to the datamanager for iteration
            self.mgr.iteriselection(selection,
                                    2)  # 2 = SizeNavigator's call-sign

    def getselection(self, query):
        """Return the file selection for *query*, or the saved selection
        for the current history point when query is None."""
        # Check the raw data
        if query == None:
            selection = self.selectionhistory[self.mgr.historypoint]
        else:
            selection = self.raw[query]
            self.updatemgr(query, selection)
        return selection

    def preprocess(self):
        """Build ten equal-width size bins covering the current level's
        size range, plus empty newraw/bindata shells for iteration."""
        if self.level == 0:
            smin, smax = self.limits[self.level][0], self.limits[self.level][1]
        else:
            query = self.queryhistory[self.level]
            smin, smax = self.processQuery(query)
            self.limits[self.level] = (smin, smax)

        smax += 10  #Hack to ensure biggest file falls into the mix

        # Create the bins with size ranges
        bincount = 10.00
        binsize = (smax - smin) / bincount
        low = smin
        self.bindata = OrderedDict({})  # captures the number of files per bin
        self.binlimits = OrderedDict(
            {})  # holds the upper size limit for each bin

        for bin in range(int(bincount)):
            high = low + binsize
            label = '%f-%f' % (low, high)
            self.binlimits[label] = high
            low = high

        # Reiterate and get the no of files per bin
        self.newraw = self.binlimits.copy()  # raw data for use in next level
        self.bindata = self.binlimits.copy()
        for bin in self.newraw:
            self.newraw[bin] = []  # empty the copy
        for bin in self.bindata:
            self.bindata[bin] = 0  # empty the copy

    def processfile(self, dfile):
        """Place one file record into the first bin whose upper size
        limit it fits under (dfile[6] is assumed to be the file size --
        TODO confirm against the database manager)."""
        size = dfile[6]
        for hbin in self.binlimits:
            if size <= self.binlimits[hbin]:
                self.newraw[hbin].append(dfile)
                break  # sue me

    def postprocess(self):
        """Count the files per bin, rebuild labels, and refresh the UI."""
        for hbin in self.newraw:
            self.bindata[hbin] = len(self.newraw[hbin])

        # Convert the labels and save a mapping
        data = self.processDataQueries(self.bindata)

        # Label processing before calling the visualizer
        for item in self.queryhistory:
            query = self.queryhistory[item]
            if item == 0: query = 'All'
            else: query = self.convertQuery(query)
            tag = 'Size Range'
            self.labels[item] = [tag, query]

        self.refreshWindows(data, self.level, self.labels)

    def convertQuery(self, query):
        """Turn a raw 'low-high' byte-range label into a human-readable
        label with Kb/Mb/Gb units."""
        query = query.split('-')
        low, high = float(query[0]), float(query[1])
        hunit, lunit = 'b', 'b'
        temp = [(1073741824, 'Gb'), (1048576, 'Mb'), (1024, 'Kb')]
        sizes = OrderedDict(temp)

        # Scale each bound by the largest unit it exceeds (largest first)
        for item in sizes:
            if high > item and hunit == 'b':
                high = high / item
                hunit = sizes[item]
            if low > item and lunit == 'b':
                low = low / item
                lunit = sizes[item]

        low, high = round(low, 2), round(high, 2)
        return '> ' + str(low) + lunit + '\n to \n' + str(high) + hunit

    def processQuery(self, query):
        """Parse a 'low-high' label into a (low, high) float pair."""
        query = query.split('-')
        low, high = float(query[0]), float(query[1])

        return (low, high)

    def processDataQueries(self, data):
        """Relabel *data* with human-readable keys; remembers the
        readable->raw mapping in self.labelmap for later lookups."""
        self.labelmap = OrderedDict({})
        newdata = OrderedDict({})
        for label in data:
            newlabel = self.convertQuery(label)
            self.labelmap[newlabel] = label

        for label in self.labelmap:
            datum = data[self.labelmap[label]]
            newdata[label] = datum
        return newdata

    def refreshWindows(self, data, level, labels):
        """Push the new data/level/labels to the size panel's window."""
        self.mgr.sbook.spanel.win.reload(data, level, labels)

    def updateRaw(self):
        """Promote the freshly binned data to be the next level's raw."""
        self.raw = self.newraw.copy()

    def updatemgr(self, query, selection):
        """Record *query*/*selection* in the shared manager history,
        discarding any forward history beyond the current point."""

        # Clear old selections from history
        cache = []
        for item in self.selectionhistory:
            if item > self.mgr.historypoint: cache.append(item)
        for item in cache:
            self.selectionhistory.pop(item)
            self.mgr.navsqueryhistory.pop(item)

        # Clear old queries from history
        cache = []
        for item in self.queryhistory:
            if item > self.level: cache.append(item)
        for item in cache:
            self.queryhistory.pop(item)
            self.labels.pop(item)
            self.limits.pop(item)

        self.mgr.historypoint += 1
        curr = self.mgr.historypoint
        prev = curr - 1

        # update the manager's query history here (only our slot changes)
        dquery, squery, tquery = self.mgr.navsqueryhistory[prev]
        squery = query
        self.mgr.navsqueryhistory[curr] = (dquery, squery, tquery)

        self.selectionhistory[self.mgr.historypoint] = selection[0:]
        self.mgr.sizequeryhist.append(query)
        self.queryhistory[self.level] = query

    def initializevars(self, mgr):
        """Set up state and scan the master list once for the global
        min/max file size (dfile[6]); assumes a non-empty master list."""
        self.query = 0
        self.type = 's'
        self.queryhistory = {0: 'All'}
        self.raw = {0: mgr.master[0:]}
        self.selectionhistory = mgr.selectionhistory
        self.level = -1
        self.mgr = mgr
        self.labels = {}
        self.limits = {}

        selection = self.raw[0]
        smax, smin = selection[0][6], selection[0][6]
        for dfile in selection:
            size = dfile[6]
            if size > smax:
                smax = size
            elif size < smin:
                smin = size

        self.limits[0] = (smin, smax)
# Exemple #17
# 0
class typeNavMgr(object):
    """ A parent class that generates master data for use in file-type
    navigation.  This object can only work after a database manager has
    been created.  The main activity here is to group the master data by
    file type (doc/vid/pic/aud/exe/others) and then navigate the data
    based on queries from the visualization windows.
    """
    '''This bit is retained for future reference (took forever to get this info)
        # Remove negative dates
        for item in self.raw:
        if item[4]<0: self.raw.pop(self.raw.index(item))

        # Sort on year
        self.raw.sort(key=lambda x: x[4])'''
    def __init__(self, mgr):

        # Initialize variables
        self.initializevars(mgr)

    # ------------------------------------------------------
    # Methods

    def respond(self, query=None):
        '''accepts input from the parent parent and responds by
            returning the appropriate data'''
        print 'TypeNavigator: recieved a %s command .....' % query
        # Choose the appropriate method and get new data
        if query == 'back' or query == 'fwd' or query == 'rst':
            # History navigation: index 2 of navshistory is this
            # navigator's saved level
            self.level = self.mgr.navshistory[self.mgr.historypoint][2]
            self.query = None

        elif query == 'update':
            if self.level < 0: self.level += 1  # handle first pass update
            self.query = None

            # prepare global newraw shell for mgr iteration to populate
            self.preprocess()

        else:
            self.level += 1
            self.query = query
            self.queryhistory[self.level] = query

            if self.level <= 1:
                # prepare global newraw shell for mgr iteration to populate
                self.preprocess()

                # check the local raw data and select the data for this query
                selection = self.getselection(self.query)

                # Pass the selection to the datamanager for iteration
                self.mgr.iteriselection(selection,
                                        3)  # 3 = TypeNavigator's call-sign

            else:
                # Roll back because I only want up to level 1
                self.queryhistory.pop(self.level)
                self.level -= 1

    def getselection(self, query):
        """Return the file selection for *query*, or the saved selection
        for the current history point when query is None."""
        # Check the raw data
        if query == None:
            selection = self.selectionhistory[self.mgr.historypoint]
        else:
            selection = self.raw[query]
            self.updatemgr(query, selection)
        return selection

    def preprocess(self):
        """Build empty newraw/data shells keyed by file type (level 0)
        or by the single queried type (level 1)."""
        if self.level == 0:
            # Define shell for the newraw and data
            self.newraw = OrderedDict([('doc', []), ('vid', []), ('pic', []),
                                       ('aud', []), ('exe', []),
                                       ('others', [])])
            self.data = OrderedDict([('doc', 0), ('vid', 0), ('pic', 0),
                                     ('aud', 0), ('exe', 0), ('others', 0)])

        elif self.level == 1:
            query = self.queryhistory[self.level]
            # Define shell for the newraw and data
            self.newraw = OrderedDict([(query, [])])
            self.data = OrderedDict([(query, 0)])

        elif self.level > 1:
            self.level -= 1

    def processfile(self, dfile):
        """Sort one file record into its type bucket (dfile[4] is
        presumably the file-type key -- TODO confirm)."""
        if self.level == 0:
            filetype = dfile[4]
            self.newraw[filetype].append(dfile)
        elif self.level == 1:
            filetype = dfile[4]
            self.newraw[filetype].append(dfile)

    def postprocess(self):
        """Count files per type, rebuild labels, refresh the UI."""
        for item in self.newraw:
            self.data[item] = len(self.newraw[item])

        # Label processing before calling the visualizer
        for item in self.queryhistory:
            if item == 0: query = 'All'
            else: query = self.queryhistory[item]
            tag = 'File-Type'
            self.labels[item] = [tag, query]

        self.refreshWindows(self.data, self.level, self.labels)

    def refreshWindows(self, data, level, labels):
        """Push the new data/level/labels to the type panel's window."""
        self.mgr.sbook.tpanel.win.reload(data, level, labels)

    def getdata(self, level, query=None):
        # NOTE(review): only getlvl0data exists in this class; the
        # level 1-4 branches would raise AttributeError if reached.
        if level == 0: data = self.getlvl0data(query)
        elif level == 1: data = self.getlvl1data(query)
        elif level == 2: data = self.getlvl2data(query)
        elif level == 3: data = self.getlvl3data(query)
        elif level == 4: data = self.getlvl4data(query)
        return data

    def updateRaw(self):
        """Promote the freshly grouped data to be the next level's raw."""
        self.raw = self.newraw.copy()

    def updatemgr(self, query, selection):
        """Record *query*/*selection* in the shared manager history,
        discarding any forward history beyond the current point."""

        # Clear old selections from history
        cache = []
        for item in self.selectionhistory:
            if item > self.mgr.historypoint: cache.append(item)
        for item in cache:
            self.selectionhistory.pop(item)
            self.mgr.navsqueryhistory.pop(item)

        # Clear old queries from history
        cache = []
        for item in self.queryhistory:
            if item > self.level: cache.append(item)
        for item in cache:
            self.queryhistory.pop(item)
            self.labels.pop(item)

        self.mgr.historypoint += 1
        curr = self.mgr.historypoint
        prev = curr - 1

        # update the manager's query history here (only our slot changes)
        dquery, squery, tquery = self.mgr.navsqueryhistory[prev]
        tquery = query
        self.mgr.navsqueryhistory[curr] = (dquery, squery, tquery)

        self.mgr.typequeryhist.append(query)
        self.queryhistory[self.level] = query
        self.selectionhistory[self.mgr.historypoint] = selection[0:]

    def updatenavs(self):
        """Ask the manager to bring the other navigators up to date."""
        self.mgr.updatenavs(1)

    def getlvl0data(self, query=None):
        '''Level zero presents each year with total file sizes
            raw data for this method should be the master list of files'''

        if query != None:
            selection = self.mgr.master
            # NOTE(review): updatemgr() is defined with two parameters;
            # this three-argument call would raise TypeError if executed.
            self.updatemgr(query, selection, 1)
        else:
            selection = self.selectionhistory[self.mgr.historypoint]

        # Define shell for the data
        newraw = OrderedDict([('doc', []), ('vid', []), ('pic', []),
                              ('aud', []), ('exe', []), ('others', [])])
        data = OrderedDict([('doc', 0), ('vid', 0), ('pic', 0), ('aud', 0),
                            ('exe', 0), ('others', 0)])

        #------------------------------------------------------------------------
        # Iterate through the selection and group files by type
        for dfile in selection:
            filetype = dfile[4]
            newraw[filetype].append(dfile)
            data[filetype] = data[filetype] + 1
#-------------------------------------------------------------------------

# Save the raw that level 1 will use to sort itself out
        # NOTE(review): updateRaw() takes no arguments; this call would
        # also raise TypeError if reached.
        self.updateRaw(newraw)
        return data

    def initializevars(self, mgr):
        """Set up navigation state plus the extension lists presumably
        used elsewhere to classify files into the type buckets."""
        self.query = 0
        self.type = 't'
        self.queryhistory = {0: 'All'}
        self.raw = []
        self.selectionhistory = mgr.selectionhistory
        self.level = -1
        self.mgr = mgr
        self.labels = {}
        self.fpass = True
        # Known document extensions
        self.documents = [
            'ade', 'adp', 'mpd', 'mde', 'mpc', 'mpp', 'mpv', 'vdx', 'mpx',
            'vsl', 'vst', 'vsw', 'vsx', 'vtx', 'dvi', 'eps', 'jnt', 'latex',
            'pm', 'pm5', 'ps', 'pt5', 'rtx', 'tex', 'xml', 'pdf', 'doc', 'dot',
            'htm', 'html', 'mht', 'one', 'rtf', 'txt', 'xml', 'ppz', 'pot',
            'pps', 'ppt', 'xls', 'xlw', 'csv', 'tsv', 'wks', 'xlb', 'xlc',
            'xll', 'xlm', 'xls', 'xlw', 'eml', 'msg', 'vcf', 'vcard', 'ics',
            'vcs'
        ]

        # Known video extensions
        self.videos = [
            'asf', 'asx', 'avi', 'awm', 'cmv', 'm1v', 'mmm', 'mov', 'mp2',
            'mpa', 'mpe', 'mpeg', 'mpg', 'mpv2', 'mwf', 'qt', 'vue', 'wmv',
            'wvx'
        ]

        # Known image extensions
        self.images = [
            '3ds', 'a11', 'ai', 'ani', 'anm', 'art', 'b_w', 'b&w', 'b1n', 'b8',
            'bga', 'bit', 'bld', 'bm', 'bmp', 'cdf', 'cdr', 'cmp', 'cps',
            'cvs', 'dib', 'dip', 'dcx', 'dkb', 'dw2', 'dwg', 'fh3', 'fh4',
            'fit', 'flc', 'fli', 'gcd', 'gif', 'gl', 'gsd', 'hrf', 'hs2',
            'hsi', 'iax', 'ica', 'ico', 'jas', 'jff', 'jpc', 'icb', 'jpe',
            'jpeg', 'jpg', 'jtf', 'lbm', 'mpt', 'msp', 'nc', 'neo', 'pct',
            'pcx', 'pda', 'pdd', 'pgm', 'pix', 'png', 'ppm', 'psd', 'pse',
            'qdv', 'rgb', 'rif', 'rip', 'rix', 'rl4', 'rl8', 'sg1', 'tif',
            'tiff', 'van', 'vda', 'vdr', 'wmf', 'xif', 'xpm'
        ]

        # Known audio extensions
        self.audio = [
            'aac', 'aif', 'aiff', 'amf', 'au', 'cda', 'dfs', 'dss', 'far',
            'm3u', 'mid', 'midi', 'mp3', 'mus', 'okt', 'p16', 'psm', 'ra',
            'ram', 'rmi', 's3m', 'snd', 'sng', 'stm', 'ult', 'uw', 'voc',
            'wav', 'wma', 'xm', 'xmi'
        ]

        # Known executable extensions
        self.executables = ['exe', 'msi', 'com']

        # Everything else that is recognised but uncategorised
        self.others = [
            'acf', 'aifc', 'ascx', 'asm', 'asp', 'aspx', 'cab', 'cpl', 'cs',
            'css', 'cur', 'def', 'dic', 'emf', 'gz', 'hhc', 'idq', 'ivf',
            'ivf', 'jfif', 'lnk', 'mapiipm.Note', 'mda', 'mp2v', 'odc', 'pl',
            'rle', 'scd', 'tar', 'tgz', 'tsp', 'wax', 'wbk', 'sch', 'wiz',
            'wm', 'wmp', 'wmx', 'wmz', 'wri', 'wsz', 'wtx', 'xlk', 'z', 'zip',
            'bat', 'c', 'cmd', 'cpp', 'cxx', 'Dif', 'disco', 'h', 'hpp', 'hxx',
            'idl', 'inc', 'inf', 'inx', 'js', 'nws', 'pl', 'ppa', 'pwz', 'rc',
            'reg', 'resx', 'slk', 'url', 'vbs', 'xla', 'xld', 'xlt', 'xlv',
            'xsl'
        ]


# ---------------------------------------------------------------------------
class dateNavMgr(object):
    """ A parent class that generates master data for use in date navigation.
    This object can only work after a database manager has been created.
    The main activity here is to group the master data by year and then navigate
    the data based on queries from visulization windows.
    """

    '''This bit is retained for future reference (took forever to get this info)
        # Remove negative dates
        for item in self.raw:
        if item[4]<0: self.raw.pop(self.raw.index(item))

        # Sort on year
        self.raw.sort(key=lambda x: x[4])'''

    def __init__(self, mgr):
        """Build the date navigator around an existing database manager *mgr*."""

        # Initialize variables
        self.initializevars(mgr)

    # ------------------------------------------------------
    # Methods

    def respond(self, query=None):
        '''Accept a navigation query from the parent window and refresh data.

        query is one of:
          * 'back' / 'fwd' / 'rst' -- history navigation: restore the level
            recorded in the manager's navigation history and replay it;
          * 'update' -- rebuild the shells for the current level only;
          * anything else -- drill one level deeper, using *query* as the
            selection key (year, month name, week number, ...).
        '''

        # Choose the appropriate method and get new data
        if query == 'back' or query == 'fwd' or query == 'rst':
            self.level = self.mgr.navshistory[self.mgr.historypoint][0]
            self.query = None

        elif query == 'update':
            if self.level < 0: self.level += 1 # handle first pass update
            self.query = None

            # prepare global newraw shell for mgr iteration to populate
            self.preprocess()
            return

        else:
            self.level += 1
            self.query = query

        if self.level <= 3:
            # prepare global newraw shell for mgr iteration to populate
            self.preprocess()

            # check the local raw data and select the data for this query
            selection = self.getselection(self.query)

            # Pass the selection to the datamanager for iteration
            self.mgr.iteriselection(selection, 1) # 1 = DateNavigator's call-sign
        else:
            # Roll back: drill-down is capped at level 3 (day view)
            self.level -= 1

    def getselection(self, query):
        """Return the raw-data slice matching *query*.

        A query of None means "replay history": hand back the selection
        stored at the manager's current history point.  Otherwise look the
        query up in the local raw data and record it in the histories.
        """
        if query == None:
            # Navigation replay -- no new query was issued.
            return self.selectionhistory[self.mgr.historypoint]
        chosen = self.raw[query]
        self.updatemgr(query, chosen)
        return chosen

    def preprocess(self):
        """Build empty self.data / self.newraw shells for the current level.

        Levels: 0 = group by year, 1 = months of a year, 2 = weeks of a
        month, 3 = days of a week, 4 = terminal (no deeper drill-down).
        processfile() later fills the shells during the manager's iteration
        pass; postprocess() then turns them into per-bucket counts.
        """
        if self.level == 0:
            self.newraw = OrderedDict({})

        elif self.level == 1:
            # Drill into one year: one bucket per month.
            if self.query: self.query = int(self.query)
            self.data = OrderedDict([('Jan', 0), ('Feb', 0), ('Mar',0),
                                ('Apr',0), ('May',0), ('Jun',0),
                                ('Jul',0), ('Aug',0), ('Sep',0),
                                ('Oct',0), ('Nov',0), ('Dec',0)])

            self.newraw =  OrderedDict([('Jan', []), ('Feb', []), ('Mar',[]),
                                ('Apr',[]), ('May',[]), ('Jun',[]),
                                ('Jul',[]), ('Aug',[]), ('Sep',[]),
                                ('Oct',[]), ('Nov',[]), ('Dec',[])])

        elif self.level == 2:
            # Drill into one month: one bucket per calendar week.
            if self.query: key = self.query
            else: key = self.queryhistory[self.level]
            year = self.queryhistory[self.level-1]
            monthkeys = {'Jan':1, 'Feb':2, 'Mar':3, 'Apr':4,
                        'May':5, 'Jun':6, 'Jul':7, 'Aug':8,
                        'Sep':9, 'Oct':10, 'Nov':11, 'Dec':12}

            month = monthkeys[key]
            self.data, self.newraw = OrderedDict({}), OrderedDict({})

            # get a 'list' of weeks in the month (each week is a list of days !!)
            self.weeks = calendar.monthcalendar(year, month)
            for i in range(len(self.weeks)):
                self.data[i+1] = 0 # initialize the week:no_of_files dictionary
                self.newraw[i+1] = [] # initialize the week:no_of_files dictionary

        elif self.level == 3:
            # Drill into one week: one bucket per day-of-month.
            if self.query: self.query = query = int(self.query)
            else: query = self.queryhistory[self.level]

            # Need month key one more time
            monthkey = {'Jan':1, 'Feb':2, 'Mar':3, 'Apr':4,
                    'May':5, 'Jun':6, 'Jul':7, 'Aug':8,
                    'Sep':9, 'Oct':10, 'Nov':11, 'Dec':12}
            year, month =  self.queryhistory[self.level-2],  self.queryhistory[self.level-1]
            month = monthkey[month]

            calmonth = calendar.monthcalendar(year, month)
            # NOTE(review): monthcalendar pads week rows with day 0 for days
            # outside the month, so a 0 key can appear in these shells too.
            self.week = calmonth[query-1][0:] # slice it just to be on the safe side
            self.data = OrderedDict({})
            self.newraw = OrderedDict({})

            # get the weeks in the month
            for i in range(len(self.week)):
                self.data[self.week[i]] = 0 # initialize the day:size dictionary
                self.newraw[self.week[i]] = []

        elif self.level == 4:
            print 'End of the line Boss !!!'
            if self.query:
                self.level -= 1
                self.query = None

            # Empty the outputs of level 3
            for i in range(len(self.week)):
                self.data[self.week[i]] = 0 # initialize the day:size dictionary
                self.newraw[self.week[i]] = []

        else:
            print 'DateNavigator: Error in preprocess ...'

    def processfile(self, dfile):
        """Bucket a single file record into self.newraw for the current level.

        dfile is a sequence; indices 7/8/9 appear to hold the record's
        year / month-name / day-of-month (matching the shells preprocess
        builds) -- TODO confirm against the manager's record layout.
        """
        if self.level == 0:
            # Group by year; create the year bucket on first sight.
            # (Was a bare except that could mask unrelated errors.)
            year = dfile[7]
            self.newraw.setdefault(year, []).append(dfile)

        elif self.level == 1:
            # Month shells were pre-built by preprocess, so no setdefault.
            month = dfile[8]
            self.newraw[month].append(dfile)

        elif self.level == 2:
            # Find which calendar week of the month contains this day.
            day = dfile[9]
            for weekno, week in enumerate(self.weeks, 1):
                if day in week:
                    self.newraw[weekno].append(dfile)

        elif self.level == 3:
            # Day-level grouping inside the selected week.  Membership test
            # also avoids double-appends for the 0-padding days that
            # calendar.monthcalendar can repeat in a week row.
            day = dfile[9]
            if day in self.newraw:
                self.newraw[day].append(dfile)

        elif self.level == 4:
            # Bug fix: the old code passed an undefined name 'query'
            # (guaranteed NameError); use the stored query instead.
            self.getlvl4data(self.query)

    def postprocess(self):
        """Turn the filled newraw buckets into counts in self.data, rebuild
        the navigation labels and redraw the visualization window.
        """
        if self.level == 0:
            # Get an empty copy of the years for the data dictionary
            self.data = self.newraw.copy()
            for year in self.data:
                self.data[year] = 0 # empty the copy to get a shell for data
            # Python 2: items() returns a list, so in-place sort/reverse work.
            temp = self.data.items() # extract key value pairs in a list of tuples
            temp.sort() # sort the extract
            temp.reverse() # reverse the sorted extract (newest year first)
            self.data = OrderedDict(temp) # recreate a dictionary using the new sorted key-value pairs list

            # Iterate through the raw list and group the data by year
            for year in self.newraw:
                self.data[year] = len(self.newraw[year]) # i.e. no_of-files

        elif self.level == 1:
            for month in self.data:
                self.data[month] = len(self.newraw[month]) # i.e. no_of-files

        elif self.level == 2:
            for week in self.data:
                self.data[week] = len(self.newraw[week]) # i.e. no_of-files

        elif self.level == 3:
            for day in self.data:
                self.data[day] = len(self.newraw[day]) # i.e. no_of_files
        else:
            pass

        # Label processing before calling the visualizer.
        # NOTE(review): 'item' is a level number keying queryhistory; level 0
        # is labelled 'All' -- confirm that is the intended test.
        for item in self.queryhistory:
            if item == 0: query = 'All'
            else: query = str(self.queryhistory[item])
            tag = self.leveltags[item]
            self.labels[item] = [tag, query]

        self.refreshWindows(self.data, self.level, self.labels)

    def refreshWindows(self, data, level, labels):
        """Push the freshly grouped data to the date-panel window for redraw."""
        self.mgr.sbook.dpanel.win.reload(data, level, labels)

    def getdata(self, level, query=None):
        """Dispatch to the per-level data getter (levels 0-4).

        The getlvl*data helpers are presumably defined elsewhere on this
        class -- not visible here; TODO confirm.

        Raises ValueError for an unknown level (the old fall-through
        produced an accidental NameError on 'return data').
        """
        if level == 0: data = self.getlvl0data(query)
        elif level == 1: data = self.getlvl1data(query)
        elif level == 2: data = self.getlvl2data(query)
        elif level == 3: data = self.getlvl3data(query)
        elif level == 4: data = self.getlvl4data(query)
        else:
            raise ValueError('getdata: unsupported level %r' % level)
        return data

    def updateRaw(self):
        """Promote the freshly grouped buckets to be the new raw data.

        Shallow copy: the per-bucket lists stay shared with newraw.
        """
        self.raw = self.newraw.copy()

    def updatemgr(self, query, selection):
        """Record *query*/*selection* in the shared navigation histories.

        Called only when a real drill-down query was issued (see
        getselection).  Like a browser, navigating after going "back"
        discards any history entries beyond the current point.
        """

        # Clear old selections from history (keys beyond the current point)
        cache = []
        for item in self.selectionhistory:
            if item > self.mgr.historypoint: cache.append(item)
        for item in cache:
            self.selectionhistory.pop(item)
            self.mgr.navsqueryhistory.pop(item)

        # Clear old queries from history (levels deeper than the current one)
        cache = []
        for item in self.queryhistory:
            if item > self.level: cache.append(item)
        for item in cache:
            self.queryhistory.pop(item)
            self.labels.pop(item)

        # Advance the history pointer; prev is used below to inherit the
        # other navigators' query state.
        self.mgr.historypoint += 1
        curr = self.mgr.historypoint
        if curr: prev = curr - 1
        else: prev = curr

        self.mgr.datequeryhist.append(query)
        self.queryhistory[self.level] = query
        self.selectionhistory[self.mgr.historypoint] = selection[0:]

        # update the manager's query history here
        # NOTE(review): dquery from the unpack is discarded and rebuilt from
        # queryhistory; only squery/tquery are carried over from prev.
        dquery, squery, tquery = self.mgr.navsqueryhistory[prev]
        dquery = []
        for item in self.queryhistory:
             step = self.queryhistory[item]
             dquery.append(step)

        self.mgr.navsqueryhistory[curr] = (dquery, squery, tquery)

    def initializevars(self, mgr):
        """Set the navigator's initial state from the database manager *mgr*."""
        self.query = 0                    # last query issued (0 = none yet)
        self.type = 'd'                   # navigator tag; presumably 'd' = date
        self.queryhistory = {}            # level -> query issued at that level
        self.raw = {0:mgr.master[0:]}     # level-0 raw data: copy of the master list
        self.selectionhistory = mgr.selectionhistory  # shared with the manager
        self.level = -1                   # -1 = nothing drilled down yet
        self.mgr = mgr
        self.leveltags = ['Date Modified', 'Year', 'Month', 'Week', 'Day', 'Hour']
        self.labels = {}                  # level -> [tag, query-string] for display
    def calc_hessian(self, reuse_first=False):
        """Returns the Hessian matrix for all outputs in the Driver's
        workflow.

        The result is stored as the nested dict
        self.hessian[param1][param2][output_name].

        reuse_first: bool
            Switch to reuse some data from the gradient calculation so that
            we don't have to re-run some points we already ran (namely the
            baseline, +eps, and -eps cases.) Obviously you do this when the
            driver needs gradient and hessian information at the same point,
            and calls calc_gradient before calc_hessian.
        """

        # Each component runs its calc_derivatives method.
        # We used to do this in the driver instead, but we've moved it in
        # here to make the interface more uniform.
        self._parent.calc_derivatives(second=True)

        self.setup()

        # Create our 3D dictionary the first time we execute.
        if not self.hessian:
            for name1 in self.param_names:
                self.hessian[name1] = {}
                for name2 in self.param_names:
                    self.hessian[name1][name2] = {}

        self.hessian_ondiag_case = OrderedDict()
        self.hessian_offdiag_case = OrderedDict()

        # Pull stepsizes from driver's parameters
        # (Python 2 code: dict.iteritems throughout.)
        base_param = OrderedDict()
        stepsize = {}
        for key, item in self._parent.get_parameters().iteritems():

            if item.fd_step:
                stepsize[key] = item.fd_step
            else:
                stepsize[key] = self.default_stepsize

        # Diagonal terms in Hessian always need base point
        # Usually, we will have saved this when we calculated
        # the gradient.
        if reuse_first:
            base_param = self.base_param
            base_data = self.base_data
        else:
            # Pull initial state from driver's parameters
            for key, item in self._parent.get_parameters().iteritems():
                base_param[key] = item.evaluate()

            # NOTE(review): _run_point presumably evaluates the workflow at
            # the given parameter dict and returns {output_name: value}.
            base_data = self._run_point(base_param)

        # Assemble input data
        # Cases : ondiag [fp, fm]
        deltas = [1, -1]
        for param in self.param_names:

            pcase = []
            for j_step, delta in enumerate(deltas):

                case = base_param.copy()
                case[param] += delta*stepsize[param]
                pcase.append({ 'param': case })

            self.hessian_ondiag_case[param] = pcase

        # Assemble input data
        # Cases : offdiag [fpp, fpm, fmp, fmm]
        deltas = [[1, 1],
                  [1, -1],
                  [-1, 1],
                  [-1, -1]]
        for i, param1 in enumerate(self.param_names):

            offdiag = {}
            for param2 in self.param_names[i+1:]:

                pcase = []
                for delta in deltas:

                    case = base_param.copy()
                    case[param1] += delta[0]*stepsize[param1]
                    case[param2] += delta[1]*stepsize[param2]
                    pcase.append({ 'param': case })
                offdiag[param2] = pcase

            self.hessian_offdiag_case[param1] = offdiag

        # Run all "cases".
        # TODO - Integrate OpenMDAO's concurrent processing capability once it
        # is formalized. This operation is inherently paralellizable.

        # We don't need to re-run on-diag cases if the gradients were
        # calculated with Central Difference.
        if reuse_first and self.form=='central':
            for key, case in self.hessian_ondiag_case.iteritems():

                gradient_case = self.gradient_case[key]
                for ipcase, pcase in enumerate(case):

                    gradient_ipcase = gradient_case[ipcase]
                    pcase['data'] = gradient_ipcase['data']
        else:
            for case in self.hessian_ondiag_case.values():
                for pcase in case:
                    data = self._run_point(pcase['param'])
                    pcase['data'] = data

        # Off-diag cases must always be run.
        for cases in self.hessian_offdiag_case.values():
            for case in cases.values():
                for pcase in case:
                    pcase['data'] = self._run_point(pcase['param'])


        # Calculate Hessians - On Diagonal
        # diff_2nd_xx presumably implements the central second difference
        # (f(x+h) - 2 f(x) + f(x-h)) / h^2 -- defined elsewhere.
        for key, case in self.hessian_ondiag_case.iteritems():

            eps = stepsize[key]

            for name in list(self.objective_names + \
                             self.eqconst_names + \
                             self.ineqconst_names):
                self.hessian[key][key][name] = \
                    diff_2nd_xx(case[0]['data'][name],
                                base_data[name],
                                case[1]['data'][name], eps)

        # Calculate Hessians - Off Diagonal
        # diff_2nd_xy presumably implements the mixed central difference
        # over the four [fpp, fpm, fmp, fmm] corner cases.
        for key1, cases in self.hessian_offdiag_case.iteritems():

            eps1 = stepsize[key1]
            for key2, case in cases.iteritems():

                eps2 = stepsize[key2]

                for name in list(self.objective_names + \
                                 self.eqconst_names + \
                                 self.ineqconst_names):
                    self.hessian[key1][key2][name] = \
                        diff_2nd_xy(case[0]['data'][name],
                                    case[1]['data'][name],
                                    case[2]['data'][name],
                                    case[3]['data'][name],
                                    eps1, eps2)

                    # Symmetry
                    # (Should ponder whether we should even store it.)
                    self.hessian[key2][key1][name] = \
                        self.hessian[key1][key2][name]
# Exemple #20
# 0
    "description": _(
        u"help_factsheets",
        default="Provide brief introductions to the Agency and to our "
            "activities.")}

# Publication-type registry entries: title plus translated help text.
pub_types["presentations"] = {
    "title": _(u"Presentations (PPT)"),
    "description": _(
        u"help_presentations",
        default="PPT presentations on various safety and health topics and "
            "EU-OSHA projects which can be freely adapted and reused. All "
            "English versions are also available on Slideshare "
            "(<a href='http://www.slideshare.net/euosha'>"
            "http://www.slideshare.net/euosha</a>)")}

pub_types["magazine"] = {
    "title": _(u"Magazine"),
    "description": _(
        u"help_magazine",
        default="In-depth information and analysis from leading experts and "
            "practitioners in occupational safety and health.")}

pub_types["evaluation_reports"] = {
    "title": _(u"Evaluation reports"),
    "description": _(
        u"help_evaluation_reports",
        # Grammar fix in the English fallback text: was
        # "... and contribute to improving our performance."
        default="An independent assessment of our work and a contribution "
            "to improving our performance.")}

# Snapshot of the registry for export.
PUB_TYPES = pub_types.copy()
# Exemple #21
# 0
class Application(object):
    """Poor WSGI application which is called by WSGI server.

    Working of is describe in PEP 0333. This object store route dispatch table,
    and have methods for it's using and of course __call__ method for use
    as WSGI application.
    """

    __instances = []

    def __init__(self, name="__main__"):
        """Application class is per name singleton.

        That means, there can exist only one instance with the same name.
        Raises RuntimeError when an application with *name* was already
        created.
        """

        # 'in' instead of list.count truthiness -- same semantics, clearer.
        if name in Application.__instances:
            raise RuntimeError('Application with name %s exist yet.' % name)
        Application.__instances.append(name)

        # Application name
        self.__name = name

        # list of pre and post process handlers
        self.__pre = []
        self.__post = []

        # dhandlers table for default handlers on methods {METHOD_GET: handler}
        self.__dhandlers = {}

        # handlers table of simple paths: {'/path': {METHOD_GET: handler}}
        self.__handlers = {}

        # route-group filters: name -> (regex, convertor); see set_filter
        self.__filters = {
            ':int': (r'-?\d+', int),
            ':float': (r'-?\d+(\.\d+)?', float),
            ':word': (r'\w+', uni),
            ':hex': (r'[0-9a-fA-F]+', str),
            ':re:': (None, uni),
            'none': (r'[^/]+', uni)
        }

        # handlers of regex paths: {r'/user/([a-z]?)': {METHOD_GET: handler}}
        self.__rhandlers = OrderedDict()

        # http state handlers: {HTTP_NOT_FOUND: {METHOD_GET: my_404_handler}}
        self.__shandlers = {}

        # -- Application variable
        self.__config = {
            'auto_args': True,
            'auto_form': True,
            'auto_json': True,
            'keep_blank_values': 0,
            'strict_parsing': 0,
            'json_content_types': [
                'application/json',
                'application/javascript',
                'application/merge-patch+json'],
            'auto_cookies': True,
            'debug': 'Off',
            'document_root': '',
            'document_index': 'Off',
            'secret_key': '%s%s%s%s' %
                          (__version__, version, getcwd(),
                           ''.join(str(x) for x in uname()))
        }

        try:
            self.__log_level = levels[environ.get('poor_LogLevel',
                                                  'warn').lower()]
        except KeyError:
            # Bug fix: only an unknown level name can fail the lookup; the
            # old bare except also swallowed unrelated errors.
            self.__log_level = LOG_WARNING
            self.log_error('Bad poor_LogLevel, default is warn.', LOG_WARNING)
        # endtry
    # enddef

    def __regex(self, match):
        """Translate one <name:filter> route group into a named regex group.

        match groups are (group name, filter spec).  The spec is looked up
        in the filter table; a ':re:...' spec carries its own user regex.
        Raises RuntimeError for an unknown filter.
        """
        groups = match.groups()
        _filter = str(groups[1]).lower()

        if _filter in self.__filters:
            regex = self.__filters[_filter][0]
        elif _filter[:4] == ':re:':     # :re: filter have user defined regex
            regex = _filter[4:]
        else:
            # Dead-code fix: the first branch already proved the filter is
            # unknown, so the old retry-lookup here could only raise its
            # KeyError; raise the RuntimeError directly instead.
            raise RuntimeError("Undefined route group filter '%s'" %
                               _filter)

        return "(?P<%s>%s)" % (groups[0], regex)
    # enddef

    def __convertor(self, _filter):
        """Return the convert function registered for a filter spec.

        Any ':re:...' spec maps to the generic ':re:' entry.  An unknown
        filter raises RuntimeError, mirroring __regex.
        """
        key = str(_filter).lower()
        if key[:4] == ':re:':
            key = ':re:'
        try:
            return self.__filters[key][1]
        except KeyError:
            raise RuntimeError("Undefined route group filter '%s'" % key)

    @property
    def name(self):
        """Return application name."""
        return self.__name

    @property
    def filters(self):
        """Copy of filter table.

        Filter table contains regular expressions and convert functions,
        see Application.set_filter and Application.route.

        Default filters are:
            :int - match number and convert it to int
            :float - match number and convert it to float
            :word - match one unicode word
            :hex - match hexadecimal value and convert it to str
            :re: - match user defined regular expression
            none - match any string without '/' character

        For more details see {/debug-info} page of your application, where
        you see all filters with regular expression definition.
        """
        return self.__filters.copy()

    @property
    def pre(self):
        """Tuple of table with pre-process handlers.

        See Application.pre_process.
        """
        return tuple(self.__pre)

    @property
    def post(self):
        """Tuple of table with post-process handlers.

        See Application.post_process.
        """
        return tuple(self.__post)

    @property
    def dhandlers(self):
        """Copy of table with default handlers.

        See Application.set_default.
        """
        return self.__dhandlers.copy()

    @property
    def handlers(self):
        """Copy of table with static handlers.

        See Application.route.
        """
        return self.__handlers.copy()

    @property
    def rhandlers(self):
        """Copy of table with regular expression handlers.

        See Application.route and Application.rroute.
        """
        return self.__rhandlers.copy()

    @property
    def shandlers(self):
        """Copy of table with http state aka error handlers.

        See Application.http_state.
        """
        return self.__shandlers.copy()

    @property
    def auto_args(self):
        """Automatic parsing request arguments from uri.

        If it is True (default), Request object does automatic parsing of
        the request uri to its args variable.
        """
        return self.__config['auto_args']

    @auto_args.setter
    def auto_args(self, value):
        self.__config['auto_args'] = bool(value)

    @property
    def auto_form(self):
        """Automatic parsing arguments from request body.

        If it is True (default) and method is POST, PUT or PATCH, Request
        object does automatic parsing of the request body to its form
        variable.
        """
        return self.__config['auto_form']

    @auto_form.setter
    def auto_form(self, value):
        self.__config['auto_form'] = bool(value)

    @property
    def auto_json(self):
        """Automatic parsing JSON from request body.

        If it is True (default), method is POST, PUT or PATCH and request
        content type is one of json_content_types, Request object does
        automatic parsing of the request body to its json variable.
        """
        return self.__config['auto_json']

    @auto_json.setter
    def auto_json(self, value):
        self.__config['auto_json'] = bool(value)

    @property
    def auto_cookies(self):
        """Automatic parsing cookies from request headers.

        If it is True (default) and the Cookie request header was set, a
        SimpleCookie object is parsed into the Request cookies property.
        """
        return self.__config['auto_cookies']

    @auto_cookies.setter
    def auto_cookies(self, value):
        self.__config['auto_cookies'] = bool(value)

    @property
    def debug(self):
        """Application debug as another way how to set poor_Debug.

        This setting will be overridden by the poor_Debug environment
        variable.
        """
        return self.__config['debug'] == 'On'

    @debug.setter
    def debug(self, value):
        self.__config['debug'] = 'On' if bool(value) else 'Off'

    @property
    def document_root(self):
        """Application document_root as another way how to set poor_DocumentRoot.

        This setting will be overridden by the poor_DocumentRoot environment
        variable.
        """
        return self.__config['document_root']

    @document_root.setter
    def document_root(self, value):
        self.__config['document_root'] = value

    @property
    def document_index(self):
        """Application document_index as another way how to set poor_DocumentIndex.

        This setting will be overridden by the poor_DocumentIndex environment
        variable.  (The old docstring was a copy-paste of document_root.)
        """
        return self.__config['document_index'] == 'On'

    @document_index.setter
    def document_index(self, value):
        self.__config['document_index'] = 'On' if bool(value) else 'Off'

    @property
    def secret_key(self):
        """Application secret_key could be replaced by poor_SecretKey in request.

        Secret key is used by PoorSession class. It is generated from
        some server variables, and the best way is to set your own long
        key."""
        return self.__config['secret_key']

    @secret_key.setter
    def secret_key(self, value):
        self.__config['secret_key'] = value

    @property
    def keep_blank_values(self):
        """Keep blank values in request arguments.

        If it is 1 (0 is default), automatic parsing of request uri or body
        keeps blank values as empty string.
        """
        return self.__config['keep_blank_values']

    @keep_blank_values.setter
    def keep_blank_values(self, value):
        self.__config['keep_blank_values'] = int(value)

    @property
    def strict_parsing(self):
        """Strict parse request arguments.

        If it is 1 (0 is default), automatic parsing of request uri or body
        raises an exception on parsing error.
        """
        return self.__config['strict_parsing']

    @strict_parsing.setter
    def strict_parsing(self, value):
        self.__config['strict_parsing'] = int(value)

    @property
    def json_content_types(self):
        """Copy of json content type list.

        Contains list of strings as json content types, which are used to
        decide when an automatic Json object is created from the request
        body.
        """
        return self.__config['json_content_types']

    def set_filter(self, name, regex, convertor=uni):
        """Create new filter or overwrite builtins.

        Arguments:
            name      - Name of filter which is used in route or set_route
                        method.
            regex     - regular expression which used for filter
            convertor - convertor function or class, which gets unicode in
                        input. Default is uni function, which is wrapper
                        to unicode string.

            app.set_filter('uint', r'\d+', int)
        """
        # Filter names are stored with a leading colon.
        if name[0] != ':':
            name = ':' + name
        self.__filters[name] = (regex, convertor)

    def pre_process(self):
        """Append pre-process handler.

        This is a decorator for a function to call before each request.

            @app.pre_process()
            def before_each_request(req):
                ...
        """
        def wrapper(fn):
            self.__pre.append(fn)
            return fn
        return wrapper
    # enddef

    def add_pre_process(self, fn):
        """Append pre-process handler.

        Method adds function to the list of functions which are called
        before each request.

            app.add_pre_process(before_each_request)
        """
        self.__pre.append(fn)
    # enddef

    def post_process(self):
        """Append post-process handler.

        This decorator appends a function to be called after each request,
        if you want to use it redefine all outputs.

            @app.post_process()
            def after_each_request(req):
                ...
        """
        def wrapper(fn):
            self.__post.append(fn)
            return fn
        return wrapper
    # enddef

    def add_post_process(self, fn):
        """Append post-process handler.

        Method for directly appending a function to the list of functions
        which are called after each request.

            app.add_post_process(after_each_request)
        """
        self.__post.append(fn)
    # enddef

    def default(self, method=METHOD_HEAD | METHOD_GET):
        """Set default handler.

        This is decorator for default handler for http method (called before
        error_not_found).

            @app.default(METHOD_GET_POST)
            def default_get_post(req):
                # this function will be called if no uri match in internal
                # uri table with method. It's similar like not_found error,
                # but without error
                ...
        """
        def wrapper(fn):
            self.set_default(fn, method)
            # Bug fix: return the function so the decorated name is not
            # rebound to None (route()'s wrapper already does this).
            return fn
        return wrapper
    # enddef

    def set_default(self, fn, method=METHOD_HEAD | METHOD_GET):
        """Register *fn* as the default handler for every http method set
        in the *method* bitmask (called before error_not_found).

            app.set_default(default_get_post, METHOD_GET_POST)
        """
        for mask in methods.values():
            if mask & method:
                self.__dhandlers[mask] = fn
    # enddef

    def pop_default(self, method):
        """Pop default handler for method.

        Returns the removed handler; raises KeyError when none is set.
        """
        # Bug fix: the table is a dict -- the old code *called* it as
        # self.__dhandlers(method), which always raised TypeError.
        return self.__dhandlers.pop(method)

    def route(self, uri, method=METHOD_HEAD | METHOD_GET):
        """Wrap function to be handler for uri and specified method.

        You can define uri as static path or as groups which are handed
        to the handler as next parameters.

            # static uri
            @app.route('/user/post', method=METHOD_POST)
            def user_create(req):
                ...

            # group regular expression
            @app.route('/user/<name>')
            def user_detail(req, name):
                ...

            # group regular expression with filter
            @app.route('/<surname:word>/<age:int>')
            def surnames_by_age(req, surname, age):
                ...

            # group with own regular expression filter
            @app.route('/<car:re:\w+>/<color:re:#[\da-fA-F]+>')
            def car(req, car, color):
                ...

        If you need to use a group name which is a python keyword, like
        class, you can use **kwargs syntax:

            @app.route('/<class>/<len:int>')
            def classes(req, **kwargs):
                return "'%s' class is %d length." % \
                    (kwargs['class'], kwargs['len'])

        Be sure with the ordering of calls to this decorator or the
        set_route function with group regular expressions. Regular
        expression routes are checked in the same order in which the
        internal table of them was created. The first match stops any other
        searching. In fact, if groups are detected, they are transformed
        into a normal regular expression and added to a second internal
        table.
        """
        def wrapper(fn):
            self.set_route(uri, fn, method)
            return fn
        return wrapper
    # enddef

    def set_route(self, uri, fn, method=METHOD_HEAD | METHOD_GET):
        """Set handler for uri and method.

        Another way to add fn as handler for uri. See Application.route
        documentation for details.

            app.set_route('/use/post', user_create, METHOD_POST)
        """
        uri = uni(uri)

        if re_filter.search(uri):
            # Route contains <name:filter> groups: translate them into a
            # named-group regex and register via the regex-route table.
            r_uri = re_filter.sub(self.__regex, uri) + '$'
            # One (group name, convert function) pair per <name:filter>.
            convertors = tuple((g[0], self.__convertor(g[1]))
                               for g in (m.groups()
                               for m in re_filter.finditer(uri)))
            self.set_rroute(r_uri, fn, method, convertors)
        else:
            # Static path: one nested dict entry per method bit in the mask.
            if uri not in self.__handlers:
                self.__handlers[uri] = {}
            for m in methods.values():
                if method & m:
                    self.__handlers[uri][m] = fn
    # enddef

    def pop_route(self, uri, method):
        """Remove and return handler for uri and method from handlers table.

        Method must be defined uniquely, so METHOD_GET_POST can not be used.
        To remove handlers for both methods, call pop_route once per method
        state. Raises KeyError when no handler is registered.
        """
        uri = uni(uri)

        if re_filter.search(uri):
            # group syntax present - this route lives in the regex table
            return self.pop_rroute(re_filter.sub(self.__regex, uri) + '$',
                                   method)

        per_method = self.__handlers.get(uri, {})
        fn = per_method.pop(method)
        if not per_method:
            # last method handler for this uri removed - drop the uri entry
            self.__handlers.pop(uri, None)
        return fn

    def is_route(self, uri):
        """Return True when any handler is registered for uri."""
        uri = uni(uri)
        if not re_filter.search(uri):
            return uri in self.__handlers
        # group syntax - look the route up in the regular expression table
        return self.is_rroute(re_filter.sub(self.__regex, uri) + '$')

    def rroute(self, ruri, method=METHOD_HEAD | METHOD_GET):
        r"""Wrap function to be handler for uri defined by regular expression.

        Both of function, rroute and set_rroute store routes to special
        internal table, which is another to table of static routes.

            @app.rroute(r'/user/\w+')               # simple regular expression
            def any_user(req):
                ...

            @app.rroute(r'/user/(?P<user>\w+)')     # regular expression with
            def user_detail(req, user):             # groups
                ...

        Be careful with the ordering of calls to this decorator or to the
        set_rroute function. Regular expression routes are checked in the
        same order in which they were added to the internal table. The first
        match stops any further searching.
        """
        def decorator(fn):
            # register fn and hand it back unchanged so it stays callable
            self.set_rroute(ruri, fn, method)
            return fn
        return decorator
    # enddef

    def set_rroute(self, r_uri, fn, method=METHOD_HEAD | METHOD_GET,
                   convertors=()):
        r"""Set handler for uri defined by regular expression.

        Another way to add fn as handler for uri defined by regular
        expression. See Application.rroute documentation for details.

            app.set_rroute('/use/\w+/post', user_create, METHOD_POST)

        This method is used internally, when groups are found in a static
        route added by the route or set_route method.
        """
        pattern = re.compile(r_uri, re.U)
        per_method = self.__rhandlers.setdefault(pattern, {})
        # register (fn, convertors) for every method bit set in the mask
        for m_flag in methods.values():
            if method & m_flag:
                per_method[m_flag] = (fn, convertors)
    # enddef

    def pop_rroute(self, r_uri, method):
        """Pop handler and convertors for uri and method from handlers table.

        For more details see Application.pop_route.
        """
        pattern = re.compile(r_uri, re.U)
        per_method = self.__rhandlers.get(pattern, {})
        record = per_method.pop(method)
        if not per_method:
            # no method left for this pattern - remove the whole record
            self.__rhandlers.pop(pattern, None)
        return record

    def is_rroute(self, r_uri):
        """Return True when regular expression uri has a registered record."""
        # re.compile caches patterns, so equal pattern + flags yields the
        # same key object as used by set_rroute
        return re.compile(r_uri, re.U) in self.__rhandlers

    def http_state(self, code, method=METHOD_HEAD | METHOD_GET | METHOD_POST):
        """Wrap function to handle http status codes like http errors.

        The decorated function is registered via set_http_state and returned
        unchanged, so the decorated name stays callable.
        """
        def wrapper(fn):
            self.set_http_state(code, fn, method)
            # bug fix: the decorator must return fn (as route/rroute do);
            # without it the decorated function was rebound to None
            return fn
        return wrapper
    # enddef

    def set_http_state(self, code, fn,
                       method=METHOD_HEAD | METHOD_GET | METHOD_POST):
        """Set fn as handler for http state code and method."""
        per_method = self.__shandlers.setdefault(code, {})
        # register fn for every single method contained in the method mask
        for m_flag in methods.values():
            if method & m_flag:
                per_method[m_flag] = fn
    # enddef

    def pop_http_state(self, code, method):
        """Pop handler for http state code and method.

        As Application.pop_route, to pop a multimethod handler you must call
        pop_http_state for each method. Raises KeyError when no handler is
        registered for the method.
        """
        # bug fix: original called self.__shandlers(code, {}) - a dict is
        # not callable; dict.get was intended
        handlers = self.__shandlers.get(code, {})
        rv = handlers.pop(method)
        if not handlers:    # is empty - consistent with pop_route/pop_rroute
            self.__shandlers.pop(code, None)
        return rv

    def error_from_table(self, req, code):
        """Internal method, which is called if an error has occurred.

        If status code is in Application.shandlers (fill with http_state
        function), call this handler.
        """
        if code in self.__shandlers \
                and req.method_number in self.__shandlers[code]:
            try:
                handler = self.__shandlers[code][req.method_number]
                # keep the original end-point handler info when it was
                # already set on this request
                if 'uri_handler' not in req.__dict__:
                    req.uri_rule = '_%d_error_handler_' % code
                    req.uri_handler = handler
                self.handler_from_pre(req)       # call pre handlers now
                handler(req)
            except:
                # bare except on purpose: a failing user error handler
                # falls back to the generic 500 page
                internal_server_error(req)
        elif code in default_shandlers:
            # no user handler - use the build-in default for this code
            handler = default_shandlers[code][METHOD_GET]
            handler(req)
        else:
            not_implemented(req, code)
    # enddef

    def handler_from_default(self, req):
        """Internal method, which is called if no handler is found."""
        if req.method_number not in self.__dhandlers:
            return
        req.uri_rule = '_default_handler_'
        req.uri_handler = self.__dhandlers[req.method_number]
        self.handler_from_pre(req)      # call pre handlers now
        retval = self.__dhandlers[req.method_number](req)
        if retval != DECLINED:
            raise SERVER_RETURN(retval)
    # enddef

    def handler_from_pre(self, req):
        """Internal method which runs all pre (pre_process) handlers.

        This method is called before the end-point route handler.
        """
        for pre_fn in self.__pre:
            pre_fn(req)

    def handler_from_table(self, req):
        """Call right handler from handlers table (fill with route function).

        If no handler is found, try to find directory or file if Document
        Root, resp. Document Index is set. Then try to call default handler
        for right method or call handler for status code 404 - not found.
        """

        # static routes
        if req.uri in self.__handlers:
            if req.method_number in self.__handlers[req.uri]:
                handler = self.__handlers[req.uri][req.method_number]
                req.uri_rule = req.uri      # nice variable for pre handlers
                req.uri_handler = handler
                self.handler_from_pre(req)  # call pre handlers now
                retval = handler(req)       # call right handler now
                # return text is allowed
                if isinstance(retval, str) \
                        or (_unicode_exist and isinstance(retval, unicode)):
                    req.write(retval, 1)    # write data and flush
                    retval = DONE
                if retval != DECLINED:
                    raise SERVER_RETURN(retval or DONE)  # could be state.DONE
            else:
                # uri is known, but not for this request method
                raise SERVER_RETURN(HTTP_METHOD_NOT_ALLOWED)
            # endif
        # endif

        # regular expression
        # routes are tested in insertion order; the first pattern which
        # matches the uri and supports the request method wins
        for ruri in self.__rhandlers.keys():
            match = ruri.match(req.uri)
            if match and req.method_number in self.__rhandlers[ruri]:
                handler, convertors = self.__rhandlers[ruri][req.method_number]
                req.uri_rule = ruri.pattern  # nice variable for pre handlers
                req.uri_handler = handler
                self.handler_from_pre(req)   # call pre handlers now
                if len(convertors):
                    # create OrderedDict from match instead of dict for
                    # convertors applying
                    req.groups = OrderedDict(
                        (g, c(v))for ((g, c), v) in zip(convertors,
                                                        match.groups()))
                    retval = handler(req, *req.groups.values())
                else:
                    req.groups = match.groupdict()
                    retval = handler(req, *match.groups())
                # return text is allowed
                if isinstance(retval, str) \
                        or (_unicode_exist and isinstance(retval, unicode)):
                    req.write(retval, 1)    # write data and flush
                    retval = DONE
                if retval != DECLINED:
                    raise SERVER_RETURN(retval or DONE)  # could be state.DONE
            # endif - no METHOD_NOT_ALLOWED here
        # endfor

        # try file or index
        if req.document_root():
            # map the request uri into a document-root path; normpath
            # collapses '..' segments in the uri part
            rfile = "%s%s" % (uni(req.document_root()),
                              path.normpath("%s" % uni(req.uri)))

            if not path.exists(rfile):
                if req.debug and req.uri == '/debug-info':      # work if debug
                    req.uri_rule = '_debug_info_'
                    req.uri_handler = debug_info
                    self.handler_from_pre(req)  # call pre handlers now
                    raise SERVER_RETURN(debug_info(req, self))
                self.handler_from_default(req)                  # try default
                raise SERVER_RETURN(HTTP_NOT_FOUND)             # not found

            # return file
            if path.isfile(rfile) and access(rfile, R_OK):
                req.uri_rule = '_send_file_'
                req.uri_handler = send_file
                self.handler_from_pre(req)      # call pre handlers now
                req.log_error("Return file: %s" % req.uri, LOG_INFO)
                raise SERVER_RETURN(send_file(req, rfile))

            # return directory index
            if req.document_index and path.isdir(rfile) \
                    and access(rfile, R_OK):
                req.log_error("Return directory: %s" % req.uri, LOG_INFO)
                req.uri_rule = '_directory_index_'
                req.uri_handler = directory_index
                self.handler_from_pre(req)      # call pre handlers now
                raise SERVER_RETURN(directory_index(req, rfile))

            # path exists, but is neither a readable file nor a listable
            # directory
            raise SERVER_RETURN(HTTP_FORBIDDEN)
        # endif

        if req.debug and req.uri == '/debug-info':
            req.uri_rule = '_debug_info_'
            req.uri_handler = debug_info
            self.handler_from_pre(req)          # call pre handlers now
            raise SERVER_RETURN(debug_info(req, self))

        self.handler_from_default(req)

        req.log_error("404 Not Found: %s" % req.uri, LOG_ERR)
        raise SERVER_RETURN(HTTP_NOT_FOUND)
    # enddef

    def __request__(self, environ, start_response):
        """Create Request instance and return wsgi response.

        This method create Request object, call handlers from
        Application.__pre (Application.handler_from_pre),
        uri handler (handler_from_table), default handler
        (Application.handler_from_default) or error handler
        (Application.error_from_table), and handlers from
        Application.__post.
        """
        req = Request(environ, start_response, self.__config)

        try:
            self.handler_from_table(req)
        except SERVER_RETURN as e:
            # SERVER_RETURN carries the http status code as control flow
            code = e.args[0]
            if code in (OK, HTTP_OK, DONE):
                pass
            # XXX: elif code in (HTTP_MOVED_PERMANENTLY,
            #                    HTTP_MOVED_TEMPORARILY):
            else:
                req.status = code
                self.error_from_table(req, code)
        except (BrokenClientConnection, SystemExit) as e:
            # client is gone or server is shutting down - nothing to send
            req.log_error(str(e), LOG_ERR)
            req.log_error('   ***   You shoud ignore next error   ***',
                          LOG_ERR)
            return ()
        except:
            # bare except on purpose: any unexpected handler error is
            # mapped to the 500 page
            self.error_from_table(req, 500)
        # endtry

        try:    # call post_process handler
            for fn in self.__post:
                fn(req)
        except:
            # post handler errors are mapped to the 500 page as well
            self.error_from_table(req, 500)
        # endtry

        return req.__end_of_request__()    # private call of request
    # enddef

    def __call__(self, environ, start_response):
        """Callable define for Application instance.

        This method run __request__ method.
        """
        if self.__name == '__poorwsgi__':
            # warn about the deprecated module-level default instance
            for warn_line in ("[W] Using deprecated instance of Application.\n",
                              "    Please, create your own instance\n"):
                stderr.write(warn_line)
            stderr.flush()
        return self.__request__(environ, start_response)

    def __profile_request__(self, environ, start_response):
        """Profiler version of __request__.

        This method is used if set_profile is used."""
        rv = []

        def wrapper(rv):
            # run the real request handler and keep its response
            rv.append(self.__original_request__(environ, start_response))

        # one .profile file per uri, slashes replaced so it is a flat name
        uri_dump = self._dump + environ.get('PATH_INFO').replace('/', '_') \
            + '.profile'
        self.log_error('Generate %s' % uri_dump, LOG_INFO)
        self._runctx('wrapper(rv)', globals(), locals(), filename=uri_dump)
        return rv[0]
    # enddef

    def __repr__(self):
        """Return short description of the instance with its name."""
        description = '%s - callable Application class instance'
        return description % self.__name

    def set_profile(self, runctx, dump):
        """Install a profiler wrapper around request processing.

        Arguments:
            runctx - function from profiler module
            dump - path and prefix for .profile files

        Typical usage:

            import cProfile

            cProfile.runctx('from simple import *', globals(), locals(),
                            filename="log/init.profile")
            app.set_profile(cProfile.runctx, 'log/req')
        """
        self._dump = dump
        self._runctx = runctx

        # keep the original handler so del_profile can restore it later
        self.__original_request__ = self.__request__
        self.__request__ = self.__profile_request__
    # enddef

    def del_profile(self):
        """Remove the profiler wrapper and restore direct request handling."""
        self.__request__ = self.__original_request__

    def get_options(self):
        """Returns dictionary with application variables from system environment.

        Application variables start with {app_} prefix,
        but in returned dictionary is set without this prefix.

            #!ini
            poor_LogLevel = warn        # Poor WSGI variable
            app_db_server = localhost   # application variable db_server
            app_templates = app/templ   # application variable templates

        This method works like Request.get_options, but work with
        os.environ, so it works only with wsgi servers, which set not only
        request environ, but os.environ too. Apaches mod_wsgi don't do that,
        uWsgi and PoorHTTP do that.
        """
        # keep only variables whose stripped name starts with app_ prefix;
        # the prefix is dropped and both name and value are stripped
        return dict(
            (key.strip()[4:].lower(), val.strip())
            for key, val in environ.items()
            if key.strip()[:4].lower() == 'app_')
    # enddef

    def log_error(self, message, level=LOG_ERR):
        """Logging method with the same functionality like in Request object.

        But as get_options read configuration from os.environ which could
        not work in same wsgi servers like Apaches mod_wsgi.

        This method write to stderr so messages, could not be found in
        servers error log!
        """
        # level is a (priority, name) pair: [0] compared against configured
        # log level, [1] printed as message prefix
        if self.__log_level[0] >= level[0]:
            if _unicode_exist and isinstance(message, unicode):
                # python 2: encode unicode message before writing to stderr
                message = message.encode('utf-8')
            try:
                stderr.write("<%s> [%s] %s\n" % (level[1], self.__name,
                                                 message))
            except UnicodeEncodeError:
                # stderr encoding can not represent the message - fall back
                # to ascii with backslash escapes; py2 and py3 need the
                # decode/encode steps in opposite order
                if _unicode_exist:
                    message = message.decode('utf-8').encode(
                        'ascii', 'backslashreplace')
                else:
                    message = message.encode(
                        'ascii', 'backslashreplace').decode('ascii')

                stderr.write("<%s> [%s] %s\n" % (level[1], self.__name,
                                                 message))
            stderr.flush()
    # enddef

    def log_info(self, message):
        """Shortcut for log_error with LOG_INFO level."""
        return self.log_error(message, LOG_INFO)

    def log_debug(self, message):
        """Shortcut for log_error with LOG_DEBUG level."""
        return self.log_error(message, LOG_DEBUG)

    def log_warning(self, message):
        """Shortcut for log_error with LOG_WARNING level."""
        return self.log_error(message, LOG_WARNING)