Example #1
def _produce_ordered_dict():
    data_dict = OrderedDict()
    data_dict.update({"Sheet1": [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]})
    data_dict.update({"Sheet2": [[4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6]]})
    data_dict.update(
        {"Sheet3": [[u"X", u"Y", u"Z"], [1, 4, 7], [2, 5, 8], [3, 6, 9]]})
    return data_dict
Example #2
    def edge_list(self):
        """ Return the list of edges for the derivatives of this workflow. """

        self._edges = super(CyclicWorkflow, self).edge_list()

        # TODO: Shouldn't have to do this every time.
        if len(self._mapped_severed_edges) > 0:

            cyclic_edges = OrderedDict()
            for edge in self._mapped_severed_edges:
                cyclic_edges[edge[0]] = edge[1]

            # Finally, modify our edge list to include the severed edges, and
            # exclude the boundary edges.
            for src, targets in self._edges.iteritems():
                if '@in' not in src or \
                        not any(edge in cyclic_edges.values() for edge in targets):
                    if isinstance(targets, str):
                        targets = [targets]

                    newtargets = []
                    for target in targets:
                        if '@out' not in target or \
                                        src not in cyclic_edges:
                            newtargets.append(target)

                    if len(newtargets) > 0:
                        cyclic_edges[src] = newtargets

            self._edges = cyclic_edges

        return self._edges
Example #3
def infer_column_types(csv_filename, count):
    csv_reader = csv.reader(open(csv_filename, 'rb'), delimiter=',', quotechar='"')
    types = OrderedDict()
    current = 0
    for row in csv_reader:
        # The first row contains column names.  Use them to build the dictionary keys.
        if current == 0:
            for item in row:
                types[item] = NoneType

        elif current < count or count == 0:
            index = 0
            for item in types:
                type = types[item]
                try:
                    int(row[index])
                    if type == NoneType:
                        type = IntType
                except:
                    try:
                        float(row[index])
                        if type == NoneType or type == IntType:
                            type = FloatType
                    except:
                        type = StringType
                types[item] = type
                index += 1
        current += 1
    return types
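The snippet above relies on Python 2 specifics: the `types` module constants (NoneType, IntType, FloatType, StringType) and opening the CSV in 'rb' mode. As a rough illustration of the same inference idea, here is a small Python 3 sketch that accepts any iterable of CSV lines; the helper name `infer_types` and the sample data are invented for the example.

import csv
from collections import OrderedDict

def infer_types(lines, count=0):
    """Guess a type (int, float, or str) per column; count=0 means scan every row."""
    types = OrderedDict()
    for current, row in enumerate(csv.reader(lines)):
        if current == 0:
            # Header row: column names become the dictionary keys.
            for name in row:
                types[name] = type(None)
        elif count == 0 or current < count:
            for name, cell in zip(types, row):
                try:
                    int(cell)
                    if types[name] is type(None):
                        types[name] = int
                except ValueError:
                    try:
                        float(cell)
                        if types[name] in (type(None), int):
                            types[name] = float
                    except ValueError:
                        types[name] = str
    return types

print(infer_types(["id,price,label", "1,2.5,a", "2,3.0,b"]))
# OrderedDict([('id', <class 'int'>), ('price', <class 'float'>), ('label', <class 'str'>)])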
Example #4
 def __new__(cls, name, bases, attrs):
     attrs['prefix'] = attrs.get('prefix', 'form')
     attrs = OrderedDict(
         sorted([(k, v) for k, v in attrs.iteritems()],
                cmp=lambda x, y: cmp(getattr(x[1], 'order_counter', None),
                                     getattr(y[1], 'order_counter', None))))
     return validators.DeclarativeMeta.__new__(cls, name, bases, attrs)
Example #5
 def test_delitem(self):
     pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
     od = OrderedDict(pairs)
     del od['a']
     self.assert_('a' not in od)
     self.assertRaises(KeyError, od.__delitem__, 'a')
     self.assertEqual(list(od.items()), pairs[:2] + pairs[3:])
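A small companion sketch (not part of the test above) of a related ordering property: a key that is deleted and later re-inserted lands at the end of the OrderedDict, not back at its old position.

from collections import OrderedDict

od = OrderedDict([('c', 1), ('b', 2), ('a', 3)])
del od['b']
od['b'] = 2          # re-inserted keys go to the end
print(list(od))      # ['c', 'a', 'b']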
Example #6
 def find_crashed_impalads(self, start_time):
     """If any impalads are found not running, they will assumed to have crashed. A crash
    info message will be return for each stopped impalad. The return value is a dict
    keyed by impalad. See Impalad.find_last_crash_message() for info about the returned
    messages. 'start_time' is used to filter log messages and core dumps, it should
    be set to the time when the Impala service was started. Impalads that have
    non-generic crash info will be sorted last in the returned dict.
 """
     stopped_impalads = self.find_stopped_impalads()
     if not stopped_impalads:
         return stopped_impalads
     messages = OrderedDict()
     impalads_with_message = dict()
     for i, message in izip(
             stopped_impalads,
             self.for_each_impalad(
                 lambda i: i.find_last_crash_message(start_time),
                 impalads=stopped_impalads)):
         if message:
             impalads_with_message[i] = "%s crashed:\n%s" % (i.host_name,
                                                             message)
         else:
             messages[
                 i] = "%s crashed but no info could be found" % i.host_name
     messages.update(impalads_with_message)
     return messages
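A minimal sketch of the ordering trick used above, with made-up host names: keys added through update() are appended after the keys already present, so the generic "no info could be found" entries stay first and the detailed crash messages end up last.

from collections import OrderedDict

messages = OrderedDict()
messages["host-2"] = "host-2 crashed but no info could be found"
detailed = {"host-1": "host-1 crashed:\nSIGSEGV ..."}
messages.update(detailed)        # new keys are appended at the end
print(list(messages))            # ['host-2', 'host-1']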
Example #7
def parse(tokens):
    if tokens[0] == '{':
        ret = OrderedDict()
        tokens = tokens[1:]
        while tokens[0] != '}':
            key = tokens[0]
            tokens = tokens[1:]

            tokens = tokens[1:]  # :

            value, tokens = parse(tokens)

            if tokens[0] == ',':
                tokens = tokens[1:]

            ret[key] = value
        tokens = tokens[1:]
        return ret, tokens
    elif tokens[0] == '[':
        ret = []
        tokens = tokens[1:]
        while tokens[0] != ']':
            value, tokens = parse(tokens)
            if tokens[0] == ',':
                tokens = tokens[1:]
            ret.append(value)
        tokens = tokens[1:]
        return ret, tokens
    else:
        return tokens[0], tokens[1:]
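A quick usage sketch, assuming the parse function above is in scope. The caller supplies a flat token list (braces, brackets, colons, and commas as their own tokens); leaf values come back as raw token strings.

tokens = ['{', 'name', ':', 'demo', ',', 'sizes', ':', '[', '1', ',', '2', ']', '}']
value, rest = parse(tokens)
print(value)   # OrderedDict([('name', 'demo'), ('sizes', ['1', '2'])])
print(rest)    # [] -- every token was consumed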
Example #8
 def check_data(self):
     if not isinstance(self.raw_data, dict):
         raise ValidationError('Value must be dict')
     instances = OrderedDict()
     for name, typ in self.named_types.iteritems():
         instances[name] = typ(self.raw_data.get(name), True)
     return instances
Example #9
 def __init__(self, content=None, content_type=None):
     self.content = content
     self.is_authorized = False
     self.extra_headers = OrderedDict()
     self.content_type = content_type
     #FIXME: Return a default HTML page if no other content present
     super(AuthorizationResponse, self).__init__()
Example #10
    def to_object_literal(self, partial=False, child=False):
        dictionary = OrderedDict()

        for f in self.fields:
            value = getattr(self, f.attribute_name)
            if not isinstance(value, bool) and not isinstance(
                    value, int) and not value:
                value = None
            elif isinstance(value, Field):
                value = None
            elif isinstance(value, str) and value == '':
                value = None

            if value is None:
                show_blank = not child and not f.blank
                if partial and f.extended:
                    show_blank = False
                if not f.produceable:
                    show_blank = False

                if show_blank:
                    if isinstance(f, BooleanField):
                        dictionary[f.dictionary_key] = False
                    elif isinstance(f, IntegerField):
                        dictionary[f.dictionary_key] = 0
                    else:
                        dictionary[f.dictionary_key] = None
            else:
                if isinstance(f, ViewModelField):
                    dictionary[f.dictionary_key] = value.to_object_literal(
                        partial=partial, child=True)
                elif not child or not f.extended:
                    dictionary[f.dictionary_key] = f.to_object_literal(value)

        return dictionary
Example #11
    def to_object_literal(self, partial=False, child=True):
        dictionary = OrderedDict()
        if self.view_models is not None:
            for view_model in self.view_models:
                dictionary[view_model.name] = view_model.value

        return dictionary
Example #12
    def json(self):
        """Returns a pretty JSON-encoded version of the exception

        Note: Excludes the HTTP status line, since the results of this call
        are meant to be returned in the body of an HTTP response.

        Returns:
            A JSON representation of the exception except the status line, or
            None if the title was set to None.

        """

        if self.title is None:
            return None

        obj = OrderedDict()
        obj['title'] = self.title

        if self.description:
            obj['description'] = self.description

        if self.code:
            obj['code'] = self.code

        if self.link:
            obj['link'] = self.link

        return json.dumps(obj,
                          indent=4,
                          separators=(',', ': '),
                          ensure_ascii=False)
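A standalone sketch of the same serialization pattern (the field values here are invented): an OrderedDict keeps 'title' first, and json.dumps with indent=4 and explicit separators produces the pretty output.

import json
from collections import OrderedDict

obj = OrderedDict()
obj['title'] = 'Payment Required'
obj['description'] = 'Your account balance is too low.'
obj['code'] = 1123

print(json.dumps(obj, indent=4, separators=(',', ': '), ensure_ascii=False))
# {
#     "title": "Payment Required",
#     "description": "Your account balance is too low.",
#     "code": 1123
# }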
Example #13
 def read_all(self):
     """read all available sheets"""
     result = OrderedDict()
     for sheet in self.native_book.sheets:
         data_dict = self.read_sheet(sheet)
         result.update(data_dict)
     return result
Example #14
    def set_movie_defaults(self, movies):
        """Returns an ordered dictionary containing the details for
        a list of movies [{"id":"movie1", "title":"Custom title"}]

        Description, video_width and video_height are only used by the
        main Film (not the episodes)"""
        movie_defaults = OrderedDict()
        for movie in movies:
            movie_id = movie["id"]
            mov = movie_defaults[movie_id] = {}
            mov["title"] = movie.setdefault("title", "heading_"+movie_id)
            mov["description"] = movie.setdefault("description",
                                                  "description_"+movie_id)
            mov["image"] = movie.setdefault(
                "image", urljoin(self.media_url, movie_id+".jpg"))
            mov["video_mp4"] = movie.setdefault(
                "video_mp4", urljoin(self.media_url, movie_id+".mp4"))
            mov["video_ogv"] = movie.setdefault(
                "video_ogv", urljoin(self.media_url, movie_id+".ogv"))
            mov["video_webm"] = movie.setdefault(
                "video_webm",  urljoin(self.media_url, movie_id+".webm"))
            mov["video_avi"] = movie.setdefault(
                "video_avi",  urljoin(self.media_url, movie_id+".avi"))
            mov["video_wmv"] = movie.setdefault(
                "video_wmv",  urljoin(self.media_url, movie_id+".wmv"))
            mov["video_width"] = movie.setdefault("video_width", 640)
            mov["video_height"] = movie.setdefault("video_height", 352)
        return movie_defaults
Example #15
    def add_package(self,
                    pkgname=None,
                    tag=None,
                    libs=None,
                    libdir=None,
                    incdir=None,
                    bindir=None,
                    pymodule=None):
        self._deps = OrderedDict()

        if pkgname:
            if not tag:
                raise BuildError("No tag specified")
            if self.selfupdate:
                #if not os.path.exists(tag):
                utils.update_tree(tag)
            self._add_path(pkgname, self.ASKAPROOT, tag, libs, libdir, incdir,
                           bindir, pymodule)
            self.q_print("info: Adding package '%s'" % pkgname)

        if tag:
            tag = os.path.join(self.ASKAPROOT, tag)

        self._get_dependencies(tag, explicit=True)

        parent = ''
        for key, value in self._deps.iteritems():
            self._add_path(key,
                           self.ASKAPROOT,
                           value["path"],
                           libs=value["libs"],
                           parent=parent)
            parent = value["path"]
Example #16
        def do_sort(obj):
            od = OrderedDict()
            # The usual order
            if sort_order:
                for key in sort_order:
                    if key in obj:
                        od[key] = obj[key]
                        del obj[key]
            # The number order
            if sort_numeric:
                nums = []
                for key in obj:
                    if key.isdigit():
                        nums.append(int(key))
                nums.sort()
                for num in nums:
                    key = str(num)
                    od[key] = obj[key]
                    del obj[key]
            # The remaining stuff (in alphabetical order)
            keys = sorted(obj.keys())
            for key in keys:
                od[key] = obj[key]
                del obj[key]

            assert len(obj) == 0
            return od
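do_sort is an inner function that closes over sort_order and sort_numeric; the standalone sketch below (names assumed) applies the same three-stage ordering, with those values passed in explicitly and the input left untouched.

from collections import OrderedDict

def sort_keys(obj, sort_order=(), sort_numeric=True):
    """Sketch: preferred keys first, then numeric keys, then the rest alphabetically."""
    od = OrderedDict()
    remaining = dict(obj)
    # The usual order
    for key in sort_order:
        if key in remaining:
            od[key] = remaining.pop(key)
    # The number order
    if sort_numeric:
        for num in sorted(int(k) for k in remaining if k.isdigit()):
            od[str(num)] = remaining.pop(str(num))
    # The remaining stuff (in alphabetical order)
    for key in sorted(remaining):
        od[key] = remaining.pop(key)
    return od

print(list(sort_keys({"10": 1, "2": 2, "name": 3, "id": 4}, sort_order=("id",))))
# ['id', '2', '10', 'name']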
Example #17
    def __init__(self, silent=True, autobuild=False):
        ## The ASKAP top-level directory
        self.ASKAPROOT = os.environ.get("ASKAP_ROOT")
        if self.ASKAPROOT is None:
            msg = "ASKAP_ROOT environment variable is not defined"
            raise BuildError(msg)
        #
        self.DEPFILE = "dependencies"  # The basename of the dependency file
        self.INSTALL_SUBDIR = "install"
        self._deps = OrderedDict()
        #
        self._bindirs = []
        self._incdirs = []
        self._libdirs = []
        self._rootdirs = []
        #
        self._cppflags = []  # XXX "defs" in package.info. LOFAR/log4cxx
        #
        self._env = []
        self._jars = []
        self._libs = []
        self._packages = []
        #
        self._ldlibpath = ""
        self._pypath = ""
        #
        self._autobuild = autobuild
        self._silent = silent  # minimal output
        self.selfupdate = False  # should object request updates from svn

        self._codename = utils.get_platform()['codename']
        self._system = utils.get_platform()['system'].lower()
        self._hostname = socket.gethostname().split(".")[0]
Example #18
    def attach(self, atomsptr):
        self.finalise()
        self._atomsptr = atomsptr

        self.n, n_int, n_real, n_str, n_logical, iloc, rloc, sloc, lloc, latticeloc, gloc = \
                   libatoms.atoms_get_data(self._atomsptr)

        self.int = arraydata((self.n, n_int), int, iloc)
        self.real = arraydata((self.n, n_real), float, rloc)
        self.str = arraydata((self.n, n_str), 'S10', sloc)
        self.logical = arraydata((self.n, n_logical), bool, lloc)

        self.lattice = arraydata((3, 3), float, latticeloc)
        self.g = arraydata((3, 3), float, gloc)

        self.params = {}

        property_code_map = {1: 'I', 2: 'R', 3: 'S', 4: 'L'}
        self.properties = OrderedDict()
        for i in range(libatoms.atoms_n_properties(self._atomsptr)):
            key, (code, startcol, stopcol) = libatoms.atoms_nth_property(
                self._atomsptr, i + 1)
            self.properties[key.strip()] = (property_code_map[code],
                                            slice(startcol - 1, stopcol))

        self.repoint()
Example #19
    def dispatch_request(self, *args, **kwargs):
        # Taken from flask
        #noinspection PyUnresolvedReferences
        meth = getattr(self, request.method.lower(), None)
        if meth is None and request.method == 'HEAD':
            meth = getattr(self, 'get', None)
        assert meth is not None, 'Unimplemented method %r' % request.method

        if isinstance(self.method_decorators, Mapping):
            decorators = self.method_decorators.get(request.method.lower(), [])
        else:
            decorators = self.method_decorators

        for decorator in decorators:
            meth = decorator(meth)

        resp = meth(*args, **kwargs)

        if isinstance(resp, ResponseBase):  # There may be a better way to test
            return resp

        representations = self.representations or OrderedDict()

        #noinspection PyUnresolvedReferences
        mediatype = request.accept_mimetypes.best_match(representations,
                                                        default=None)
        if mediatype in representations:
            data, code, headers = unpack(resp)
            resp = representations[mediatype](data, code, headers)
            resp.headers['Content-Type'] = mediatype
            return resp

        return resp
Example #20
    def alloc(self, n=0, n_int=0, n_real=3, n_str=1, n_logical=0, use_libatoms=False, atomsptr=None, properties=None, \
                 lattice=numpy.array([[100.,0.,0.],[0.,100.,0.],[0.,0.,100.]]), \
                 params=ParamReader(),element='Si'):

        if use_libatoms or atomsptr is not None:
            if atomsptr is None:
                self.attach(libatoms.atoms_initialise(n, lattice))
            else:
                self.attach(atomsptr)
        else:
            self.n = n
            self.lattice = lattice
            self.g = numpy.linalg.inv(self.lattice)
            self.params = params

            # Create single property for atomic positions
            self.real = numpy.zeros((self.n, n_real), dtype=float)
            self.int = numpy.zeros((self.n, n_int), dtype=int)
            self.str = numpy.zeros((self.n, n_str), dtype='S10')
            self.logical = numpy.zeros((self.n, n_logical), dtype=bool)

            if properties is None:
                self.properties = OrderedDict({
                    'species': ('S', slice(0, 1)),
                    'pos': ('R', slice(0, 3))
                })
            else:
                self.properties = properties

            self.repoint()
Example #21
    def __init__(self, config, quit_check_callback=None):
        """constructor for a registration object that runs an LRU cache
       cleaner"""
        self.config = config

        self.directory = os.path.abspath(config.symbol_cache_path)
        self.max_size = config.symbol_cache_size
        self.verbosity = config.verbosity
        # Cache state
        self.total_size = 0
        self._lru = OrderedDict()
        # pyinotify bits
        self._wm = pyinotify.WatchManager()
        self._handler = EventHandler(self, verbosity=config.verbosity)
        self._notifier = pyinotify.ThreadedNotifier(self._wm, self._handler)
        mask = pyinotify.IN_DELETE | pyinotify.IN_CREATE \
            | pyinotify.IN_OPEN | pyinotify.IN_MOVED_FROM \
            | pyinotify.IN_MOVED_TO | pyinotify.IN_MODIFY
        self._wdd = self._wm.add_watch(self.directory,
                                       mask,
                                       rec=True,
                                       auto_add=True)
        # Load existing files into the cache.
        self._get_existing_files(self.directory)
        self._notifier.start()
Example #22
        def parse_raw_package_data(raw):

            try:
                codename, component, arch, deb = re.split('\|+| ', raw)
                fields = OrderedDict({'codename': codename, 'component': component, 'arch': arch, 'deb': deb})
            except Exception as e:
                logging.warn(e)
                logging.warn('unparseable result 1=> %s' % raw)
                fields = None

            try:
                package_info = dpkg_deb().action('--show', os.path.join(self.options.basedir,deb))[0]
                #print package_info
                package, version, description, replaces, maintainer = package_info.split('\t')
                fields['package'] = package
                fields['version'] = version
                fields['description'] = description
                fields['replaces'] = replaces
                fields['maintainer'] = maintainer
            except RepositoryException as e:
                logging.warn(e)
                logging.warn('dpkg fail')
                fields = None

            except Exception as e:
                logging.warn(e)
                logging.warn(package_info)
                logging.warn('unparseable result 2=> %s' % raw)
                fields = None

            return fields
Example #23
 def test_clear(self):
     pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
     shuffle(pairs)
     od = OrderedDict(pairs)
     self.assertEqual(len(od), len(pairs))
     od.clear()
     self.assertEqual(len(od), 0)
Example #24
    def _register(cls, func, call_prehook=None):
        options = OrderedDict()
        if PY3:
            argcount = func.__code__.co_argcount
            varnames = func.__code__.co_varnames[:argcount]
            defaults = list(func.__defaults__ or [])
        else:
            argcount = func.func_code.co_argcount
            varnames = func.func_code.co_varnames[:argcount]
            defaults = list(func.func_defaults or [])
        defaults = [required] * (argcount - len(defaults)) + defaults
        for name, val in zip(varnames, defaults):
            options[name] = val

        cmdobj = cls(
            func,
            options,
            [l.strip() for l in func.__doc__.splitlines() if l][0]
        )
        cmdobj.call_prehook = call_prehook

        name = func.__name__.replace('_', '-')
        commands[name] = cmdobj
        func.cmdobj = cmdobj
        return func
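A rough sketch of just the signature-inspection step above (Python 3 branch only), with `required` standing in for whatever sentinel the original uses: positional parameter names map to their defaults, and parameters without a default get the sentinel.

from collections import OrderedDict

required = object()   # placeholder sentinel; the original's `required` marker is assumed

def options_of(func):
    argcount = func.__code__.co_argcount
    varnames = func.__code__.co_varnames[:argcount]
    defaults = list(func.__defaults__ or [])
    defaults = [required] * (argcount - len(defaults)) + defaults
    return OrderedDict(zip(varnames, defaults))

def deploy(target, retries=3, verbose=False):
    """Example command."""

opts = options_of(deploy)
print(list(opts))                                    # ['target', 'retries', 'verbose']
print(opts['retries'], opts['target'] is required)   # 3 True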
Example #25
    def parse(self, stream):
        """Parses the keys + values from a config file."""

        items = OrderedDict()
        for i, line in enumerate(stream):
            line = line.strip()
            if not line or line[0] in ["#", ";", "["] or line.startswith("---"):
                continue
            white_space = "\\s*"
            key = "(?P<key>[^:=;#\s]+?)"
            value = white_space+"[:=\s]"+white_space+"(?P<value>.+?)"
            comment = white_space+"(?P<comment>\\s[;#].*)?"

            key_only_match = re.match("^" + key + comment + "$", line)
            if key_only_match:
                key = key_only_match.group("key")
                items[key] = "true"
                continue

            key_value_match = re.match("^"+key+value+comment+"$", line)
            if key_value_match:
                key = key_value_match.group("key")
                value = key_value_match.group("value")

                if value.startswith("[") and value.endswith("]"):
                    # handle special case of lists
                    value = [elem.strip() for elem in value[1:-1].split(",")]

                items[key] = value
                continue

            raise ConfigFileParserException("Unexpected line %s in %s: %s" % (i,
                getattr(stream, 'name', 'stream'), line))
        return items
Example #26
 def __init__(self, filename, file_content=None, **keywords):
     self.workbook = xlrd.open_workbook(filename,
                                        file_contents=file_content)
     self.mysheets = OrderedDict()
     for name in self.workbook.sheet_names():
         data = to_array(XLSheet(self.workbook.sheet_by_name(name)))
         self.mysheets[name] = data
Example #27
def tree_to_repos(tree):
    """Given a TreeConfig, return a mapping {root: Vcs object} where root is a
    directory under tree.source_folder where root is a directory under
    tree.source_folder. Traversal of the returned mapping follows the order of
    deepest directory first.

    :arg tree: TreeConfig object representing a source code tree

    """
    sources = {}
    # Find all of the VCSs in the source directory:
    # We may see multiple VCS if we use git submodules, for example.
    for cwd, dirs, files in os.walk(tree.source_folder):
        for vcs in every_vcs:
            attempt = vcs.claim_vcs_source(cwd, dirs, tree)
            if attempt is not None:
                sources[attempt.root] = attempt

    # It's possible that the root of the tree is not a VCS by itself, so walk up
    # the hierarchy until we find a parent folder that is a VCS. If we can't
    # find any, then no VCSs exist for the top level of this repository.
    directory = tree.source_folder
    while directory != '/' and directory not in sources:
        directory = os.path.dirname(directory)
        for vcs in every_vcs:
            attempt = vcs.claim_vcs_source(directory, os.listdir(directory), tree)
            if attempt is not None:
                sources[directory] = attempt
    lookup_order = sorted(sources.keys(), key=len, reverse=True)
    # We want to make sure that we look up source repositories by deepest
    # directory first.
    ordered_sources = OrderedDict()
    for key in lookup_order:
        ordered_sources[key] = sources[key]
    return ordered_sources
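A minimal sketch of the final ordering step, using made-up paths: sorting the roots by length, longest first, means a lookup that walks the OrderedDict finds the deepest matching repository before its parents.

from collections import OrderedDict

sources = {"/src": "git (top level)", "/src/vendor": "hg", "/src/vendor/lib": "git submodule"}
ordered_sources = OrderedDict(
    (key, sources[key]) for key in sorted(sources, key=len, reverse=True))
print(list(ordered_sources))
# ['/src/vendor/lib', '/src/vendor', '/src']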
Example #28
 def get_restful_link_metadata(self):
     metadata = OrderedDict()
     metadata['href'] = self.get_restful_url()
     metadata['rel'] = 'alternate'
     metadata['title'] = self.name
     metadata['type'] = 'application/json'
     return metadata
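A small sketch of why an OrderedDict is handy here, with the values invented: when the metadata is later serialized, json.dumps emits keys in insertion order, so the link fields always appear as href, rel, title, type.

import json
from collections import OrderedDict

metadata = OrderedDict()
metadata['href'] = '/api/things/1'
metadata['rel'] = 'alternate'
metadata['title'] = 'Thing 1'
metadata['type'] = 'application/json'

print(json.dumps(metadata))
# {"href": "/api/things/1", "rel": "alternate", "title": "Thing 1", "type": "application/json"}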
Example #29
def get_column_types(csv_filename):
    csv_reader = csv.reader(open(csv_filename, 'rb'), delimiter=',', quotechar='"')
    types = OrderedDict()
    current = 0
    for row in csv_reader:
        # The first row contains column names.  Use them to build the dictionary keys.
        if current == 0:
            for item in row:
                types[item] = NoneType
        # The second row contains column types.  Use them to build the dictionary values.
        elif current == 1:
            index = 0
            for item in types:
                itemType = row[index]
                if itemType == "INT":
                	if have_pandas:
                		type = FloatType
                	else:
                		type = IntType
                elif itemType == "FLOAT":
                    type = FloatType
                else:
                    type = StringType

                types[item] = type
                index += 1

            return types

        current += 1
Example #30
def filterReply(bugInfo, statuses, resolutions):
    ignoreFields = ["id", "type", "description", "project"]
    newBugInfo = OrderedDict()
    transfer(bugInfo, newBugInfo, "key")
    transfer(bugInfo, newBugInfo, "summary")
    newBugInfo["status"] = findId(statuses, bugInfo["status"])
    if bugInfo.has_key("resolution"):
        newBugInfo["resolution"] = findId(resolutions,
                                          bugInfo["resolution"]) + "\n"
    else:
        transfer(bugInfo, newBugInfo, "assignee", "\n")
    newBugInfo["components"] = convertToString(bugInfo["components"])
    priorityStr = convertToString(bugInfo["priority"])
    priorityStr = str(int(priorityStr) -
                      1) if priorityStr.isdigit() else priorityStr
    remainder = filter(
        lambda k: k not in ignoreFields and
        (k not in newBugInfo or k == "priority") and isInteresting(bugInfo[k]),
        bugInfo.keys())
    remainder.sort()
    for key in remainder:
        if key == "priority":
            newBugInfo["priority"] = str(priorityStr)
        else:
            transfer(bugInfo, newBugInfo, key)
    return newBugInfo