Example #1
def get_treasury_source():
    url = """\
http://data.treasury.gov/feed.svc/DailyTreasuryYieldCurveRateData\
"""
    res = requests.get(url, stream=True)
    stream = iter_to_stream(res.text.splitlines())

    elements = ET.iterparse(stream, ("end", "start-ns", "end-ns"))

    namespaces = OrderedDict()
    properties_xpath = [""]

    def updated_namespaces():
        if "" in namespaces and "m" in namespaces:
            properties_xpath[0] = "{%s}content/{%s}properties" % (namespaces[""], namespaces["m"])
        else:
            properties_xpath[0] = ""

    for event, element in elements:
        if event == "end":
            tag = get_localname(element)
            if tag == "entry":
                properties = element.find(properties_xpath[0])
                datum = {get_localname(node): node.text for node in properties if ET.iselement(node)}
                # clear the element after we've dealt with it:
                element.clear()
                yield datum

        elif event == "start-ns":
            namespaces[element[0]] = element[1]
            updated_namespaces()

        elif event == "end-ns":
            namespaces.popitem()
            updated_namespaces()
Example #2
class RingCache(object):
    def __init__(self, maxEntries=100, isAutoAdd=False):
        self.max = maxEntries
        self.d = OrderedDict()
        self.isAutoAdd = isAutoAdd

    def add(self, k, v=None):
        # Re-inserting an existing key should refresh its position,
        # not evict another entry, so handle that case before the size check.
        if k in self.d:
            del self.d[k]
        elif self.max <= len(self.d):
            self.d.popitem(last=False)
        self.d[k] = v

    def get(self, k):
        return self.d[k]

    def remove(self, k):
        del self.d[k]

    def __contains__(self, k):
        if k in self.d:
            # Re-insert to move the key to the most recently used slot.
            v = self.d[k]
            del self.d[k]
            self.d[k] = v
            return True
        else:
            if self.isAutoAdd:
                self.add(k)
            return False

    def __len__(self):
        return len(self.d)

    def __repr__(self):
        return self.d.__repr__()
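A brief usage sketch of the cache above (hypothetical values): note that membership tests have side effects, since `__contains__` refreshes an entry's recency on a hit, and with `isAutoAdd` enabled a miss inserts the key with a value of `None`.

cache = RingCache(maxEntries=2, isAutoAdd=True)
cache.add("a", 1)
cache.add("b", 2)
"a" in cache       # True; also moves "a" to the most recently used slot
cache.add("c", 3)  # cache is full, so the least recently refreshed entry ("b") is evicted
"x" in cache       # False, but isAutoAdd inserts "x" (evicting "a") with value None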
Example #3
    def __setitem__(self, key, value):
        """
				>>> T = pykov.Matrix()
				>>> T[('A','B')] = .3
				>>> T
				{('A', 'B'): 0.3}
				>>> T['A','A'] = .7
				>>> T
				{('A', 'B'): 0.3, ('A', 'A'): 0.7}
				>>> T['B','B'] = 0
				>>> T
				{('A', 'B'): 0.3, ('A', 'A'): 0.7}
				>>> T['A','A'] = 0
				>>> T
				{('A', 'B'): 0.3}

				>>> T = pykov.Matrix({('A','B'): 3, ('A','A'): 7, ('B','A'): .1})
				>>> T.states()
				{'A', 'B'}
				>>> T['A','C']=1
				>>> T.states()
				{'A', 'B', 'C'}
				>>> T['A','C']=0
				>>> T.states()
				{'A', 'B'}
				"""
        if abs(value) > numpy.finfo(numpy.float).eps:
            value = int(value * 1000) * 1.0 / 1000.0
            OrderedDict.__setitem__(self, key, value)
        elif key in self:
            del (self[key])
Example #4
def download_replies(modeladmin, request, queryset):
    output = BytesIO()
    workbook = Workbook(output, {"in_memory": True})
    sheet = workbook.add_worksheet("test")

    question_columns = OrderedDict()
    max_column = 0
    row = 1
    for reply in queryset:
        for question in reply.answers.keys():
            current_column = question_columns.get(question, max_column)
            if current_column == max_column:
                question_columns[question] = max_column
                max_column += 1
            sheet.write(row, current_column, reply.answers[question])
        row += 1

    for question in question_columns.keys():
        sheet.write(0, question_columns[question], question)

    workbook.close()
    output.seek(0)
    response = HttpResponse(
        output.read(), content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
    )
    response["Content-Disposition"] = "attachment; filename=test.xlsx"
    return response
Example #5
class Config(object):
    def __init__(self, axes, positions):
        def sort_key(nx):
            name, axis = nx
            try:
                return (0, "xyz".index(name))
            except ValueError:
                return (1, name)

        self.axes = OrderedDict(sorted(((a.name, a) for a in axes), key=sort_key))
        self.positions = {pos.name: pos for pos in positions}

    @property
    def x(self):
        return self.axes.get("x")

    @property
    def y(self):
        return self.axes.get("y")

    @property
    def z(self):
        return self.axes.get("z")

    @property
    def home(self):
        return self.positions.get("home")

    @property
    def origin(self):
        return self.positions.get("origin")
Example #6
 def group(self, context, v, _get=ExpressionModifiersBase._lookup_key):
     seq, key = v
     result = OrderedDict()
     for item in seq:
         k = _get(item, key)
         result.setdefault(k, []).append(item)
     return result
Example #7
 def unique_parameters(self):
     # We actually need to remove duplicates from the list of parameters
     # (and their corresponding gradients) in order to support reusing
     # the same layer at multiple places in the graph,
     # e.g. do weight sharing.
     params, grads = self.parameters()
     return (list(_OrderedDict.fromkeys(params).keys()), list(_OrderedDict.fromkeys(grads).keys()))
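The `_OrderedDict.fromkeys` call above is a common order-preserving deduplication idiom; a standalone illustration with hypothetical parameter names:

from collections import OrderedDict

params = ["w1", "b1", "w1", "w2", "b1"]  # duplicates from a shared layer
print(list(OrderedDict.fromkeys(params)))  # ['w1', 'b1', 'w2'] -- duplicates dropped, order kept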
Example #8
    def __init__(self):
        # Current theme
        self.theme_key = ""
        self.default = "solarized_lt"
        self.hsv = [0.00, 0.29, 0.35]  # dark rose
        self.theme_contrast = 65
        self.d = OrderedDict()
        self._ui_palette = None
        self._palette = None
        self._accent_palettes = {}
        self.current_hex = ""
        self.transparent = Qt.transparent
        self.gradient = QtGui.QRadialGradient(0, 0, 300)
        self.gradient.setSpread(QtGui.QGradient.PadSpread)
        self.custom = False
        # Theme management
        self.default_themes = color_themes
        self.custom_themes = OrderedDict()

        # Create some defaults
        self.activate_color_theme(self.default, try_to_remember=False)

        # Keep an eye on relevant changes
        ctrl.add_watcher(self, "document_changed")
        ctrl.add_watcher(self, "color_themes_changed")
Example #9
def read_parameters():
    SECTION = "default"

    parser = OptionParser()
    config = configparser.ConfigParser()
    config.read("environments.conf")
    environments = OrderedDict()

    for option in config.options(SECTION):
        environments[option] = config.get(SECTION, option)

    opt_environments = ", ".join(environments.keys())
    default = next(reversed(environments))
    parser.add_option(
        "-e",
        "--environment",
        dest="environment",
        default=default,
        type="choice",
        choices=list(environments.keys()),
        help="set default environment to: " + opt_environments,
    )

    (options, args) = parser.parse_args()

    return environments.get(options.environment, None)
Example #10
 def __init__(self, *args, **kwds):
     self.size_limit = kwds.pop("size_limit", 300000)
     self.size_grace = kwds.pop("size_grace", 100000)
     self.overflow_callback = kwds.pop("overflow_callback", None)
     self.overflow = False
     OrderedDict.__init__(self, *args, **kwds)
     self._check_size_limit()
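`_check_size_limit` is not defined in this snippet; a plausible sketch, assuming the intended semantics are to evict the oldest entries once `size_limit` is exceeded, free up `size_grace` slots of headroom, and fire `overflow_callback` on the first overflow:

 def _check_size_limit(self):
     # Assumed semantics: once len() exceeds size_limit, evict the oldest
     # entries until size_grace slots are free, then flag the overflow once.
     if len(self) > self.size_limit:
         while len(self) > self.size_limit - self.size_grace:
             self.popitem(last=False)  # drop the oldest entry first
         if not self.overflow:
             self.overflow = True
             if self.overflow_callback is not None:
                 self.overflow_callback()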
Example #11
def merge_data_for_people(people, models):
    """
        Collect data for a certain set of people from a list of model objects.
        Merge results from models that have the same name.
    """

    # All headers from the models
    all_headers = list(chain.from_iterable([m.analytics_headers() for m in models]))

    # Initialize a dict containing all people each with all headers,
    # and default values of '' for each header
    data = OrderedDict()
    headers = []
    for h in all_headers:
        if h["title"] not in data.keys():
            data[h["title"]] = ""
            headers.append(h)
    persondata = dict((p, data.copy()) for p in people)
    persondata["_headers"] = headers

    for m in models:
        mdata = m.analytics_data_by_person()
        for p in people:
            if p in mdata:
                persondata[p].update(mdata[p])
    return persondata
Example #12
    def meta_create_private_school_schema(self):
        import csv
        from collections import OrderedDict

        if not self.database.exists():
            self.database.create()

        file_name = self.filesystem.path(self.metadata.build.name_transforms)

        with open(file_name) as f:
            reader = csv.reader(f)  # Don't use DictReader -- need to preserve ordering.
            header = next(reader)

            fields = OrderedDict()
            for row in reader:
                row = dict(zip(header, row))
                fields[row["to_name"]] = row["size"]

        table_name = "private_schools"

        with self.session:
            table = self.schema.add_table(table_name)
            table.add_column("id", datatype="integer", is_primary_key=True)

            for field, size in fields.items():
                table.add_column(field, datatype="varchar", width=int(size), description="")
Example #13
def get_fielddata(model_object, search_tables, field_information_filter=None, extra_properties=None):
    """
    returns an ordered dict of field_name->{value:value,fieldinformation:fi}
    to be used to display the item in the UI Detail views
    extra_properties are non-standard getters that wouldn't normally be returned (restricted fields)
    """
    # Avoid the mutable-default-argument pitfall.
    if extra_properties is None:
        extra_properties = []
    property_dict = get_properties(model_object)
    for prop in extra_properties:
        property_dict[prop] = getattr(model_object, prop)
        logger.info(str(("got extra prop", prop, property_dict[prop])))

    logger.debug(str(("property_dict", property_dict)))
    ui_dict = {}
    for field, value in property_dict.items():
        logger.debug(str(("get_field_info", field)))
        details = {}
        try:
            fi = FieldInformation.manager.get_column_fieldinformation_by_priority(field, search_tables)

            if fi and (field_information_filter is None or field_information_filter(fi)):
                details["fieldinformation"] = fi
                details["value"] = value
                ui_dict[field] = details
                # ui_dict[fi.get_verbose_name()] = value
            else:
                logger.debug(str(("field not shown in this view: ", field, value)))
        except Exception as e:
            # ObjectDoesNotExist and MultipleObjectsReturned both land here.
            logger.debug(str(("no field information defined for: ", field, value, e)))
    ui_dict = OrderedDict(sorted(ui_dict.items(), key=lambda x: x[1]["fieldinformation"].order))
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug(str(("ui_dict", ui_dict)))
    return ui_dict
Example #14
def cast_parameters(xml_block, default_parameters):
    """
    Cast parameters of an xml block with the default parameters
    """
    result = ODict(default_parameters)
    for name, value in xml_block.parameters.items():
        if name not in default_parameters:
            msg = "Action '{}' has no parameters called '{}'"
            msg = msg.format(xml_block.block_id, name)
            raise ActionCreationError(msg)
        try:
            if isinstance(default_parameters[name], bool) and isinstance(value, str):
                if value.lower() in ["true", "1"]:
                    cast_value = True
                elif value.lower() in ["false", "0"]:
                    cast_value = False
                else:
                    raise ValueError(value)
            else:
                cast_value = type(default_parameters[name])(value)
        except Exception:
            msg = "Error while casting parameters '{}' of action '{}'"
            msg = msg.format(name, xml_block.block_id)
            raise ActionCreationError(msg)
        result[name] = cast_value
    # Save values
    xml_block.parameters = ODict(result)
    # Cast Enum to string
    for name, value in result.items():
        if isinstance(value, BaseEnum):
            result[name] = str(value)
    return result
Example #15
    def load_config_file(self, config_path):
        """load and parse yaml config file"""

        if not path.exists(config_path):
            raise IOError('Unable to find config file "{}"'.format(config_path))

        self.config_path = path.abspath(config_path)

        with open(config_path) as f:
            # safe_load avoids executing arbitrary YAML tags.
            self.config = yaml.safe_load(f)

        # parse initial extent
        extent = self.config["initial_extent"]
        self.map_extent = [extent["xmin"], extent["ymin"], extent["xmax"], extent["ymax"]]

        # parse plots
        self.axes = OrderedDict()
        for p in self.config["axes"]:
            self.axes[p["name"]] = (p["name"], p["xaxis"], p["yaxis"])
        self.active_axes = list(self.axes.values())[0]

        # parse summary field
        self.fields = OrderedDict()
        for f in self.config["summary_fields"]:
            self.fields[f["name"]] = f["field"]
        self.field = list(self.fields.values())[0]
Example #16
 def getFullNumber(self):
     """Combines Address components to create a full address label
     @return: String"""
     # in some instance we could go to the API get 'full number' however this
     # is not included in responses hence the need for this method
     d = OrderedDict(
         [
             ("_components_unitValue", "{}/"),
             ("_components_addressNumber", "{}"),
             ("_components_addressNumberHigh", "-{}"),
             ("_components_addressNumberSuffix", "{}"),
         ]
     )
     fullNumber = ""
     for k, v in d.items():
         if self._changeType in ("Update", "Add") or self.meta.requestId:  # request objs are flat
             if hasattr(self, k):
                 numComponent = getattr(self, k)
                 if numComponent:
                     fullNumber += v.format(numComponent)  # skip Nones in resp objs
         else:
             # Response objects nest address components under meta.entities.
             entity = self.meta.entities[0]
             if hasattr(entity, k):
                 numComponent = getattr(entity, k)
                 if numComponent:
                     fullNumber += v.format(numComponent)
     return fullNumber
Example #17
def get_insight_data(request):
    insight = request.GET["insight"]
    abscissa = request.GET["abscissa"]
    vary_by = request.GET.get("vary_by")
    inputs = json.loads(request.GET["inputs"])
    inputs.pop(abscissa)
    if vary_by:
        inputs.pop(vary_by)
    defaults = get_default_inputs(insight)

    cb = Couchbase.connect(bucket="experiments", **settings.COUCHBASE_SERVER)

    data = defaultdict(list)
    for row in cb.query("experiments", "experiments_by_name", key=insight, stale=False):
        value = row.value
        value_inputs = dict(defaults, **value["inputs"])
        if dict(value_inputs, **inputs) == value_inputs:
            key = value["inputs"].get(vary_by, defaults.get(vary_by))
            data[key].append((value_inputs[abscissa], value["value"]))
    for k, v in data.items():
        v.sort(key=lambda xy: xy[0])
    data = OrderedDict(sorted(data.items()))

    content = json.dumps(data)
    return HttpResponse(content)
Example #18
    def __new__(mcs, name, bases, attrs):
        # Collect fields from current class.
        current_fields = []
        for key, value in list(attrs.items()):
            if isinstance(value, Field):
                current_fields.append((key, value))
                attrs.pop(key)
        current_fields.sort(key=lambda x: x[1].creation_counter)
        attrs["declared_fields"] = OrderedDict(current_fields)

        new_class = super(DeclarativeFieldsMetaclass, mcs).__new__(mcs, name, bases, attrs)

        # Walk through the MRO.
        declared_fields = OrderedDict()
        for base in reversed(new_class.__mro__):
            # Collect fields from base class.
            if hasattr(base, "declared_fields"):
                declared_fields.update(base.declared_fields)

            # Field shadowing.
            for attr, value in base.__dict__.items():
                if value is None and attr in declared_fields:
                    declared_fields.pop(attr)

        new_class.base_fields = declared_fields
        new_class.declared_fields = declared_fields

        return new_class
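A minimal usage sketch for a metaclass like the one above (assuming it is declared as a subclass of `type`, and using a hypothetical `Field` stand-in that provides the `creation_counter` the sort key expects, as in Django's forms machinery):

import itertools

class Field(object):
    _counter = itertools.count()

    def __init__(self):
        # Mimic Django's creation-order bookkeeping (assumed behavior).
        self.creation_counter = next(Field._counter)

class LoginForm(metaclass=DeclarativeFieldsMetaclass):
    username = Field()
    password = Field()

print(list(LoginForm.base_fields))  # ['username', 'password'] -- declaration order kept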
Example #19
    @staticmethod
    def _adjust_plot_labels(plot_style, plots):
        """Plot labels can only be changed after the data has been plotted."""
        # We add multiple instances of line descriptions - get rid of them via OrderedDict
        handles, labels = plots[0].get_legend_handles_labels()
        by_label = OrderedDict(zip(labels, handles))
        if plots[0] is not plots[-1]:
            # If we have subplots, add legend to a separate subplot.
            main_plot = plots[1]
            kwargs = dict(bbox_to_anchor=(0, 0, 1, 1), mode="expand", loc="lower left")
        else:
            main_plot = plots[0]
            kwargs = dict(loc="best")
        if plot_style == "slide":
            kwargs["fontsize"] = 30
        plots[-1].legend(by_label.values(), by_label.keys(), ncol=2, **kwargs)

        major_locator = mdates.AutoDateLocator(minticks=3, maxticks=6)
        formatter = mdates.AutoDateFormatter(major_locator)
        formatter.scaled = {
            mdates.DAYS_PER_YEAR: "%Y",
            mdates.DAYS_PER_MONTH: "%b",
            mdates.DAYS_PER_WEEK: "CW %V",
            1.0: "%m-%d",
            1.0 / mdates.HOURS_PER_DAY: "%H:%M:%S",
            1.0 / mdates.MINUTES_PER_DAY: "%H:%M:%S.%f",
        }

        main_plot.xaxis.set_major_locator(major_locator)
        main_plot.xaxis.set_minor_locator(mticker.AutoMinorLocator(n=mdates.DAYS_PER_WEEK))
        main_plot.xaxis.set_major_formatter(formatter)
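The `OrderedDict(zip(labels, handles))` idiom above collapses duplicate legend labels, keeping the last handle seen for each label while preserving first-appearance order; a standalone illustration with hypothetical stand-ins for matplotlib artists:

from collections import OrderedDict

labels = ["train", "test", "train"]
handles = ["h1", "h2", "h3"]  # stand-ins for real Line2D handles
by_label = OrderedDict(zip(labels, handles))
print(list(by_label.items()))  # [('train', 'h3'), ('test', 'h2')]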
Example #20
    def get_volume_types_data(self):
        try:
            volume_types = cinder.volume_type_list_with_qos_associations(self.request)
        except Exception:
            volume_types = []
            exceptions.handle(self.request, _("Unable to retrieve volume types"))

        encryption_allowed = policy.check((("volume", "volume_extension:volume_type_encryption"),), self.request)

        if encryption_allowed:
            # Gather volume type encryption information
            try:
                vol_type_enc_list = cinder.volume_encryption_type_list(self.request)
            except Exception:
                vol_type_enc_list = []
                msg = _("Unable to retrieve volume type encryption information.")
                exceptions.handle(self.request, msg)

            vol_type_enc_dict = OrderedDict([(e.volume_type_id, e) for e in vol_type_enc_list])
            for volume_type in volume_types:
                vol_type_enc = vol_type_enc_dict.get(volume_type.id, None)
                if vol_type_enc is not None:
                    volume_type.encryption = vol_type_enc
                    volume_type.encryption.name = volume_type.name
                else:
                    volume_type.encryption = None

        return volume_types
Example #21
    def test_supported_input(self):
        xyvalues = OrderedDict()
        y_python = xyvalues["python"] = [2, 3, 7, 5, 26]
        y_pypy = xyvalues["pypy"] = [12, 33, 47, 15, 126]
        y_jython = xyvalues["jython"] = [22, 43, 10, 25, 26]

        xyvaluesdf = pd.DataFrame(xyvalues)

        for i, _xy in enumerate([xyvalues, xyvaluesdf]):
            hm = create_chart(Line, _xy)
            builder = hm._builders[0]
            self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
            assert_array_equal(builder._data["x"], [0, 1, 2, 3, 4])
            assert_array_equal(builder._data["y_python"], y_python)
            assert_array_equal(builder._data["y_pypy"], y_pypy)
            assert_array_equal(builder._data["y_jython"], y_jython)

        lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
        for _xy in [lvalues, np.array(lvalues)]:
            hm = create_chart(Line, _xy)
            builder = hm._builders[0]
            self.assertEqual(builder._groups, ["0", "1", "2"])
            assert_array_equal(builder._data["x"], [0, 1, 2, 3, 4])
            assert_array_equal(builder._data["y_0"], y_python)
            assert_array_equal(builder._data["y_1"], y_pypy)
            assert_array_equal(builder._data["y_2"], y_jython)
Example #22
def get_language_config(content_language=None):
    language = get_language()[:2]
    if content_language:
        content_language = content_language[:2]
    else:
        content_language = language

    config = {}
    config["language"] = language

    lang_names = OrderedDict()
    for lang, name in settings.LANGUAGES:
        if lang[:2] not in lang_names:
            lang_names[lang[:2]] = []
        lang_names[lang[:2]].append(_(name))
    sp_langs = []
    for lang, names in lang_names.items():
        if lang == content_language:
            default = "+"
        else:
            default = ""
        sp_langs.append("{!s}{!s}={!s}".format(default, " / ".join(names), lang))

    config["spellchecker_languages"] = ",".join(sp_langs)

    if content_language in settings.LANGUAGES_BIDI:
        config["directionality"] = "rtl"
    else:
        config["directionality"] = "ltr"

    if tinymce.settings.USE_SPELLCHECKER:
        config["spellchecker_rpc_url"] = reverse("tinymce-spellcheck")

    return config
Example #23
    def expand_uids_to_download(self, crispin_client, uids, metadata):
        # During Gmail initial sync, we expand threads: given a UID to
        # download, we want to also download other UIDs on the same thread, so
        # that you don't see incomplete thread views for the duration of the
        # sync. Given a 'seed set' of UIDs, this function returns a generator
        # which yields the 'expanded' set of UIDs to download.
        thrids = OrderedDict()
        for uid in sorted(uids, reverse=True):
            g_thrid = metadata[uid].g_thrid
            if g_thrid in thrids:
                thrids[g_thrid].append(uid)
            else:
                thrids[g_thrid] = [uid]

        for g_thrid, uids in thrids.items():
            g_msgid = metadata[uids[0]].g_msgid
            # Because `uids` is ordered newest-to-oldest here, uids[0] is the
            # last UID on the thread. If g_thrid is equal to its g_msgid, that
            # means it's also the first UID on the thread. In that case, we can
            # skip thread expansion for greater sync throughput.
            if g_thrid != g_msgid:
                uids = set(uids).union(crispin_client.expand_thread(g_thrid))
                metadata.update(crispin_client.g_metadata(uids))
            for uid in sorted(uids, reverse=True):
                yield uid
Example #24
    def symlink_subprojects(self):
        """Symlink project subprojects

        Link from $WEB_ROOT/projects/<project> ->
                  $WEB_ROOT/<project>
        """
        subprojects = set()
        rels = self.get_subprojects()
        if rels.count():
            # Don't create the `projects/` directory unless subprojects exist.
            if not os.path.exists(self.subproject_root):
                os.makedirs(self.subproject_root)
        for rel in rels:
            # A mapping of slugs for the subproject URL to the actual built
            # documentation
            from_to = OrderedDict({rel.child.slug: rel.child.slug})
            subprojects.add(rel.child.slug)
            if rel.alias:
                from_to[rel.alias] = rel.child.slug
                subprojects.add(rel.alias)
            for from_slug, to_slug in from_to.items():
                self._log(u"Symlinking subproject: {0} -> {1}".format(from_slug, to_slug))
                symlink = os.path.join(self.subproject_root, from_slug)
                docs_dir = os.path.join(self.WEB_ROOT, to_slug)
                symlink_dir = os.sep.join(symlink.split(os.path.sep)[:-1])
                if not os.path.lexists(symlink_dir):
                    os.makedirs(symlink_dir)
                run("ln -nsf %s %s" % (docs_dir, symlink))

        # Remove old symlinks
        if os.path.exists(self.subproject_root):
            for subproj in os.listdir(self.subproject_root):
                if subproj not in subprojects:
                    os.unlink(os.path.join(self.subproject_root, subproj))
Example #25
class Model:
    def __init__(self):
        self.MinPosition = Vector3([sys.float_info.max, sys.float_info.max, sys.float_info.max])
        self.MaxPosition = Vector3([-sys.float_info.max, -sys.float_info.max, -sys.float_info.max])
        self.Positions = []
        self.UVs = []
        self.Normals = []
        self.Meshes = []
        self.Vertices = OrderedDict()
        self.MaterialLib = None
        self.GenerateCollisionData = False

    def Compile(self, filename):
        if INVERT_Z_COMPONENT:
            for i in range(len(self.Positions)):
                self.Positions[i].z = -self.Positions[i].z

        # AABB calculation
        for pos in self.Positions:
            self.MinPosition = self.MinPosition.Min(Vector3([pos.x, pos.y, pos.z]))
            self.MaxPosition = self.MaxPosition.Max(Vector3([pos.x, pos.y, pos.z]))

        with zipfile.ZipFile(filename, "w", zipfile.ZIP_DEFLATED) as zf:
            # TODO: Write out the vertex description so it's not hard coded in the game

            # Be very careful re-ordering anything below! Many of these Compile calls have
            # side effects.

            model = StringIO.StringIO()
            WriteVector3(model, self.MinPosition)
            WriteVector3(model, self.MaxPosition)

            # Write out each of the meshes
            meshes = StringIO.StringIO()
            WriteUInt(meshes, len(self.Meshes))
            for mesh in self.Meshes:
                mesh.Compile(meshes, self.Vertices, self.MaterialLib.Materials.keys())
            zf.writestr("__meshes__", meshes.getvalue())

            # Write out all the vertex data, interleaved
            WriteUInt(model, len(self.Vertices))
            for vtx in self.Vertices.keys():
                pos = self.Positions[vtx.Position]
                uv = self.UVs[vtx.UV]
                normal = self.Normals[vtx.Normal]

                WriteVector3(model, pos)
                WriteVector2(model, uv)
                WriteVector3(model, normal)
            zf.writestr("__model__", model.getvalue())

            self.MaterialLib.Compile(zf)

            collision = StringIO.StringIO()
            if self.GenerateCollisionData:
                WriteUInt(collision, len(self.Vertices))
                for vtx in self.Vertices.keys():
                    pos = self.Positions[vtx.Position]
                    WriteVector3(collision, pos)
                zf.writestr("__collision__", collision.getvalue())
Example #26
    def _collect_samples(self, file):
        """Read records into a hash keyed on ('Cruise', 'Relative Depth').
        The values of the hash are a list of sample_metadata (sm) hashes.
        For SIMZ cruises the convention has been to conduct one plankton pump 
        per group of 4 Niskin bottle trips, 3 pumps per cast. 
        """
        sm_hash = OrderedDict()
        with open(self.args.subsampleFile) as f:
            for r in csv.DictReader(f):
                sm = OrderedDict()
                sm["sampletype"] = r.get("Sample Type", "")
                sm["organism"] = r.get("Organism", "")
                sm["count"] = r.get("Count", "")
                sm["laboratory"] = r.get("Laboratory", "")
                sm["researcher"] = r.get("Researcher", "")
                sm["analysismethod"] = r.get("Analysis Method", "")
                sm[r.get("Comment Name")] = r.get("Comment Value", "")
                # Depends on a Comment Name of 'Relative Depth'
                key = (r.get("Cruise"), sm.get("Relative Depth"))

                sm_hash.setdefault(key, []).append(sm)

        return sm_hash
Example #27
def plot_2D_projections(title, output_file, cluster_assignments, projections):
    """
  Visualize SDR cluster projections
  """

    color_list = colors.cnames.keys()
    plt.figure()
    color_list = color_list
    color_names = []
    for i in range(len(cluster_assignments)):
        cluster_id = int(cluster_assignments[i])
        if cluster_id not in color_names:
            color_names.append(cluster_id)
        projection = projections[i]
        label = "Category %s" % cluster_id
        if len(color_list) > cluster_id:
            color = color_list[cluster_id]
        else:
            color = "black"
        plt.scatter(projection[0], projection[1], label=label, alpha=0.5, color=color, marker="o", edgecolor="black")

    # Add nicely formatted legend
    handles, labels = plt.gca().get_legend_handles_labels()
    by_label = OrderedDict(zip(labels, handles))
    plt.legend(by_label.values(), by_label.keys(), scatterpoints=1, loc=2)

    plt.title(title)
    plt.draw()
    plt.savefig(output_file)
    print("==> saved: %s" % output_file)
    return plt
Example #28
    @classmethod
    def make_tabs(cls, projects):

        """A class method for creating dictionary of projects based on their start years
            and end years. The dictionary keys will be the years and the values will be 
            the projects that happen within the corresponding years. The years and projects are sorted 
            numerically and alphabetically. 

            Args: The Project class itself and the list of projects from netcdf.
            Ret: The sorted dictionary of years and projects. 

        """

        res = {}
        now = datetime.datetime.now()

        for project in projects:
            if project.end_year is None:
                project.end_year = now.year
            for year in list(range(project.start_year, project.end_year + 1)):
                if year not in res:
                    res[year] = []
                res[year].append(project)

        for year, projects in res.items():
            projects.sort(key=lambda x: x.name)

        res = OrderedDict(sorted(res.items(), key=lambda x: x[0]))

        return res
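A quick sketch of calling `make_tabs` with stub projects (the enclosing `Project` class name and the stub's attributes are assumptions based on the docstring):

class StubProject(object):  # hypothetical stand-in for a netcdf project record
    def __init__(self, name, start_year, end_year):
        self.name, self.start_year, self.end_year = name, start_year, end_year

tabs = Project.make_tabs([StubProject("b", 2001, 2002), StubProject("a", 2002, None)])
# -> OrderedDict keyed 2001 through the current year; each value sorted by project name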
Example #29
def dofilehorz(filename):
    print(filename)
    out = open(filename.replace(".csv", ".trunc.csv"), "w")
    outcsv = UnicodeWriter(out)
    # do preparse
    with open(filename, "r") as f:
        for i, row in enumerate(UnicodeReader(f)):
            if i == 0:
                header = row
                headers = Counter(row)
                continue
            for c, cell in enumerate(row):
                size = (len(cell) // MAX_SIZE) + 1  # floor division: one extra column for any remainder
                headers[header[c]] = max(headers[header[c]], size)
    # pass 2
    with open(filename, "r") as f:
        for i, row in enumerate(UnicodeReader(f)):
            if i == 0:
                newrow = []
                for c, cell in enumerate(header):
                    newrow.extend(["%s_%d" % (cell, r) for r in range(headers[cell])])
                outcsv.writerow(newrow)
                continue
            # populate dictionary
            d = OrderedDict()
            for c, cell in enumerate(row):
                for r in range(headers[header[c]]):
                    d["%s_%d" % (header[c], r)] = cell[MAX_SIZE * r : MAX_SIZE * (r + 1)]
            outcsv.writerow(d.values())
    out.close()
Example #30
 def parse(self):
     """
     Parse the source XML content for a GMF scenario.
     :returns:
         an iterable over triples (imt, gmvs, location)
     """
     tree = openquake.nrmllib.iterparse_tree(self.source, events=("end",))
     gmf = OrderedDict()  # (location, imt) -> gmvs
     point_value_list = []
     for _, element in tree:
         a = element.attrib
         if element.tag == self._NODE_TAG:
             point_value_list.append(["POINT(%(lon)s %(lat)s)" % a, a["gmv"]])
         elif element.tag == self._GMF_TAG:
             imt = a["IMT"]
             try:
                 imt += "(%s)" % a["saPeriod"]
             except KeyError:
                 pass
             for point, value in point_value_list:
                 try:
                     values = gmf[point, imt]
                 except KeyError:
                     gmf[point, imt] = [value]
                 else:
                     values.append(value)
             point_value_list = []
     for (location, imt), gmvs in gmf.items():
         yield imt, "{%s}" % ",".join(gmvs), location