Example #1
class Config(object):
    def __init__(self):
        self.projects = OrderedDict()
        self._restore_config()

    @property
    def config_filepath(self):
        return os.path.expanduser("~/.dcm")

    @property
    def lib_dir(self):
        return os.path.expanduser("~/lib/sf")

    def write(self):
        parser = ConfigParser()
        parser.read(self.config_filepath, "utf-8")

        def ensure_section(section):
            if not parser.has_section(section):
                parser.add_section(section)

        ensure_section(SECTION_PROJECTS)
        for k, v in self.projects.items():
            parser.set(SECTION_PROJECTS, k, v)

        with open(self.config_filepath, "w") as f:
            parser.write(f)

    def _restore_config(self):
        parser = ConfigParser()
        parser.read(self.config_filepath, "utf-8")

        for group, options in parser.items():
            if group == SECTION_PROJECTS:
                self.projects.update(options)
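A minimal round-trip sketch for this class, assuming the imports shown below and a module-level SECTION_PROJECTS constant (the real value is not shown above; "projects" is a stand-in):

# Sketch only: SECTION_PROJECTS is a stand-in; the real constant is defined
# elsewhere in the original module.
import os
from collections import OrderedDict
from configparser import ConfigParser

SECTION_PROJECTS = "projects"  # assumption

cfg = Config()
cfg.projects["demo"] = "~/src/demo"
cfg.write()                 # persists the [projects] section to ~/.dcm

restored = Config()         # _restore_config() re-reads ~/.dcm
assert restored.projects["demo"] == "~/src/demo"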
Example #2
    def concatenate(self, otherlightcurve):
        """Concatenate another light curve. This function will check and remove
        any duplicate times. It will keep the column values from the original
        lightcurve to which the new lightcurve is being added.

        Parameters
        ----------
        otherlightcurve : `~sunpy.lightcurve.LightCurve`
            Another lightcurve of the same type.

        Returns
        -------
        newlc : `~sunpy.lightcurve.LightCurve`
            A new lightcurve.
        """
        if not isinstance(otherlightcurve, self.__class__):
            raise TypeError("Lightcurve classes must match.")

        meta = OrderedDict()
        meta.update({str(self.data.index[0]): self.meta.copy()})
        meta.update({str(otherlightcurve.data.index[0]): otherlightcurve.meta.copy()})

        # note: DataFrame.append was removed in pandas 2.0; use
        # pd.concat([self.data.copy(), otherlightcurve.data]) there
        data = self.data.copy().append(otherlightcurve.data)

        data["index"] = data.index
        # drop_duplicates keeps the first occurrence by default, so values
        # from the original lightcurve win over duplicates in the new one
        data = data.drop_duplicates(subset="index")
        data = data.set_index(data["index"])
        data.drop("index", axis=1, inplace=True)
        return self.__class__.create(data, meta)
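The de-duplication idiom above works standalone; a minimal pandas sketch (using pd.concat, since DataFrame.append was removed in pandas 2.0):

# Minimal sketch of the index de-duplication trick, outside sunpy.
import pandas as pd

a = pd.DataFrame({"v": [1, 2]}, index=pd.to_datetime(["2020-01-01", "2020-01-02"]))
b = pd.DataFrame({"v": [9, 3]}, index=pd.to_datetime(["2020-01-02", "2020-01-03"]))

data = pd.concat([a, b])
data["index"] = data.index
data = data.drop_duplicates(subset="index")   # first occurrence wins: a's rows
data = data.set_index(data["index"])
data.drop("index", axis=1, inplace=True)
print(data["v"].tolist())   # [1, 2, 3] - the duplicate 2020-01-02 kept v=2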
Example #3
    def create_post(self, path, **kw):
        content = kw.pop("content", None)
        onefile = kw.pop("onefile", False)
        kw.pop("is_page", False)

        metadata = OrderedDict()
        metadata.update(self.default_metadata)
        metadata.update(kw)
        # makedirs and write_metadata are helpers imported elsewhere in the
        # original module (write_metadata may be unavailable, hence the check)
        makedirs(os.path.dirname(path))

        with codecs.open(path, "wb+", "utf8") as fd:
            if onefile:
                fd.write("#+BEGIN_COMMENT\n")
                if write_metadata:
                    fd.write(write_metadata(metadata))
                else:
                    for k, v in metadata.items():
                        fd.write(".. {0}: {1}\n".format(k, v))
                fd.write("#+END_COMMENT\n")
                fd.write("\n\n")

            if content:
                fd.write(content)
            else:
                fd.write("Write your post here.")
Example #4
    # member_class, parameter, sort_key, default_sort_key and get_declared
    # come from the enclosing scope in the original source.
    def get_members(cls):
        members = OrderedDict()
        for base in cls.__bases__:
            inherited_members = get_declared(base, parameter)
            members.update(inherited_members)

        def generate_member_bindings():
            for name, obj in cls.__dict__.items():
                if isinstance(obj, member_class) and not name.startswith("__"):
                    yield name, obj
                if type(obj) is tuple and len(obj) == 1 and isinstance(obj[0], member_class):
                    raise TypeError(
                        "'%s' is a one-tuple containing what we are looking for.  Trailing comma much?  Don't... just don't."
                        % name
                    )

        bindings = generate_member_bindings()
        try:
            sorted_bindings = sorted(bindings, key=lambda x: sort_key(x[1]))
        except AttributeError:
            if sort_key is default_sort_key:
                raise TypeError("Missing member ordering definition. Use @creation_ordered or specify sort_key")
            else:
                raise
        members.update(sorted_bindings)

        return members
Example #5
def run_query(regions, tiers):
    if len(regions) == 0:
        regions = static_data.regions
    if len(tiers) == 0:
        tiers = static_data.highest_achieved_season_tier
    outcome = ["won", "lost", "total"]

    champions = static_io.read_json("champions_by_id.json")

    data = query_io.read_json("champions.json")

    result = {}

    create_empty_result_dict(result, champions, outcome)
    query_champions_json(result, data, regions, tiers, outcome, champions)
    calculate_extras(result)

    final_result = {}
    for o in outcome:
        final_result[o] = []
        for c in result:
            # avoid shadowing the builtin dict; build the row in a fixed order
            row = OrderedDict([("id", c), ("name", champions[c]["name"]), ("key", champions[c]["key"])])
            row.update(sorted((result[c][o]).items()))
            final_result[o].append(row)

    return final_result
Example #6
    def test_copying(self):
        # Check that ordered dicts are copyable, deepcopyable, picklable,
        # and have a repr/eval round-trip
        pairs = [("c", 1), ("b", 2), ("a", 3), ("d", 4), ("e", 5), ("f", 6)]
        od = OrderedDict(pairs)
        update_test = OrderedDict()
        update_test.update(od)
        for i, dup in enumerate(
            [
                od.copy(),
                copy.copy(od),
                copy.deepcopy(od),
                pickle.loads(pickle.dumps(od, 0)),
                pickle.loads(pickle.dumps(od, 1)),
                pickle.loads(pickle.dumps(od, 2)),
                pickle.loads(pickle.dumps(od, -1)),
                eval(repr(od)),
                update_test,
                OrderedDict(od),
            ]
        ):
            self.assertTrue(dup is not od)
            self.assertEqual(dup, od)
            self.assertEqual(list(dup.items()), list(od.items()))
            self.assertEqual(len(dup), len(od))
            self.assertEqual(type(dup), type(od))
Example #7
def traverse_and_remove_path(obj, path=None, match="First"):
    # match is truthy for the root call and for nodes whose ancestors all
    # matched the path so far; only a matching branch has the final path
    # element removed.
    if path is None:
        path = []
    if isinstance(obj, dict):
        res = OrderedDict()
        for k, v in obj.items():
            cmatch = False
            if match and len(path) > 0 and path[0] == k:
                cmatch = True
            res.update({k: traverse_and_remove_path(v, path=path[1:], match=cmatch)})
        if len(path) == 1 and path[0] in res.keys() and match:
            del res[path[0]]
        return res
    elif isinstance(obj, list):
        res = []
        for i, elem in enumerate(obj):
            cmatch = False
            if match and len(path) >= 1 and isinstance(path[0], int) and path[0] < len(obj) and i == path[0]:
                cmatch = True
            res.append(traverse_and_remove_path(elem, path=path[1:], match=cmatch))
        if len(path) == 1 and isinstance(path[0], int) and path[0] < len(res) and match:
            res.pop(path[0])
        return res
    else:
        return obj  # no container, just values (str, int, float)
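A quick sanity check of the traversal (hedged sketch; the expected output follows from the code above):

# Remove obj["a"][1] while leaving siblings untouched.
from collections import OrderedDict

obj = OrderedDict([("a", [10, 20, 30]), ("b", {"c": 1})])
pruned = traverse_and_remove_path(obj, path=["a", 1])
print(pruned)
# OrderedDict([('a', [10, 30]), ('b', OrderedDict([('c', 1)]))])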
Example #8
    # Two-step construction: yield the (still empty) mapping first so anchors
    # and recursive references can resolve, then fill it in below.
    def construct_yaml_map(self, node):
        data = OrderedDict()
        yield data

        if isinstance(node, yaml.MappingNode):
            self.flatten_mapping(node)
        else:
            raise yaml.constructor.ConstructorError(
                None, None, "expected a mapping node, but found %s" % node.id, node.start_mark
            )

        mapping = OrderedDict()
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=False)
            try:
                hash(key)
            except TypeError as exc:
                raise yaml.constructor.ConstructorError(
                    "while constructing a mapping",
                    node.start_mark,
                    "found unacceptable key (%s)" % exc,
                    key_node.start_mark,
                )
            value = self.construct_object(value_node, deep=False)
            mapping[key] = value
        data.update(mapping)
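Constructors like this are usually registered on a Loader subclass so that every plain YAML mapping comes back ordered. A simplified (non-generator) sketch of that wiring, not taken from the original source:

# Sketch: register an OrderedDict mapping constructor on a SafeLoader.
import yaml
from collections import OrderedDict

class OrderedLoader(yaml.SafeLoader):
    pass

def _construct_map(loader, node):
    loader.flatten_mapping(node)
    return OrderedDict(loader.construct_pairs(node))

OrderedLoader.add_constructor(
    yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _construct_map
)

print(yaml.load("b: 1\na: 2\n", Loader=OrderedLoader))
# OrderedDict([('b', 1), ('a', 2)])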
Example #9
    def _merge_attr(cls, attrname, process=None):
        attr_ret = OrderedDict()

        for base_class in cls.__bases__:
            if issubclass(base_class, MergeAttrMixin):
                attr_ret.update(base_class._merge_attr(attrname, process))

        if not hasattr(cls, attrname):
            return attr_ret

        new_attr = getattr(cls, attrname)

        if isinstance(new_attr, (list, tuple)):
            for item in new_attr:
                if item not in attr_ret:
                    if process is not None:
                        item = process(item)

                    attr_ret[item] = True

        elif isinstance(new_attr, dict):
            for key, value in new_attr.items():
                if process is not None:
                    value = process(value)

                attr_ret[key] = value

        return attr_ret
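A hypothetical usage sketch, assuming MergeAttrMixin exposes _merge_attr as a classmethod (the recursive base_class._merge_attr call implies it): list attributes merge into ordered, de-duplicated keys, with base classes contributing first.

class Base(MergeAttrMixin):
    tags = ["a", "b"]

class Child(Base):
    tags = ["b", "c"]

print(list(Child._merge_attr("tags")))   # ['a', 'b', 'c']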
Example #10
def get_exp_fills(_exp_id):
    # users and dimensions are module-level accumulators in the original source
    cur = dbutil.get_cur()
    # bind the parameter instead of interpolating it into the SQL string
    cur.execute("CALL P_GetExpFills(%s, null, null)", (_exp_id,))
    for r in cur:
        f = parse(r)

        quiz = QService.get_questionnaire(f.quiz_id)
        ss = quiz.score(f.answer, col_name=f.quiz_id)

        u = users.get(f.site_uid)
        if u is None:
            u = OrderedDict()
        u.update(ss)
        for s in ss:
            dimensions[s] = 0

        if f.quiz_id in quiz_with_answer:
            answers = quiz.parse(f.answer, col_name=f.quiz_id)
            u.update(answers)
            for a in answers:
                dimensions[a] = 0

        dim_cost_seconds = "%s_CostSeconds" % f.quiz_id
        dim_fill_time = "%s_FillTime" % f.quiz_id

        u[dim_cost_seconds] = f.cost_seconds
        u[dim_fill_time] = datetime.strftime(f.fill_time, "%Y/%m/%d %H:%M:%S")

        dimensions[dim_cost_seconds] = 0
        dimensions[dim_fill_time] = 0

        users[f.site_uid] = u
Example #11
    def apply_args(self, values, args=None, settings=None):
        # avoid a mutable default argument
        new = OrderedDict(args or {})
        new.update(values)
        for k, v in new.items():
            new[k] = self.interpolate(v, new, settings)

        return new
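The defaults-then-overrides idiom in isolation; a small sketch with a stand-in for the interpolate step, which is not shown above:

from collections import OrderedDict

defaults = {"host": "localhost", "port": "8080"}
values = {"port": "9090"}

new = OrderedDict(defaults)
new.update(values)              # explicit values override the defaults
for k, v in new.items():
    new[k] = v.upper()          # stand-in for self.interpolate(v, new, settings)
print(new)                      # OrderedDict([('host', 'LOCALHOST'), ('port', '9090')])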
Example #12
def similar_top_opt3(wvectors, cvectors, words, topn=200, nthreads=4):
    wvectors.init_sims()
    cvectors.init_sims()

    indices = [wvectors.vocab[w].index for w in words if w in wvectors.vocab]
    wvecs = wvectors.syn0norm[indices]
    dists = np.dot(wvecs, cvectors.syn0norm.T)

    if nthreads == 1:
        res = dists2neighbours(wvectors, cvectors, dists, indices, topn)
    else:
        batchsize = int(ceil(1.0 * len(indices) / nthreads))
        print(
            "dists2neighbours for %d words in %d threads, batchsize=%d"
            % (len(indices), nthreads, batchsize),
            file=stderr,
        )

        def ppp(i):
            return dists2neighbours(wvectors, cvectors, dists[i : i + batchsize], indices[i : i + batchsize], topn)

        lres = parallel_map(ppp, range(0, len(indices), batchsize), threads=nthreads)
        res = OrderedDict()
        for lr in lres:
            res.update(lr)

    return res
Example #13
    def to_json(self):
        out = OrderedDict()
        order = ["name", "maximize"]
        for i in order:
            out[i] = self.__dict__[i]
        return out
Example #14
    def get_group(self, request_type):
        res = []
        tmp_res = OrderedDict()

        all_conf = self.all_conf.get_group(request_type)
        tmp_res.update(all_conf)

        tuner_conf = self.tuner_conf.get_group(request_type)
        tmp_res.update(tuner_conf)

        if request_type == "testcase":
            cases_conf = self.cases_conf.get_config()
            return cases_conf

        for key, value in tmp_res.items():
            res.append({"key": key, "value": value, "check": True, "dsc": ""})

        if request_type in self.required_lists:
            for required_key in self.required_lists[request_type]:
                if required_key not in tmp_res:
                    value = self.required_lists[request_type][required_key]
                    res.append({"key": required_key, "value": value, "check": False, "dsc": "please check or complete"})
                    self.set_config(request_type, required_key, value)

        return res
Example #15
    def get(self, request, format=None):
        response = OrderedDict(
            [
                ("locations", reverse("location-list", request=request)),
                ("timeseries", reverse("timeseries-list", request=request)),
                ("logicalgroups", reverse("logicalgroup-list", request=request)),
                ("sources", reverse("source-list", request=request)),
                ("summary", reverse("summary", request=request)),
            ]
        )

        user = getattr(request, "user", None)
        if user is not None and user.is_superuser:
            response.update(
                {
                    "layers": reverse("layer-list", request=request),
                    "collages": reverse("collage-list", request=request),
                    "collageitems": reverse("collageitem-list", request=request),
                    "workspaces": reverse("workspace-list", request=request),
                    "workspaceitems": reverse("workspaceitem-list", request=request),
                    "users": reverse("user-list", request=request),
                    "groups": reverse("usergroup-list", request=request),
                    "roles": reverse("role-list", request=request),
                }
            )
        return Response(response)
Example #16
def process_buffer(s, buff):
    myDict = OrderedDict()

    global COUNTER
    COUNTER += 1

    if COUNTER >= s.max_depth:
        myDict["Error"] = "Max depth of %s exceeded" % s.max_depth
        return myDict

    for module in disposition.default:
        myDict.update(invoke_module(s, module, buff, myDict))

    # Yara helps drive execution of modules and alerting, no Yara = nothing more to do for buffer
    if "SCAN_YARA" not in myDict:
        return myDict

    results = myDict["SCAN_YARA"].keys()
    YARA_RULES.extend(results)

    # Are there opportunities to run modules or set alert flag?
    for rule, modules, alert in disposition.triggers:
        if rule in results and alert:
            s.alert = True

        if rule in results and modules is not None:
            for module in modules:
                myDict.update(invoke_module(s, module, buff, myDict))

    return myDict
Example #17
    def get_merged_strokes(strokes):
        def extend_stroke(stroke, vertices):
            for vert in map(StrokeVertex, vertices):
                stroke.insert_vertex(vert, stroke.stroke_vertices_end())
            return stroke

        base_strokes = tuple(stroke for stroke in strokes if not is_poly_clockwise(stroke))
        merged_strokes = OrderedDict((s, list()) for s in base_strokes)

        for stroke in filter(is_poly_clockwise, strokes):
            for base in base_strokes:
                # don't merge when diffuse colors don't match
                if diffuse_from_stroke(stroke) != diffuse_from_stroke(base):
                    continue
                # only merge when the 'hole' is inside the base
                elif stroke_inside_stroke(stroke, base):
                    merged_strokes[base].append(stroke)
                    break
                # if it isn't a hole, it is likely that there are two strokes belonging
                # to the same object separated by another object. let's try to join them
                elif get_object_name(base) == get_object_name(stroke) and diffuse_from_stroke(
                    stroke
                ) == diffuse_from_stroke(base):
                    base = extend_stroke(base, (sv for sv in stroke))
                    break
            else:
                # if all else fails, treat this stroke as a base stroke
                merged_strokes.update({stroke: []})
        return merged_strokes
Example #18
def scrape_senate_member(output_list, membernode, majority_party):
    last_name = membernode.xpath("name/last")[0].text
    state = membernode.xpath("state")[0].text
    party = "majority" if membernode.xpath("party")[0].text == majority_party else "minority"
    title = membernode.xpath("position")[0].text
    if title == "Member":
        title = None
    if title == "Ranking":
        title = "Ranking Member"

    # look up senator by state and last name
    if (state, last_name) not in senators:
        print("\t[%s] Unknown member: %s" % (state, last_name))
        return None

    moc = senators[(state, last_name)]

    entry = OrderedDict()
    if "official_full" in moc["name"]:
        entry["name"] = moc["name"]["official_full"]
    else:
        print "missing name->official_full field for", moc["id"]["bioguide"]
    entry["party"] = party
    entry["rank"] = (
        len([e for e in output_list if e["party"] == entry["party"]]) + 1
    )  # how many have we seen so far in this party, +1
    if title:
        entry["title"] = title
    entry.update(ids_from(moc["id"]))

    output_list.append(entry)

    # sort by party, then by rank, since we get the nodes in the XML in a rough seniority order that ignores party
    # should be done once at the end, but cleaner to do it here
    output_list.sort(key=lambda e: (e["party"] != "majority", e["rank"]))
Example #19
            def serialize_model(model):
                """
                Models are serialized by calling their 'serialize' method.

                Models that don't define a 'serialize' method are
                serialized as a dictionary of fields.

                Example:

                    {
                        'id': 1,
                        'title': 'Mmmm pie',
                        'content': 'Pie is good!'
                    }

                """

                if hasattr(model, "serialize"):
                    return serialize(model.serialize())
                else:
                    data = OrderedDict()
                    for field in model._meta.fields + model._meta.many_to_many:
                        data.update({field.name: serialize(getattr(model, field.name))})

                    return data
Example #20
def subdivision_dict(idCountry):
    """Returns a dictionary containing all countries available.
    If available method will return it from cache otherwise it will
    make the service call.

    :param idCountry: retrieve list of subdivisions for specified country
    :type idCountry: int
    :returns: Dictionary with available countries in format
              idCountry: country
    :rtype: dict
    """
    response = api.membership.subdivision.list(token=admin_session.get_token(), idCountry=idCountry)
    if response.status_code != 200 or not response.json().get("content"):
        return dict()
    total_records = response.json()["_metadata"]["totalRecords"]
    subdivisions = OrderedDict()
    subdivisions.update((s["idSubdivision"], s) for s in response.json()["content"])
    if len(subdivisions) < total_records:
        params = {"page": 1}
        while len(subdivisions) < total_records:
            response = api.membership.subdivision.list(
                token=admin_session.get_token(), idCountry=idCountry, params=params
            )
            subdivisions.update((s["idSubdivision"], s) for s in response.json()["content"])
            params["page"] += 1
    return subdivisions
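The keyed-pagination loop generalizes to any endpoint that reports a total record count. A stubbed sketch (fetch_page and the page data are made up):

# Stubbed sketch of the keyed-pagination idiom above.
from collections import OrderedDict

PAGES = [
    [{"idSubdivision": 1, "name": "BW"}, {"idSubdivision": 2, "name": "BY"}],
    [{"idSubdivision": 3, "name": "HE"}],
]
TOTAL_RECORDS = 3

def fetch_page(page):          # hypothetical stand-in for the API call
    return PAGES[page]

subdivisions = OrderedDict((s["idSubdivision"], s) for s in fetch_page(0))
page = 1
while len(subdivisions) < TOTAL_RECORDS:
    subdivisions.update((s["idSubdivision"], s) for s in fetch_page(page))
    page += 1
print(list(subdivisions))      # [1, 2, 3]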
Example #21
def main(argv):
    filename = argv[0]
    all_histograms = OrderedDict()

    for histogram in histogram_tools.from_file(filename):
        name = histogram.name()
        parameters = OrderedDict()
        table = {"boolean": "2", "flag": "3", "enumerated": "1", "linear": "1", "exponential": "0"}
        # Use __setitem__ because Python lambdas are so limited.
        histogram_tools.table_dispatch(histogram.kind(), table, lambda k: parameters.__setitem__("kind", k))
        if histogram.low() == 0:
            parameters["min"] = 1
        else:
            parameters["min"] = histogram.low()

        try:
            buckets = histogram.ranges()
            parameters["buckets"] = buckets
            parameters["max"] = buckets[-1]
            parameters["bucket_count"] = len(buckets)
        except histogram_tools.DefinitionException:
            continue

        all_histograms.update({name: parameters})

        if startup_histogram_re.search(name) is not None:
            all_histograms.update({"STARTUP_" + name: parameters})

    print(json.dumps({"histograms": all_histograms}))
Example #22
def parse_yaml(file):
    with open(file) as f:
        # yaml.load without an explicit Loader is deprecated and unsafe;
        # safe_load avoids arbitrary object construction
        yaml_data = yaml.safe_load(f)
        res = OrderedDict()
        for x in paths(yaml_data, leaves=False):
            res.update(x)
        return res
Example #23
def getData(conditions, condition_paths, echo):
    """Reads in memory all the data needed for the Glioblastoma vs Metastases paper.

	This function could be used for any kind of pickled data that holds a dictionary.
	It might not be the fastest function or the most pythonic, but I wrote it to get familiar with
	zip/izip - generators and the 'with' keyword.

	Args:
		conditions: Iterable containing the conditions (String) to be investigated
		condition_paths: Iterable containing the paths to the conditions to be investigated
		echo: String ('LONG' or 'SHORT') corresponding to the TE of the acquisition

	Returns:
		dataset: Nested OrderedDict where every primary key is a patient
		labels: numpy 1D-array - the label (condition) for each patient
	"""
    extension = "pkl"
    y = []
    dataset = OrderedDict()
    for index, (condition, path) in enumerate(zip(conditions, condition_paths)):
        with open(os.path.join(path, condition + "_" + echo + "." + extension), "rb") as condition_file:
            data = pickle.load(condition_file)
            # one label value (the condition index) per patient in this file
            y.append(np.zeros(len(data)) + index)
        dataset.update(data)
    return dataset, np.hstack(y)
Example #24
    def scan(self, options=None):
        """
            Main scanning function.
            Launches airodump-ng, saves its pid,
            and makes itself xmlrpc representable.

            This implements the plugin system, calling on_before_scan
        """
        pluginmanager.trigger_event(
            "on_before_scan",
            target=self._target,
            session=self,
        )
        final_options = OrderedDict([
            ('dump_prefix', self.target_dir + "/" + self.config["name"]),
            ('wireless', self.mon_iface)
        ])
        final_options.update((options or {}).items())

        result = self.aircrack.airodump(final_options, lambda x: True)

        # We wait default scan time and ask for airodump-ng to re-bump.
        # With this we can have an airodump-ng continuously scanning
        # on background until we want to get to a fixed channel
        # TODO Maybe magic with fixed / hoping channels and different cards?
        pid = result.result().result.pid
        # Timer's args must be a tuple; start() arms the one-shot re-bump
        Timer(int(self.config['scan_time']), self.on_scan_bumped, (pid,)).start()
        self.pids['airodump-ng'] = pid

        clean_self = clean_to_xmlrpc(self, ['extra_capabilities'])
        clean_self['_target'] = clean_to_xmlrpc(
            clean_self['_target'], ['parent'])
        return clean_self
Example #25
    def encode(self, obj):
        if isinstance(obj, Document):
            ret = OrderedDict()
            ret["_type"] = "document"
            ret["meta"] = obj.get("meta", {})
            ret.update(obj)
            return super(DocJSONEncoder, self).encode(ret)
        # assumed fallback: anything else goes to the default encoder rather
        # than silently returning None
        return super(DocJSONEncoder, self).encode(obj)
Example #26
    def _compute_path(self, parent, path):
        graph = OrderedDict()
        print(parent, path)  # debug trace

        if hasattr(parent, "index"):
            graph[re.compile("^%s/$" % path)] = parent.index

        for name in dir(parent):
            if name.startswith("_"):
                continue

            obj = getattr(parent, name)

            if isAction(obj):
                node_body = "%s/%s" % (path, name)
                if isExposed(obj):
                    graph[re.compile("^%s$" % node_body)] = obj
                elif isResource(obj):
                    graph[re.compile("^%s/?(.*)" % node_body)] = obj

            elif obj:
                graph.update(self._compute_path(obj, "%s/%s" % (path, name)))

        return graph
Example #27
    def __new__(mcs, name, bases, attrs):
        # Collect fields from current class.
        current_fields = []
        for key, value in list(attrs.items()):
            if isinstance(value, Field):
                current_fields.append((key, value))
                attrs.pop(key)
        current_fields.sort(key=lambda x: x[1].creation_counter)
        attrs["declared_fields"] = OrderedDict(current_fields)

        new_class = super(DeclarativeFieldsMetaclass, mcs).__new__(mcs, name, bases, attrs)

        # Walk through the MRO.
        declared_fields = OrderedDict()
        for base in reversed(new_class.__mro__):
            # Collect fields from base class.
            if hasattr(base, "declared_fields"):
                declared_fields.update(base.declared_fields)

            # Field shadowing.
            for attr, value in base.__dict__.items():
                if value is None and attr in declared_fields:
                    declared_fields.pop(attr)

        new_class.base_fields = declared_fields
        new_class.declared_fields = declared_fields

        return new_class
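A minimal Field is enough to see the creation-counter ordering and the None-shadowing behavior; a sketch assuming this Field and the metaclass above share a namespace (the real Field class is not shown):

# Sketch: exercise the metaclass with a minimal counter-ordered Field.
from collections import OrderedDict

class Field(object):
    creation_counter = 0
    def __init__(self):
        self.creation_counter = Field.creation_counter
        Field.creation_counter += 1

class Form(metaclass=DeclarativeFieldsMetaclass):  # Python 3 spelling
    name = Field()
    email = Field()

class SlimForm(Form):
    email = None   # shadowing removes the inherited field

print(list(Form.base_fields))      # ['name', 'email']
print(list(SlimForm.base_fields))  # ['name']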
Example #28
    def __new__(cls, name, bases, attrs):
        fields = OrderedDict()
        validators = {}
        parents = [base for base in bases if isinstance(base, FormBase)]

        # add fields from parents
        for parent in parents:
            if hasattr(parent, "base_fields"):
                fields.update(parent.base_fields)

        # add fields from the current form, modifying attributes
        for key, value in attrs.items():
            if key.startswith("validate_") and callable(value):
                validators[key[9:]] = value  # strip the "validate_" prefix
            elif isinstance(value, Field):
                value.key = key
                if value.name is None:
                    value.name = key
                fields[key] = value
                attrs[key] = FieldDescriptor(key)

        # apply the field-specific validators
        for key, value in validators.items():
            if key in fields:
                fields[key].validators.append(value)

        attrs["base_fields"] = fields
        attrs["base_validators"] = validators

        return type.__new__(cls, name, bases, attrs)
Example #29
    def format(self, data, keys=None, group_by=None, domain=None):
        rows_dict = OrderedDict()
        tmp_data = OrderedDict()
        sorted_data = []
        value_chains = get_domain_configuration(domain).by_type_hierarchy
        for key, row in data.items():
            to_list = list(key)

            def find_name(elements, deep):
                for element in elements:
                    if deep == len(key) - 3 and key[deep + 1] == element.val:
                        return element.text
                    elif key[deep + 1] == element.val:
                        return find_name(element.next, deep + 1)

            name = find_name(value_chains, 0)
            to_list[2] = name
            tmp_data.update({tuple(to_list): row})
        if tmp_data:
            sorted_data = sorted(tmp_data.items(), key=lambda x: (x[0][0], x[0][2]))

        for row in sorted_data:
            formatted_row = self._format.format_row(row[1])
            if formatted_row[0] not in rows_dict:
                rows_dict[formatted_row[0]] = []
            rows_dict[formatted_row[0]].append(formatted_row[1])

        for key, row in rows_dict.items():
            total_column = self.calculate_total_column(row)
            res = [key, total_column]
            res.extend(row)
            yield res
Example #30
    def process(self, out_folder, key, ugid=None):
        ## make the output folder
        new_folder = os.path.join(out_folder, key)
        os.mkdir(new_folder)
        try:
            ## the name of the new shapefile
            new_shp = os.path.join(new_folder, key + ".shp")
            ## update the schema to include UGID
            meta = self._get_meta_()
            if "UGID" in meta["schema"]["properties"]:
                meta["schema"]["properties"].pop("UGID")
            new_properties = OrderedDict({"UGID": "int"})
            new_properties.update(meta["schema"]["properties"])
            meta["schema"]["properties"] = new_properties
            ctr = 1
            with fiona.open(new_shp, "w", **meta) as sink:
                for feature in self._iter_source_():
                    if ugid is None:
                        feature["properties"].update({"UGID": ctr})
                        ctr += 1
                    else:
                        feature["properties"].update({"UGID": int(feature["properties"][ugid])})
                    sink.write(feature)
            ## remove the cpg file. this raises many, many warnings on occasion
            os.remove(new_shp.replace(".shp", ".cpg"))
            ## try to copy the cfg file
            try:
                shutil.copy2(self.path.replace(".shp", ".cfg"), new_shp.replace(".shp", ".cfg"))
            except Exception:
                warn("unable to copy configuration file - if it exists")
        except Exception:
            ## remove the created folder on an exception, then re-raise
            shutil.rmtree(new_folder)
            raise