def diff_structure(st_keys, st_one, st_two):
    """Return the entries of ``st_one`` that differ from ``st_two``.

    ``st_keys`` maps a key name to a type tag ("string", "int", "long",
    "array") that selects how the two values are compared.  Keys absent
    from ``st_two`` are always reported.  Returns ``None`` when nothing
    differs.
    """
    st_new = {}
    for key in st_one:
        if key not in st_keys:
            continue
        if key not in st_two:
            st_new[key] = st_one[key]
        elif st_keys[key] == "string" and str(st_one[key]) != str(st_two[key]):
            st_new[key] = st_one[key]
        # int() handles arbitrarily large values (and auto-promotes on
        # Python 2), so it replaces the Python-2-only long() comparison
        # without changing results.
        elif st_keys[key] in ("int", "long") and int(st_one[key]) != int(st_two[key]):
            st_new[key] = st_one[key]
        elif st_keys[key] == "array":
            # SECURITY NOTE: eval() on a non-list value is dangerous if
            # these structures can carry untrusted data; consider
            # ast.literal_eval instead.
            a_items = st_one[key] if isinstance(st_one[key], list) else eval(st_one[key])
            # BUG FIX: the original evaluated st_one[key] here whenever
            # st_two[key] was a list, so the "other" side was never
            # actually compared and list differences went unreported.
            b_items = st_two[key] if isinstance(st_two[key], list) else eval(st_two[key])
            added = set(a_items) - set(b_items)
            removed = set(b_items) - set(a_items)
            if added or removed:
                st_new[key] = a_items
    return st_new if st_new else None
Example #2
1
 def computeErrors(cls, logText):
     """
     Parse checker log text into a mapping of module name to the set of
     TwistedCheckerError instances built from that module's warning lines.
     """
     warnings = {}
     currentModule = None
     collected = []
     for rawLine in StringIO.StringIO(logText):
         # Mostly get rid of the trailing \n
         line = rawLine.strip("\n")
         if line.startswith(cls.prefixModuleName):
             # New module header: store what was gathered for the last one.
             if currentModule:
                 warnings[currentModule] = set(
                     map(TwistedCheckerError, collected))
             # Start collecting for the module named on this line.
             currentModule = line.replace(cls.prefixModuleName, "")
             collected = []
         elif re.search(cls.regexLineStart, line):
             collected.append(line)
         elif collected:
             # Continuation line: glue it onto the previous warning.
             collected[-1] += "\n" + line
         else:
             log.msg("Bad result format for %s" % currentModule)
     # Flush the module that was still being collected at end of input.
     if currentModule:
         warnings[currentModule] = set(map(TwistedCheckerError, collected))
     return warnings
Example #3
1
def expect_model_param(models, attribute_name, expected_values):
    """Assert every model in ``models.models`` used one of ``expected_values``
    for the parameter ``attribute_name``.

    Actual values may themselves be lists (e.g. GLM), in which case the
    first element is compared.  Numeric values are rounded to 5 places
    because some models (e.g. RF) round internally.
    Raises AssertionError on length or value mismatch.
    """
    # basestring exists only on Python 2; fall back to str on Python 3.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    print("param: {0}".format(attribute_name))
    actual_values = list(
        set(
            m.params[attribute_name]["actual"][0]
            if isinstance(m.params[attribute_name]["actual"], list)
            else m.params[attribute_name]["actual"]
            for m in models.models
        )
    )
    # expected may be a scalar; normalize to a list for uniform handling
    if not isinstance(expected_values, list):
        expected_values = [expected_values]
    # limit precision. Rounding happens in some models like RF
    actual_values = [x if isinstance(x, string_types) else round(float(x), 5) for x in actual_values]
    expected_values = [x if isinstance(x, string_types) else round(float(x), 5) for x in expected_values]
    print("actual values: {0}".format(actual_values))
    print("expected values: {0}".format(expected_values))
    actual_values_len = len(actual_values)
    expected_values_len = len(expected_values)
    assert actual_values_len == expected_values_len, "Expected values len: {0}. Actual values len: {1}".format(
        expected_values_len, actual_values_len
    )
    diff = set(actual_values) - set(expected_values)
    assert len(diff) == 0, "Difference between actual and expected values: {0}".format(diff)
Example #4
1
 def submitMissingTasks(stage):
     """Build and submit the not-yet-finished tasks for ``stage``.

     For the final stage this creates ResultTasks for unfinished output
     partitions; for intermediate stages it creates ShuffleMapTasks for
     partitions with no output location yet.

     NOTE(review): `pendingTasks`, `finalStage`, `numOutputParts`,
     `finished`, `outputParts`, `finalRdd`, `func` and `self` are free
     names -- presumably closure variables of an enclosing scheduler
     method; confirm against the surrounding code.
     """
     # Track the ids of tasks submitted for this stage.
     myPending = pendingTasks.setdefault(stage, set())
     tasks = []
     # Once getPreferredLocs() comes back empty we stop asking -- later
     # partitions just get no locality preference.
     have_prefer = True
     if stage == finalStage:
         for i in range(numOutputParts):
             if not finished[i]:
                 part = outputParts[i]
                 if have_prefer:
                     locs = self.getPreferredLocs(finalRdd, part)
                     if not locs:
                         have_prefer = False
                 else:
                     locs = []
                 tasks.append(ResultTask(finalStage.id, finalRdd, func, part, locs, i))
     else:
         # Intermediate stage: one shuffle task per partition that has
         # no recorded output location.
         for p in range(stage.numPartitions):
             if not stage.outputLocs[p]:
                 if have_prefer:
                     locs = self.getPreferredLocs(stage.rdd, p)
                     if not locs:
                         have_prefer = False
                 else:
                     locs = []
                 tasks.append(ShuffleMapTask(stage.id, stage.rdd, stage.shuffleDep, p, locs))
     logger.debug("add to pending %s tasks", len(tasks))
     myPending |= set(t.id for t in tasks)
     self.submitTasks(tasks)
Example #5
1
    def run(self):
        """Create or update the topic's list on the pad.

        New items from the 'contents' parameter are merged into the
        topic's existing list; anything recorded under the pad's 'done'
        topic is dropped from it.  The pad is rewritten only when the
        resulting list actually changed.
        """
        self._ensure_pad_exists()
        doc = self._parse_pad()

        topic = self.module.params["topic"]
        incoming = set(self.module.params["contents"])
        finished = doc["done"]

        current = set(doc.get(topic, []))
        merged = current.difference(finished) | incoming
        changed = current != merged

        if changed:
            doc[topic] = sorted(merged)
            doc["all"] = sorted(self._all_items(doc, ["all", "done"]))
            self.epad.setText(
                padID=self.pad_id,
                text=yaml.safe_dump(doc, default_flow_style=False),
            )

        self.module.exit_json(changed=changed)
 def logCommand(self, name, completeCmd, testdir):
     """Log the full command line, working directory and changed
     environment variables for the test ``name``."""
     self.log.info("TEST-INFO | %s | full command: %r" % (name, completeCmd))
     self.log.info("TEST-INFO | %s | current directory: %r" % (name, testdir))
     # Show only those environment variables that are changed from
     # the ambient environment.  items() works on both Python 2 and 3,
     # unlike the Python-2-only iteritems() used previously.
     changedEnv = set("%s=%s" % i for i in self.env.items()) - set("%s=%s" % i for i in os.environ.items())
     self.log.info("TEST-INFO | %s | environment: %s" % (name, list(changedEnv)))
Example #7
1
File: vqa.py Project: tylin/VQA
    def loadRes(self, resFile):
        """
		Load result file and return a result object.
		:param   resFile (str)     : file name of result file
		:return: res (obj)         : result api object
		"""
        res = VQA()
        res.dataset["info"] = copy.deepcopy(self.dataset["info"])
        res.dataset["task_type"] = copy.deepcopy(self.dataset["task_type"])
        res.dataset["data_type"] = copy.deepcopy(self.dataset["data_type"])
        res.dataset["data_subtype"] = copy.deepcopy(self.dataset["data_subtype"])
        res.dataset["license"] = copy.deepcopy(self.dataset["license"])

        print "Loading and preparing results...     "
        time_t = datetime.datetime.utcnow()
        anns = json.load(open(resFile))
        assert type(anns) == list, "results in not an array of objects"
        annsQuesIds = [ann["question_id"] for ann in anns]
        assert set(annsQuesIds) == (
            set(annsQuesIds) & set(self.getQuesIds())
        ), "Results do not correspond to current VQA set"
        for ann in anns:
            quesId = ann["question_id"]
            qaAnn = self.qa[quesId]
            ann["image_id"] = qaAnn["image_id"]
            ann["question"] = qaAnn["question"]
            ann["question_type"] = qaAnn["question_type"]
            ann["answer_type"] = qaAnn["answer_type"]
        print "DONE (t=%0.2fs)" % ((datetime.datetime.utcnow() - time_t).total_seconds())

        res.dataset["annotations"] = anns
        res.createIndex()
        return res
 def set_attr(self, name, value):
     """SWIG-style attribute setter: only existing attributes (plus the
     special 'this'/'thisown' proxy slots) may be assigned."""
     if name == "thisown":
         # Ownership flag is delegated to the underlying SWIG proxy.
         return self.this.own(value)
     if hasattr(self, name) or (name == "this"):
         # NOTE(review): `set` here is presumably a setter function bound
         # in an enclosing scope (typical SWIG-generated wrapper); the
         # builtin set() would raise TypeError with three arguments --
         # confirm against the generated module this came from.
         set(self, name, value)
     else:
         raise AttributeError("You cannot add attributes to %s" % self)
Example #9
1
    def redo(self, channel, image):
        """This method is called when an image is set in a channel.

        ``image`` may be None when no image has the focus; in that case
        the channel's highlight is cleared.
        """
        # BUG FIX: the original called image.get() before the
        # image-is-None check further down, so a None image crashed here
        # even though the later branch explicitly handles it.
        if image is not None:
            imname = image.get("name", "none")
        else:
            imname = "none"
        chname = channel.name
        # is image in contents tree yet?
        in_contents = self.is_in_contents(chname, imname)

        # get old highlighted entries for this channel -- will be
        # an empty set or one key
        old_highlight = channel.extdata.contents_old_highlight

        # calculate new highlight keys -- again, an empty set or one key
        if image is not None:
            key = self._get_hl_key(chname, image)
            new_highlight = set([key])
        else:
            # no image has the focus
            new_highlight = set([])

        # Only highlights active image in the current channel
        if self.highlight_tracks_keyboard_focus:
            if in_contents:
                self.update_highlights(self._hl_path, new_highlight)
            self._hl_path = new_highlight

        # Highlight all active images in all channels
        else:
            if in_contents:
                self.update_highlights(old_highlight, new_highlight)
            channel.extdata.contents_old_highlight = new_highlight

        return True
Example #10
1
def _process_node(node_dict):
    """ Convenience wrapper to validate and convert to a GraphNode. """

    required_fields = {NODE_PROPERTY.ID, NODE_PROPERTY.TYPE, NODE_PROPERTY.PROPERTIES, NODE_PROPERTY.EDGES}

    required_properties = {GRAPH_PROPERTY.CREATED_TS, GRAPH_PROPERTY.UPDATED_TS, GRAPH_PROPERTY.DELETED_TS}

    # Data-layer nodes must carry exactly the required fields: both
    # missing and unexpected keys count as errors (symmetric difference).
    errors = required_fields.symmetric_difference(set(node_dict))

    if NODE_PROPERTY.PROPERTIES not in errors:
        # The properties dict is present, so also verify the graph-layer
        # bookkeeping timestamps it is required to contain.
        missing_props = required_properties - set(node_dict[NODE_PROPERTY.PROPERTIES])
        errors |= missing_props

    if errors:
        raise GraphOutputError(errors, "Required fields or properties missing from GraphNode.")

    return GraphNode(
        node_dict[NODE_PROPERTY.ID],
        node_dict[NODE_PROPERTY.TYPE],
        node_dict[NODE_PROPERTY.PROPERTIES],
        node_dict[NODE_PROPERTY.EDGES],
    )
    def neargroups(self, blocknames):
        """Given a list or set of block names, finds groups of 'near' blocks.  Blocks are assigned the same group
        if they are neighbours, or share a neighbour."""
        # De-duplicate the input while keeping a concrete list to iterate.
        blocknames = list(set(blocknames))
        # Start with one singleton group per block.
        groups = []
        for blk in blocknames:
            groups.append(set([blk]))
        from copy import copy

        # Repeatedly merge any two groups whose neighbour-expanded sets
        # intersect.  After every merge the scan restarts from scratch,
        # because `groups` was mutated while being iterated.
        done = False
        while not done:
            done = True
            for i, g in enumerate(groups):
                # ng = the group's members plus all of their neighbours.
                ng = copy(g)
                for blk in g:
                    ng = ng | self.block[blk].neighbour_name
                if i < len(groups) - 1:
                    # Compare against every later group only (earlier
                    # pairs were already checked this pass).
                    for g2 in groups[i + 1 :]:
                        ng2 = copy(g2)
                        for blk in g2:
                            ng2 = ng2 | self.block[blk].neighbour_name
                        if ng & ng2:
                            # Overlap: fold g2 into g, drop g2 and break
                            # out to restart the whole scan.
                            g.update(g2)
                            groups.remove(g2)
                            done = False
                            break
                    if not done:
                        break
        return groups
Example #12
0
def all_ship_positions(length, condition):
    """Yield every admissible placement of a ship of the given length.

    For each starting coordinate, the vertical and the horizontal run of
    ``length`` cells are tried; a run is yielded (as a set of coordinate
    pairs) only when every cell stays on the 10x10 board and satisfies
    ``condition``.
    """
    for row, col in ALL_POSITIONS:
        down = [(row + k, col) for k in range(length)]
        if all(r <= 9 and condition((r, c)) for r, c in down):
            yield set(down)

        across = [(row, col + k) for k in range(length)]
        if all(c <= 9 and condition((r, c)) for r, c in across):
            yield set(across)
Example #13
0
 def exec_plan(self, plan=None, layers=None):
     """Run every tactic in ``plan`` through each build phase in
     self.PHASES, then reconcile and report signatures.

     NOTE(review): iterating ``plan`` would raise TypeError if the
     default None is used -- presumably callers always pass a list;
     confirm at the call sites.
     """
     signatures = {}
     cont = True
     for phase in self.PHASES:
         for tactic in plan:
             if phase == "lint":
                 # Accumulate lint results; any failure flips cont to False.
                 cont &= tactic.lint()
                 if cont is False and self.force is not True:
                     # no message, reason will already have been logged
                     raise BuildError()
             elif phase == "read":
                 # We use a read (into memory phase to make layer comps
                 # simpler)
                 tactic.read()
             elif phase == "call":
                 tactic()
             elif phase == "sign":
                 # Collect per-tactic signatures into one mapping.
                 sig = tactic.sign()
                 if sig:
                     signatures.update(sig)
     # Fresh repo: nothing to diff against, so all deltas are empty.
     new_repo = not self.manifest.exists()
     if new_repo:
         added, changed, removed = set(), set(), set()
     else:
         added, changed, _ = utils.delta_signatures(self.manifest)
         removed = self.clean_removed(signatures)
     # write out the sigs
     if "sign" in self.PHASES:
         self.write_signatures(signatures, layers)
     if self.report:
         self.write_report(new_repo, added, changed, removed)
Example #14
0
    def test_xml(self, client):
        """Export a small site and check the XML structure the exporter
        emits: version/base attributes, node/children/content layout,
        field values and tags."""
        root = Node.root()
        content = Type1(node=root, state="published", title="Export Test", language="en").save()
        content.tags.add("xml")
        content.tags.add("export")

        exporter = Exporter()
        xml, files = exporter.run(root)
        assert xml
        assert xml.tag == "site"
        # Missing attributes fall back to sentinels so the asserts fail
        # loudly rather than comparing against None.
        assert xml.attrib.get("version", -1) == "1"
        assert xml.attrib.get("base", "--") == ""
        node = xml.find("node")
        # NOTE(review): `assert node` relies on element truthiness --
        # ElementTree elements with no children can evaluate falsy;
        # `node is not None` would be the safer check.  Confirm which
        # XML library Exporter returns.
        assert node
        assert len(node.getchildren()) == 2

        children = node.find("children")
        assert len(children.getchildren()) == 0

        content = node.find("content")
        assert content
        assert content.tag == "content"
        assert len(content.getchildren()) == 1

        fields = content.find("fields")
        title = find_attribute(fields, "field", "name", "title")
        assert title.text == "Export Test"

        # Both tags must be present; order is not significant.
        tags = fields.findall("tags/tag")
        assert len(tags) == 2
        assert set((tags[0].text, tags[1].text)) == set(("xml", "export"))
Example #15
0
 def _topology_fixes(self, state, migration):
     """
     This is part of algorithms logic which is not static. It combines
     information from the shard view and migration to figure out if
     migration involves terminating the shard(s). In this case it removes
     the steps for migrating structural agents (strategy "locally") from
     the migration plan.
     """
     kill_list = migration.get_kill_list()
     # items() works on both Python 2 and 3, unlike the Python-2-only
     # iteritems() used previously (assumes kill_list is a plain dict,
     # which iteritems() already required).
     for shard, hosts in kill_list.items():
         shard_view = first(x for x in state.shards if x.shard == shard)
         if shard_view is None:
             self.warning(
                 "Shard %r has not been found in shard view. "
                 "This is really strange! Shard structure taken "
                 "for analyzing: \n%s",
                 shard,
                 state.shards,
             )
             continue
         # The shard terminates when every one of its hosts is killed.
         shard_is_terminating = set(hosts) == set(shard_view.hosts)
         if shard_is_terminating:
             self.log("Detected that shard %r will be terminating, removing" " local steps of the migration.", shard)
             migration = migration.remove_local_migrations(shard)
     return migration
Example #16
0
    def __init__(self, x, z):
        """
        :param int x: X coordinate in chunk coords
        :param int z: Z coordinate in chunk coords

        :ivar array.array heightmap: Tracks the tallest block in each xz-column.
        """

        self.x, self.z = int(x), int(z)

        # One byte per column for heights; one byte per block for light.
        columns = 16 * 16
        self.heightmap = array("B", [0] * columns)
        self.blocklight = array("B", [0] * (columns * CHUNK_HEIGHT))

        # Sixteen vertical sections make up the chunk.
        self.sections = [Section() for _ in range(16)]

        self.entities = set()
        self.tiles = {}

        # Coordinates whose blocks changed and need to be resent.
        self.damaged = set()
Example #17
0
    def getTags(self, keywords):
        """Return the set of predicted tags for the given keywords.

        Two passes: (1) containment -- a tag whose word set is a subset
        of some keyword's word set matches; (2) fuzzy matching of
        lemmatized tags against whole keywords via Levenshtein ratio.
        """
        predtags = set()
        keywordlist = [set(keyword.split(" ")) for keyword in keywords]
        # NOTE(review): wordlist[0] is fed to set() directly -- if it is a
        # plain string this builds a set of *characters*, not words.
        # Confirm the shape of self.taglist entries.
        taglist_set = [(set(wordlist[0]), wordlist[1]) for wordlist in self.taglist]

        # Pass 1: a tag matches when it is completely contained in a
        # keyword.  (Replaces the manual flag loop; unused timing code
        # removed.)
        for wordlist in taglist_set:
            if any(wordlist[0] <= keyword for keyword in keywordlist):
                predtags.add(self.tags[wordlist[1]])

        # Pass 2: fuzzy-match lemmatized tags against the raw keywords.
        for keyword in keywords:
            for tag in self.taglem:
                score = Levenshtein.ratio(unicode(tag[0]), unicode(keyword))
                if score > 0.8:
                    predtags.add(self.tags[tag[1]])
        # predtags is already a set; no defensive copy needed.
        return predtags
Example #18
0
    def test_regression_site_kwarg(self):
        # A SearchQuerySet built with an explicit site= must index exactly
        # the models registered on that site.
        custom_site = SearchSite()
        for model in (MockModel, AnotherMockModel):
            custom_site.register(model)

        bsqs = SearchQuerySet(site=custom_site)
        indexed = set(bsqs.query.backend.site.get_indexed_models())
        self.assertEqual(indexed, set([MockModel, AnotherMockModel]))
Example #19
0
    def subset(self, labels, invert=False):
        """Annotation subset

        Extract the sub-annotation containing only the requested labels.

        Parameters
        ----------
        labels : iterable
            Label iterable.
        invert : bool, optional
            If invert is True, extract all but requested `labels`

        Returns
        -------
        subset : `Annotation`
            Annotation subset.
        """

        wanted = set(labels)
        known = set(self.labels())

        # Restrict (or complement) against the labels actually present.
        if invert:
            wanted = known - wanted
        else:
            wanted = wanted & known

        sub = self.__class__(uri=self.uri, modality=self.modality)
        for segment, track, label in self.itertracks(label=True):
            if label in wanted:
                sub[segment, track] = label

        return sub
    def __init__(self):
        """Set up all crawl counters, URL sets and their guarding locks."""
        # Completed crawl count.
        self._crawls = 0
        self._crawls_lock = threading.Lock()

        # Content vs. non-content URLs seen, plus running totals.
        self._content_urls, self._total_content_urls = set(), 0
        self._other_urls, self._total_other_urls = set(), 0
        self._urls_lock = threading.Lock()

        # Pages that could not be accessed.
        self._total_private_pages, self._private_pages = 0, set()
        self._private_pages_lock = threading.Lock()

        # Domains outside the crawl scope.
        self._total_other_domains, self._other_domains = 0, set()
        self._other_domains_lock = threading.Lock()

        # Fetch failures keyed by error, and a parse-failure counter.
        self._fetch_errors = {}
        self._fetch_errors_lock = threading.Lock()
        self._parse_errors = 0
        self._parse_errors_lock = threading.Lock()

        # Histogram of new URLs discovered per crawl.
        self._new_urls_histogram = [0]
        self._url_count_lock = threading.Lock()

        # For timing the crawl.
        self._before = time.time()
Example #21
0
    def insert_or_update(self, obj, **kwargs):
        """Upsert ``obj`` keyed on its "cep": $set the supplied fields and
        $unset every known field that ``obj`` does not provide."""
        missing = set(self._fields) - set(obj)
        update = {
            "$set": obj,
            "$unset": {field: 1 for field in missing},
        }
        self._db.ceps.update({"cep": obj["cep"]}, update, upsert=True)
Example #22
0
def card_adder(prospect_ids, matchmaker, db, currentcards, maxitems=5000):
    """
    Parameters
    ----------
    prospect_ids: list of card ids that may need adding to the actively searched bunch
    matchmaker: cv2.Flann object
    db: card database object
    currentcards: list of Card objects in same order as added to matchmaker
    maxitems: automatic cutoff to limit size of matcher object (for performance reasons)

    Returns
    -------
    matchmaker: the (bigger, maybe) matcher object
    currentcards: now complete list of Card objects in the same order as matchmaker indexes the descriptors
    """
    # Only ids not already represented in currentcards need work.
    known_ids = set(c.id for c in currentcards)
    current_adds = set(prospect_ids) - known_ids

    # Reset the matcher if the additions would push it over the size cap.
    if len(currentcards) and (len(current_adds) + len(currentcards)) > maxitems:
        print ("exceeded maximum allowed items in matcher object: maxitems = {}".format(maxitems))
        matchmaker.clear()
        currentcards = []

    for sid in current_adds:
        row = db.cur.execute("SELECT name, code, pic_path FROM cards WHERE id=(?)", (sid,)).fetchone()
        kp, desc = orientation.get_kpdesc(sid, c1="ak_points", c2="ak_desc")
        if desc is None:
            # Skip cards without stored keypoints/descriptors.
            print ("database has no kp, descriptor entry for {}".format(orientation.idname(sid)))
            continue
        currentcards.append(Card(row["name"], row["code"], sid, row["pic_path"], kp))
        matchmaker.add([desc])
    return matchmaker, currentcards
Example #23
0
def remove_unassociated_nodes(bdf_filename, bdf_filename_out, renumber=False):
    """
    Removes nodes from a model that are not referenced.

    .. warning only considers elements
    .. renumber=False is not supported
    """
    model = BDF()
    model.read_bdf(bdf_filename, xref=True)

    # Collect every node id referenced by an element.
    referenced = set()
    for element in itervalues(model.elements):
        referenced.update(element.node_ids)

    # Any node not referenced by an element is dropped.
    unused = set(model.nodes.keys()) - referenced
    for nid in unused:
        del model.nodes[nid]

    if renumber:
        # Renumber all ids from 1 while writing the output deck.
        starting_id_dict = {"nid": 1, "eid": 1, "pid": 1, "mid": 1}
        bdf_renumber(model, bdf_filename_out, size=8, is_double=False, starting_id_dict=starting_id_dict)
    else:
        model.write_bdf(bdf_filename_out)
Example #24
0
def clean():
    """Cleans up the virtualenv"""
    import glob
    import os

    # Remove known build/venv artifacts, whether file or directory.
    doomed = (
        "bin",
        "Scripts",
        "build",
        "dist",
        "include",
        "lib",
        "man",
        "share",
        "FlexGet.egg-info",
        "paver-minilib.zip",
        "setup.py",
    )
    for name in doomed:
        target = path(name)
        if target.isdir():
            target.rmtree()
        elif target.isfile():
            target.remove()

    # Sweep compiled/backup python files out of every package (and tests).
    for pkg in set(options.setup.packages) | set(("tests",)):
        pattern = pkg.replace(".", os.sep) + "/*.py[oc~]"
        for filename in glob.glob(pattern):
            path(filename).remove()
Example #25
0
    def sync_selection(self):
        """Push selection into the selection model."""
        staged, unmerged, modified, untracked = [], [], [], []
        state = State(staged, unmerged, modified, untracked)

        paths = self.selected_paths()
        model = cola.model()
        # Parent directories count as belonging to each category too.
        staged_set = utils.add_parents(set(model.staged))
        modified_set = utils.add_parents(set(model.modified))
        unmerged_set = utils.add_parents(set(model.unmerged))
        untracked_set = utils.add_parents(set(model.untracked))

        # Classification order matters: unmerged wins over untracked,
        # which wins over staged, which wins over modified.
        for path in paths:
            if path in unmerged_set:
                unmerged.append(path)
            elif path in untracked_set:
                untracked.append(path)
            elif path in staged_set:
                staged.append(path)
            elif path in modified_set:
                modified.append(path)
            else:
                # Unknown paths default to the staged bucket.
                staged.append(path)
        # Push the new selection into the model.
        cola.selection_model().set_selection(state)
        return paths
Example #26
0
    def submit_salary_slip(self):
        """
			Submit all salary slips based on selected criteria
		"""
        ss_list = self.get_sal_slip_list()
        log = ""
        # Build an HTML table header; rows are appended per slip below and
        # the table is closed after the loop in either case.
        if ss_list:
            log = """<table>
						<tr>
							<td colspan = 2>Following Salary Slip has been submitted: </td>
						</tr>
						<tr>
							<td><u>SAL SLIP ID</u></td>
							<td><u>EMPLOYEE NAME</u></td>
						</tr>
					"""
        else:
            log = "<table><tr><td colspan = 2>No salary slip found to submit for the above selected criteria</td></tr>"

        for ss in ss_list:
            ss_obj = get_obj("Salary Slip", ss[0], with_children=1)
            # NOTE(review): `set` is presumably a framework helper (e.g. a
            # document field setter imported elsewhere in this module);
            # the builtin set() would raise TypeError with three
            # arguments -- confirm the import that shadows it.
            set(ss_obj.doc, "docstatus", 1)
            ss_obj.on_submit()

            log += "<tr><td>" + ss[0] + "</td><td>" + ss_obj.doc.employee_name + "</td></tr>"
        log += "</table>"
        return log
Example #27
0
 def _match(cls, ctx, name, select=None):
     """Resolve ``name`` to a single endpoint registered on ``ctx``.

     select: disambiguation policy when several endpoints match --
     None (raise), "any" (first), "shortest"/"longest" (by path length),
     or a list/tuple of path-parameter names the endpoint must use
     exactly.

     Raises LookupError when nothing matches and ValueError when the
     matches cannot be disambiguated.
     """
     matches = ctx.spec.match_endpoint(name)
     if not matches:
         raise LookupError('No endpoint matching "{0}"'.format(name))
     if len(matches) == 1:
         return matches[0]
     if not select:
         raise ValueError(
             'Multiple endpoints matching "{0}" -- ' '"{1}"'.format(name, "\n".join([str(m) for m in matches]))
         )
     if select == "any":
         return matches[0]
     elif select == "shortest":
         return min(matches, key=lambda m: len(m["path"]))
     elif select == "longest":
         return max(matches, key=lambda m: len(m["path"]))
     elif isinstance(select, (list, tuple)):
         wanted = set(select)
         for match in matches:
             # Path parameters appear as ":name" segments in the path.
             if set(re.findall(r":(\w+)", match["path"])) == wanted:
                 return match
         # No endpoint used exactly the requested parameter set.
         raise ValueError("No matches for {0}".format(", ".join('"{0}"'.format(s) for s in wanted)))
     # Typo fixed: "Unsuported" -> "Unsupported".
     raise ValueError('Unsupported policy "{0}"'.format(select))
def generateTree(dataSet, labels):
    """Recursively build a decision tree (nested dicts) for ``dataSet``.

    dataSet: list of feature vectors whose last element is the class kind.
    labels: feature names, parallel to the feature columns.
    Returns either a class kind (leaf) or {feature label: {value: subtree}}.
    """
    ## stop condition ##
    # condition 1 : no more benefit on dividing
    bestFeatureIndex = chooseBestFeature(dataSet)
    if bestFeatureIndex == -1:
        return majorityVote(dataSet)

    # condition 2 : only one kind left
    kindList = [vec[-1] for vec in dataSet]
    kindSet = set(kindList)
    if len(kindSet) == 1:
        # BUG FIX: sets are unordered and do not support indexing, so the
        # original `kindSet[0]` raised TypeError.  All kinds are equal in
        # this branch, so the first list element is the answer.
        return kindList[0]

    # condition 3 : no more labels
    if len(labels) == 0:
        return majorityVote(dataSet)

    # Recurse: one subtree per observed value of the best feature, with
    # that feature's label removed from the remaining label list.
    returnTree = {}
    returnTreeElement = {}
    availableFeatureValSet = set(vec[bestFeatureIndex] for vec in dataSet)
    newLabels = labels[:bestFeatureIndex]
    newLabels.extend(labels[bestFeatureIndex + 1 :])
    for val in availableFeatureValSet:
        returnTreeElement[val] = generateTree(dataSplit(dataSet, bestFeatureIndex, val), newLabels)

    returnTree[labels[bestFeatureIndex]] = returnTreeElement

    return returnTree
Example #29
0
 def __eq__(self, other):
     """Instances are equal when their attributes, lookup and both map
     collections match (maps compared order-insensitively as sets)."""
     if self.attrib != other.attrib:
         return False
     if set(self.mapInvalid) != set(other.mapInvalid):
         return False
     if set(self.mapMissing) != set(other.mapMissing):
         return False
     return self.lookup == other.lookup
    def test_process_schema_rule_mismatched_table(self):

        test_case_buf = """
SET search_path = bial, pg_catalog

--
-- Name: ab_reporting_beta_match_ins; Type: RULE; Schema: bial; Owner: table_owner
--

CREATE RULE ab_reporting_beta_match_ins AS ON INSERT TO weblog_mart_tbls.ab_reporting_beta_match DO INSTEAD INSERT INTO ab_reporting_beta_match VALUES new.search_session_id;
"""
        expected_out = ""

        in_name = os.path.join(os.getcwd(), "infile")
        out_name = os.path.join(os.getcwd(), "outfile")
        with open(in_name, "w") as fd:
            fd.write(test_case_buf)

        dump_schemas = set(["bial"])
        dump_tables = set([("bial", "ab_reporting_beta_match")])
        with open(out_name, "w") as fdout:
            with open(in_name, "r") as fdin:
                process_schema(dump_schemas, dump_tables, fdin, fdout)

        with open(out_name, "r") as fd:
            results = fd.read()

        self.assertEquals(results, expected_out)
        os.remove(in_name)
        os.remove(out_name)