Example #1
    def __runner(self, pending_state, running_state, exc_handler):
        # Drive self.__gen one step at a time, translating how the generator
        # terminates into the state machine's terminal states.
        self.__transition(pending_state, running_state)

        step = lambda: next(self.__gen)
        while True:
            try:
                step()
            except StopIteration:
                self.__transition(running_state, _STOPPED)
                break
            except GeneratorExit:
                self.__transition(running_state, _CLOSED)
                break
            except BaseException:
                exc_info = sys.exc_info()
                try:
                    exc_handler(exc_info)
                except BaseException:
                    self.__transition(running_state, _FAILED)
                    raise

            if self.__state != running_state:
                break

            try:
                yield
            except BaseException:
                # An exception thrown into this suspended runner is replayed
                # into the wrapped generator on the next step.
                exc_info = sys.exc_info()
                step = lambda: self.__gen.throw(*exc_info)
            else:
                step = lambda: next(self.__gen)
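A standalone sketch (illustrative names, not from the original) of the generator.throw() replay idiom used above: an exception raised while the driver is suspended is re-raised inside the wrapped generator at its yield point.

    def echo():
        while True:
            try:
                yield
            except ValueError as e:
                print("caught:", e)

    g = echo()
    next(g)                       # prime the generator to its first yield
    g.throw(ValueError("boom"))   # raised inside echo(), caught, resumes at yield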
Example #2
    def finalize(self):
        if Tap in self.possible_gestures:
            tp = next(self.touch_points.itervalues())
            if tp.total_movement <= TAP_THRESHOLD:
                self.tapped.emit(tp)
                return

        if Swipe in self.possible_gestures:
            tp = next(self.touch_points.itervalues())
            st = tp.swipe_type
            if st is not None:
                self.swiped.emit(st)
                return

        if Pinch in self.possible_gestures:
            points = tuple(self.touch_points.itervalues())
            if len(points) == 2:
                pinch_dir = get_pinch(*points)
                if pinch_dir is not None:
                    self.pinched.emit(pinch_dir)

        if not self.hold_started:
            return

        if TapAndHold in self.possible_gestures:
            tp = next(self.touch_points.itervalues())
            self.tap_hold_finished.emit(tp)
            return

        if SwipeAndHold in self.possible_gestures:
            self.swipe_hold_finished.emit(self.hold_data[1])
            return
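The next(self.touch_points.itervalues()) calls above pull out a single (arbitrary) touch point without materializing a list; itervalues() is Python 2 dict API. A Python 3 equivalent, for reference:

    touch_points = {1: "touch-point"}
    tp = next(iter(touch_points.values()))  # one value, no intermediate list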
Example #3
def read_file():
    IS_LOCAL = False
    localLines = []
    newLines = []

    with open("index2.html", "r") as lines:
        for line in lines:

            # First we make sure to remove the old prod comments
            # so the lines in between can show
            if line.find(START_PROD_DELIMETER) >= 0:
                line = next(lines, "")  # default guards against EOF right after a delimiter

            if line.find(END_PROD_DELIMETER) >= 0:
                line = next(lines, "")

            # Next we make sure to ignore all the local script tags
            if line.find(START_LOCAL_DELIMETER) >= 0:
                IS_LOCAL = True

            if line.find(END_LOCAL_DELIMETER) >= 0:
                IS_LOCAL = False
                line = ""

            if IS_LOCAL:
                localLines.append(line)
            else:
                newLines.append(line)

    return localLines, newLines
Example #4
    def __iter__(self):
        """
        Yield each error node in turn, then advance the parent iterator.
        """
        for child in self.children:
            yield child
        next(self.parent)
Example #5
def init_word_tables(ftable1, ftable2):
    fileptr = 0
    # unique = 0
    temp_train_emails = train_emails.split("\n")
    # for each training document for spam
    for i in range(0, 350):
        doc = temp_train_emails[i]
        doc = doc.split(" ")
        # skip the first entry (because we already know the class)
        iterdoc = iter(doc)
        next(iterdoc)
        for word in iterdoc:
            word = word.split(":")
            # print(word)
            if word[0] not in ftable1:
                ftable1[word[0]] = int(word[1])
            else:
                ftable1[word[0]] = ftable1[word[0]] + int(word[1])

    # do the same for non-spam
    for i in range(350, 700):
        doc = temp_train_emails[i]
        doc = doc.split(" ")
        # skip the first entry (because we already know the class)
        iterdoc = iter(doc)
        next(iterdoc)
        for word in iterdoc:
            word = word.split(":")
            # print(word)
            if word[0] not in ftable2:
                # unique = unique+1
                ftable2[word[0]] = int(word[1])
            else:
                ftable2[word[0]] = ftable2[word[0]] + int(word[1])
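As an aside, the add-or-increment branches above can be collapsed with dict.get; a minimal sketch:

    counts = {}
    for token, n in [("a", 1), ("b", 2), ("a", 3)]:
        counts[token] = counts.get(token, 0) + n
    # counts == {"a": 4, "b": 2}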
Example #6
    def __init__(self, edges, time_step, start_time=None):
        """
        edges (sequence of (float, int) tuples)
            An iterable of 2-tuples representing each edge transition.
            The 2-tuples *must* be in the absolute time form (time, logic level).
        
        time_step (float)
            The default time step for advance() when it is called
            without an argument.
        
        start_time (float)
            The initial starting time for the sequence.
            
        Raises StreamError when there are fewer than two elements in the edges iterable
        """
        self.edges = iter(edges)  # accept any iterable of edges, not only an iterator
        self.time_step = time_step
        self.it_end = False

        try:
            self.cur_states = next(self.edges)
            self.next_states = next(self.edges)
        except StopIteration:
            self.it_end = True
            raise StreamError("Not enough edges to initialize edge_sequence() object")

        self.cur_time = self.cur_states[0]

        if start_time is not None:
            init_step = start_time - self.cur_time
            if init_step > 0.0:
                self.advance(init_step)
Example #7
def iterunique(source, key):
    # assume source is already sorted by the given key
    it = iter(source)

    hdr = next(it)
    yield tuple(hdr)

    # convert field selection into field indices
    if key is None:
        indices = range(len(hdr))
    else:
        indices = asindices(hdr, key)

    # now use field indices to construct a _getkey function
    # N.B., this may raise an exception on short rows, depending on
    # the field selection
    getkey = operator.itemgetter(*indices)

    prev = next(it)
    prev_key = getkey(prev)
    prev_comp_ne = True

    for curr in it:
        curr_key = getkey(curr)
        curr_comp_ne = curr_key != prev_key
        if prev_comp_ne and curr_comp_ne:
            yield tuple(prev)
        prev = curr
        prev_key = curr_key
        prev_comp_ne = curr_comp_ne

    # last one?
    if prev_comp_ne:
        yield tuple(prev)
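A quick check of the semantics (a row survives only when its key differs from both neighbours), using key=None so the whole row acts as the key:

    import operator  # required by iterunique

    table = [("id", "v"), (1, "a"), (1, "a"), (2, "c")]
    print(list(iterunique(table, None)))
    # [('id', 'v'), (2, 'c')] -- the duplicated (1, 'a') rows are dropped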
Example #8
    def generator(row_iter, delim=","):
        # TODO: this is where we are spending time (~80%). I think things
        # could be made more efficient:
        #   - We could for example "compile" the function, because some values
        #   do not change here.
        #   - The function to convert a line to dtyped values could also be
        #   generated on the fly from a string and be executed instead of
        #   looping.
        #   - The regexes are overkill: for comments, checking that a line
        #   starts with % should be enough and faster, and for empty lines,
        #   same thing --> this does not seem to change anything.

        # We do not abstract skipping comments and empty lines, for
        # performance reasons.
        raw = next(row_iter)
        while r_empty.match(raw) or r_comment.match(raw):
            raw = next(row_iter)

        # 'compiling' the range since it does not change
        # Note, I have already tried zipping the converters and
        # row elements and got slightly worse performance.
        elems = list(range(ni))

        row = raw.split(delim)
        yield tuple([convertors[i](row[i]) for i in elems])
        for raw in row_iter:
            while r_comment.match(raw) or r_empty.match(raw):
                raw = next(row_iter)
            row = raw.split(delim)
            yield tuple([convertors[i](row[i]) for i in elems])
Example #9
def input_value_matches(kwd):
    """ return keyword matches to values """
    scores_by_entity = {}
    for field in get_fields_tracked():
        tracker = get_tracker(field)

        if "*" in kwd:
            if next(tracker.find(kwd, limit=2), False):
                scores_by_entity[field] = (0.7, {"map_to": field})

            match, unique = check_unique_match(tracker, field, "^" + kwd + "$")
            if unique:
                scores_by_entity[field] = (0.8, {"map_to": field, "adjusted_keyword": unique})

        else:
            # 1) check for exact-match (ignoring case)
            match = next(tracker.find("^" + kwd + "$", limit=2), False)
            # does it match case?
            if match:
                scores_by_entity[field] = (1.0 if match == kwd else 0.95, {"map_to": field, "adjusted_keyword": match})
            # 2) partial match
            elif len(kwd) >= 2:
                # wildcard on both sides
                match, unique = check_unique_match(tracker, field, "^*" + kwd + "*$")
                if match:
                    kwd_new = ("*" + kwd + "*").replace("**", "*")
                    if unique:
                        kwd_new = unique  # in unique match, modify kwd
                    scores_by_entity[field] = (0.6, {"map_to": field, "adjusted_keyword": kwd_new})

    return scores_by_entity
Example #10
    def testRenameAttributes(self):
        layer = QgsVectorLayer("Point", "test", "memory")
        provider = layer.dataProvider()

        res = provider.addAttributes(
            [QgsField("name", QVariant.String), QgsField("age", QVariant.Int), QgsField("size", QVariant.Double)]
        )
        layer.updateFields()
        assert res, "Failed to add attributes"
        ft = QgsFeature()
        ft.setGeometry(QgsGeometry.fromPoint(QgsPoint(10, 10)))
        ft.setAttributes(["Johny", 20, 0.3])
        res, t = provider.addFeatures([ft])

        # bad rename
        self.assertFalse(provider.renameAttributes({-1: "not_a_field"}))
        self.assertFalse(provider.renameAttributes({100: "not_a_field"}))
        # already exists
        self.assertFalse(provider.renameAttributes({1: "name"}))

        # rename one field
        self.assertTrue(provider.renameAttributes({1: "this_is_the_new_age"}))
        self.assertEqual(provider.fields().at(1).name(), "this_is_the_new_age")
        layer.updateFields()
        fet = next(layer.getFeatures())
        self.assertEqual(fet.fields()[1].name(), "this_is_the_new_age")

        # rename two fields
        self.assertTrue(provider.renameAttributes({1: "mapinfo_is_the_stone_age", 2: "super_size"}))
        self.assertEqual(provider.fields().at(1).name(), "mapinfo_is_the_stone_age")
        self.assertEqual(provider.fields().at(2).name(), "super_size")
        layer.updateFields()
        fet = next(layer.getFeatures())
        self.assertEqual(fet.fields()[1].name(), "mapinfo_is_the_stone_age")
        self.assertEqual(fet.fields()[2].name(), "super_size")
Example #11
    def verify_checksums(self, records, checksums="checksums.txt"):
        """Check checksums in record set against list of checksums

        Args:
            records (list): list of xmu.DeepDict objects
            checksums (str): path to checksum file
        """
        with open("checksums.txt", "rb") as f:
            rows = csv.reader(f)
            next(rows)
            self.checksums = {row[0]: row[1] for row[0], row[1] in rows}
        errors = []
        for fn, media in records.iteritems():
            if len(media) > 1:
                errors.append(fn)
            else:
                m = media[0]
                if m.checksum != self.checksums.get(m.filename):
                    errors.append(fn)
        # Write any errors to file
        if errors:
            with open("errors.txt", "wb") as f:
                f.write("\n".join(errors))
        else:
            print "No errors found!"
Example #12
    def timerEvent(self, event):
        if self._generator is None:
            return
        try:
            next(self._generator)
        except StopIteration:
            self.pausevalue()
Example #13
    def __init__(self, filename=None, calibfile=None, extra_offset_file=None, start=None, **kwargs):
        QtGui.QMainWindow.__init__(self, **kwargs)

        self.setWindowTitle("DragonBrowser")

        self.filename = filename or QtGui.QFileDialog.getOpenFileName(self, "Open file", os.environ["HOME"])
        if not self.filename:
            sys.exit()

        if extra_offset_file is not None:
            self.calib = TimelapseCalibrationExtraOffsets(calibfile, extra_offset_file)
        elif calibfile is not None:
            self.calib = TimelapseCalibration(calibfile)
        else:
            self.calib = lambda x: x

        self.generator = EventGenerator(self.filename)

        if start is not None:
            for i in range(start):
                next(self.generator)

        self.dragon_event = self.calib(next(self.generator))
        self.gains = self.dragon_event.data.dtype.names
        self.n_channels = self.dragon_event.data.shape[0]
        self.init_gui()
Example #14
def main():
    iterator = iter(sys.stdin.read().split())
    a, b = int(next(iterator)), int(next(iterator))
    res = a - b
    # print(res + 1 if res % 10 == 0 else res - 1)
    # adjust the last digit: +1 when it will not carry, otherwise -1
    print(res + 1 if res % 10 < 9 else res - 1)
Example #15
    def find_timestamp_in_file(self, filesArr):

        for i, fileName in enumerate(filesArr):
            fileObj = FileObj.FileObj()
            fileObj.fileName = fileName

            print "fileName===" + str(fileName) + " --i=" + str(i)
            try:
                with open(fileName, "rb+") as fileReader:
                    ## Find line no. in the file ##
                    lineNum = self.__seek_line_no(fileReader)
                    if lineNum == -1:
                        print "ERROR: Timestamp is incorrect or does not exist in the file!!!", self.timeOrgForamt
                        # nothing found in this file; fall through to the next one
                    else:
                        fileObj.lineExist = True
                        fileObj.lineNoInFile = lineNum
                        print "Timestamp found !!"

                print "1.file closed!"
                print "#" * 20
            except IOError as e:
                print "I/O error({0}): {1}".format(e.errno, e.strerror)
            except Exception as e:
                print "Other exception: " + e.message + str(e.__class__)
            self.fileObjectsArr.append(fileObj)
        return
Example #16
def main():
    ap = argparse.ArgumentParser()
    ap.add_argument("dll")
    ap.add_argument("lib")
    ap.add_argument("machine")

    args = ap.parse_args()

    p = subprocess.Popen(["dumpbin", "/exports", args.dll], stdout=subprocess.PIPE)

    # skip the fixed-size banner that dumpbin prints before the export table
    for _ in range(19):
        next(p.stdout)

    def_file = args.lib.replace(".lib", ".def")

    with open(def_file, "w") as f:
        f.write("LIBRARY %s\n" % os.path.basename(args.dll))
        f.write("EXPORTS\n")

        for l in p.stdout:
            l = l.decode("utf-8")
            l = l.strip()

            if not l:
                break

            f.write(l.split()[3] + "\n")

    subprocess.check_call(["lib", "/def:" + def_file, "/out:" + args.lib, "/machine:" + args.machine])
Example #17
def changedEntropy(rev_type):
    csvreader = csv.reader(open(rev_type + "_changed_types.csv", "rb"))
    next(csvreader, None)  # skip the header row

    entropy_list = list()
    for line in csvreader:
        rev = int(line[0])
        changed_types = line[1:]
        ct_list = list()
        for ct in changed_types:
            if int(ct) > 0:
                ct_list.append(int(ct))
        unique_ct = len(ct_list)
        ct_cnt = len(line) - 1
        entropy = 0
        if ct_cnt == 1:
            entropy = 1
        else:
            total = float(sum(ct_list))
            for ct in ct_list:
                p_ct = ct / total  # true division; guards against Python 2 integer division
                entropy += -(p_ct * math.log(p_ct, ct_cnt))
        entropy_list.append([rev, unique_ct, round(entropy, 3)])
    df = pd.DataFrame(entropy_list, columns=["revision", "unique_types", "entropy"]).sort_values("revision")
    df.to_csv(rev_type + "_changed_entropy.csv", index=False)
    return
Example #18
def test_records():
    table = (("foo", "bar"), ("a", 1), ("b", 2), ("c", 3))
    actual = records(table)
    # access items
    it = iter(actual)
    o = next(it)
    eq_("a", o["foo"])
    eq_(1, o["bar"])
    o = next(it)
    eq_("b", o["foo"])
    eq_(2, o["bar"])
    # access attributes
    it = iter(actual)
    o = next(it)
    eq_("a", o.foo)
    eq_(1, o.bar)
    o = next(it)
    eq_("b", o.foo)
    eq_(2, o.bar)
    # access with get() method
    it = iter(actual)
    o = next(it)
    eq_("a", o.get("foo"))
    eq_(1, o.get("bar"))
    eq_(None, o.get("baz"))
    eq_("qux", o.get("baz", default="qux"))
Example #19
def read_header(ofile):
    """Read the header of the iterable ofile."""
    i = next(ofile)

    # Pass first comments
    while r_comment.match(i):
        i = next(ofile)

    # The header is everything up to the DATA attribute.
    relation = None
    attributes = []
    while not r_datameta.match(i):
        m = r_headerline.match(i)
        if m:
            isattr = r_attribute.match(i)
            if isattr:
                name, type, i = tokenize_attribute(ofile, i)
                attributes.append((name, type))
            else:
                isrel = r_relation.match(i)
                if isrel:
                    relation = isrel.group(1)
                else:
                    raise ValueError("Error parsing line %s" % i)
                i = next(ofile)
        else:
            i = next(ofile)

    return relation, attributes
Example #20
    def lookForRes(self, resCode, codeType=1):
        if codeType == 1:
            return next((x for x in self.resTab if x.letterCode1 == resCode), None)
        elif codeType == 3:
            return next((x for x in self.resTab if x.letterCode3 == resCode), None)
        else:
            return None
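For reference, the next(iterable, default) first-match idiom this method relies on, in isolation:

    items = [3, 8, 12]
    first_big = next((x for x in items if x > 10), None)  # -> 12
    no_match = next((x for x in items if x > 99), None)   # -> None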
Example #21
def link_found_mac_users(cfg, detected_macs):
    users_filename = cfg.get("usersfile", "route")
    known_users = []
    unknown_macs = []
    with open(users_filename, "r") as csvfile:
        reader = csv.reader(csvfile, delimiter=";")
        next(reader, None)  # skip the header row
        csvContain = []
        for a in reader:
            if len(a) > 1:
                mac = a[0].replace(" ", "")
                if len(a) >= 3 and mac.count(":") == 5 and len(mac) == 17:
                    csvContain.append(a)
                else:
                    print("Incorrect entry: \n" + str(a))

        if len(csvContain) < 1:
            print("No Mac found in :" + users_filename)

        for row in detected_macs:
            for user in csvContain:
                if row == user[0].replace(" ", "").lower():
                    toImprove = user not in known_users
                    if str2bool(user[1]) and toImprove:
                        known_users.append(user)
                    break
            else:
                if row not in unknown_macs:
                    unknown_macs.append(row)

    return known_users, set(unknown_macs)
Example #22
    def test_parse(self):
        buf = self.data
        pkt = packet.Packet(buf)
        i = iter(pkt)

        eq_(type(next(i)), ethernet.ethernet)
        eq_(type(next(i)), lldp.lldp)
Example #23
    def __exit__(self, type, value, traceback):
        if type is None:
            try:
                next(self.gen)
            except StopIteration:
                return
            else:
                raise RuntimeError("generator didn't stop")
        else:
            if value is None:
                # Need to force instantiation so we can reliably
                # tell if we get the same exception back
                value = type()
            try:
                self.gen.throw(type, value, traceback)
                raise RuntimeError("generator didn't stop after throw()")
            except StopIteration as exc:
                # Suppress the exception *unless* it's the same exception that
                # was passed to throw().  This prevents a StopIteration
                # raised inside the "with" statement from being suppressed
                return exc is not value
            except:
                # only re-raise if it's *not* the exception that was
                # passed to throw(), because __exit__() must not raise
                # an exception unless __exit__() itself failed.  But throw()
                # has to raise the exception to signal propagation, so this
                # fixes the impedance mismatch between the throw() protocol
                # and the __exit__() protocol.
                #
                if sys.exc_info()[1] is not value:
                    raise
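This method closely mirrors the machinery behind contextlib.contextmanager; a minimal sketch of the protocol it implements:

    from contextlib import contextmanager

    @contextmanager
    def managed():
        print("enter")
        try:
            yield "resource"
        finally:
            print("exit")

    with managed() as r:
        print(r)  # prints: enter, resource, exit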
Example #24
def update(ev):
    global myfunc, index, up, levels2, noise, cmap, color

    if 0 < index < 25:
        # update left panes rolling upwards
        noise = np.roll(noise, 1, axis=0)
        image1.set_data(noise)
        curve1a.set_data(noise)
        curve1b.set_data(noise)

        # update colors/colormap
        if (index % 5) == 0:
            curve1b.color = next(color)
            cm = next(cmap)
            image2.cmap = cm
            curve2b.color = cm

        # change isocurves by clipping data/or changing limits
        # update curve1b levels (clip)
        curve1b.levels = levels1[index:-index]

        # update curve2b data with clipped data
        im2 = np.clip(myfunc, clip[index], clip[-index])
        curve2b.set_data(im2)

        index += up
    else:
        # change index direction
        up = -up
        index += up

    canvas.update()
Example #25
def parse(string):
    """Parse args, kwargs call representation.

    Parameters
    ----------
    string: str
        String to parse.

    Returns
    -------
    tuple
        (args, kwargs)
    """
    args = []
    kwargs = {}
    elements = next(csv.reader([string], quotechar="\\"))
    for element in elements:
        parts = []
        literals = next(csv.reader([element], delimiter="=", quotechar="\\"))
        for literal in literals:
            literal = literal.strip()
            try:
                value = ast.literal_eval(literal)
            except Exception:
                value = literal
            parts.append(value)
        if len(parts) == 1:
            args.append(parts[0])
        elif len(parts) == 2:
            kwargs[parts[0]] = parts[1]
    args = tuple(args)
    return (args, kwargs)
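A quick demonstration of parse() on a mixed args/kwargs string:

    print(parse("1, 'two', x=3, y='z'"))
    # -> ((1, 'two'), {'x': 3, 'y': 'z'})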
Example #26
def generate_udiff_lines(left, right):
    """Combine the diff lines from ``left`` and ``right``, and
    generate the lines of the resulting udiff."""
    assert len(left) == len(right), (left, right)
    right_gens = iter(right)
    for left_g in left:
        right_g = next(right_gens)
        left_line = next(left_g, None)
        if (left_line is not None) and (left_line[0] == " "):
            left_g = itertools.chain((left_line,), left_g)
            for line in left_g:
                right_line = next(right_g)
                assert line == right_line, (line, right_line)
                yield line
        else:
            if left_line is not None:
                assert left_line[0] == "-", left_line
                yield left_line
                for line in left_g:
                    assert line[0] == "-", line
                    yield line

            for line in right_g:
                assert line[0] == "+", line
                yield line
Example #27
    def list_episodes(self, showname, snum=None):
        """
        Get the list of all of the episodes available for a given show for a given season. If no season is specified,
        it will return all of the episodes for the show.
        :param showname: The name of the show
        :param season: The season whose episodes you want. If not specified, this function will return all of the episodes
                        for the show.
        :return: A list of the episode file names for the requested show.
        :rtype: list[str]
        """
        seasons = self.list_seasons(showname)
        showdirectory = self.show_path(showname)
        ret = []

        if snum is None:
            # Get all the episodes if no season is specified
            for season in seasons:
                walk = os.walk(showdirectory + "/" + season)
                directory, _, episodes = next(walk)
                ret = ret + episodes
        else:
            seasoncandidates = filter(lambda x: mediautlils.seasonnumber(x) == snum, seasons)
            for season in seasoncandidates:
                walk = os.walk(showdirectory + "/" + season)
                directory, _, episodes = next(walk)
                ret = episodes

        # Filter out non-video files
        filtered = []
        for result in ret:
            if any(ext in result for ext in extensionsToCheck):
                filtered.append(result)

        return filtered
Example #28
def processOrders(reader, customers, orders):
    # Walk the CSV rows, grouping consecutive rows into customers and their orders.
    statuses = ["Completed", "Shipped", "Partially Shipped"]
    for row in reader:
        if not row["Customer Name"]:
            continue
        while row["Order Status"] in statuses:
            currentCustomer = Customer(row["Customer Name"], row["Customer Phone"])
            customers.append(currentCustomer)
            while row["Customer Name"] == currentCustomer.name:
                if row["Order Status"] in statuses:
                    currentDateList = [int(s) for s in row["Order Date"].split("/")]
                    currentDateObj = date(currentDateList[2], currentDateList[0], currentDateList[1])
                    currentOrder = Order(currentCustomer, currentDateObj, row["Order Total (inc tax)"])
                    while (
                        currentOrder.date == currentDateObj and currentOrder.total == row["Order Total (inc tax)"]
                    ):
                        currentOrder.updateItems(row["Product Name"], row["Product Qty"])
                        currentCustomer.updateItems(row["Product Name"], row["Product Qty"])
                        try:
                            row = next(reader)
                            currentDateList = [int(s) for s in row["Order Date"].split("/")]
                            currentDateObj = date(currentDateList[2], currentDateList[0], currentDateList[1])
                        except StopIteration:
                            orders.append(currentOrder)
                            return
                    orders.append(currentOrder)
                else:
                    try:
                        row = next(reader)
                    except StopIteration:
                        return
Example #29
def _cancel_after_first_response(stub):
    request_response_sizes = (31415, 9, 2653, 58979)
    request_payload_sizes = (27182, 8, 1828, 45904)
    with _Pipe() as pipe:
        response_iterator = stub.FullDuplexCall(pipe)

        response_size = request_response_sizes[0]
        payload_size = request_payload_sizes[0]
        request = messages_pb2.StreamingOutputCallRequest(
            response_type=messages_pb2.COMPRESSABLE,
            response_parameters=(messages_pb2.ResponseParameters(size=response_size),),
            payload=messages_pb2.Payload(body=b"\x00" * payload_size),
        )
        pipe.add(request)
        response = next(response_iterator)
        # We test the contents of `response` in the Ping Pong test - don't check
        # them here.
        response_iterator.cancel()

        try:
            next(response_iterator)
        except grpc.RpcError as rpc_error:
            if rpc_error.code() is not grpc.StatusCode.CANCELLED:
                raise
        else:
            raise ValueError("expected call to be cancelled")
Example #30
    def __next__(self):
        token = self.stream.current
        if token.type is TOKEN_EOF:
            self.stream.close()
            raise StopIteration()
        next(self.stream)
        return token
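This is the standard iterator protocol: __next__ returns the current item, advances the underlying stream, and signals exhaustion with StopIteration. A minimal self-contained analogue:

    class Countdown:
        def __init__(self, n):
            self.n = n

        def __iter__(self):
            return self

        def __next__(self):
            if self.n == 0:
                raise StopIteration
            self.n -= 1
            return self.n + 1

    print(list(Countdown(3)))  # [3, 2, 1]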