Example #1
 def roundedSummaryStats(self,array):
     [mean,stddev,min,max]=SummaryStats.summaryStats(array)
     mean=int(100.0*mean+5.0/9.0)/100.0
     stddev=int(100.0*stddev+5.0/9.0)/100.0
     min=int(100.0*min+5.0/9.0)/100.0
     max=int(100.0*max+5.0/9.0)/100.0
     return [mean,stddev,min,max]
Example #2
import math

def readnumber(number):
    """
    :param int number: a non-negative whole-number quantity (only the integer part is spelled out)
    :return: the amount written out in Thai words, with the correct place-value word after each digit.
    """
    position_call = ["แสน", "หมื่น", "พัน", "ร้อย", "สิบ", ""]
    number_call = ["", "หนึ่ง", "สอง", "สาม","สี่", "ห้า", "หก", "เจ็ด", "แปด", "เก้า"]
    ret = ""
    if (number == 0): return ret
    if (number >= 1000000):  # >= so that exact multiples of one million are handled here
        ret += readnumber(int(number / 1000000)) + "ล้าน"
        number = int(math.fmod(number, 1000000))
    divider = 100000
    pos = 0
    while(number > 0):
        d=int(number/divider)
        if (divider == 10) and (d == 2):
            ret += "ยี่"
        elif (divider == 10) and (d == 1):
            ret += ""
        elif ((divider == 1) and (d == 1) and (ret != "")):
            ret += "เอ็ด"
        else:
            ret += number_call[d]
        if d:
            ret += position_call[pos]
        else:
            ret += ""
        number=number % divider
        divider=divider / 10
        pos += 1
    return ret
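
A quick usage sketch (not part of the original source); the expected spellings in the comments follow from tracing the digit and place-value tables above:

print(readnumber(21))       # ยี่สิบเอ็ด
print(readnumber(105))      # หนึ่งร้อยห้า
print(readnumber(2500000))  # สองล้านห้าแสน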
Example #3
 def printcoords(event):
     zoom = sc.get()
     x = int(event.x/zoom)
     y = int(event.y/zoom)
     selected["x"]=x
     selected["y"]=y
     updateImage()
Example #4
    def load(name):
        fs_name = format.path("{0}.chunks".format(name))
        if not os.path.exists(fs_name):
            raise IOError
        with io.open(fs_name) as fp:
            name = fp.readline()[:-1]
            size = fp.readline()[:-1]
            if name.startswith("name:") and size.startswith("size:"):
                name = name[5:]
                size = size[5:]
            else:
                raise TypeError("chunk.file has wrong format")
            ci = ChunkInfo(name)
            ci.loaded = True
            ci.set_size(size)
            while True:
                if not fp.readline():  #: skip line
                    break
                name = fp.readline()[1:-1]
                range = fp.readline()[1:-1]
                if name.startswith("name:") and range.startswith("range:"):
                    name = name[5:]
                    range = range[6:].split("-")
                else:
                    raise TypeError("chunk.file has wrong format")

                ci.add_chunk(name, (int(range[0]), int(range[1])))
        return ci
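
Inferring the on-disk layout from the parser above (my reading of the format, not taken from the source), a file that load() accepts could be written like this:

import io

def save_chunks_sketch(path, name, size, chunks):
    # chunks: list of (chunk_name, (start, end)) tuples
    with io.open(path, "w") as fp:
        fp.write("name:%s\n" % name)
        fp.write("size:%s\n" % size)
        for i, (cname, (start, end)) in enumerate(chunks):
            fp.write("#%d:\n" % i)               # separator line that load() only checks for non-emptiness
            fp.write("\tname:%s\n" % cname)      # the leading tab is what readline()[1:-1] strips
            fp.write("\trange:%d-%d\n" % (start, end))

save_chunks_sketch("demo.chunks", "demo.bin", 2048,
                   [("demo.bin.chunk0", (0, 1023)), ("demo.bin.chunk1", (1024, 2047))])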
Example #5
        def updateImage():
            if selected["err"]:
                selected["err"].destroy()
                selected["err"]=None
            zoom = sc.get()               
            newImg = originalImg.convert("RGB")
            for nt, x, y in selected["triples"]:
                try: #1.1.6 and above
                    newImg[x,y] = (0,150,255*nt//len(cg.seq))
                except:
                    newImg.putpixel((x,y), (0,150,255*nt//len(cg.seq)))   
            if selected["x"] and selected["y"]:   
                x,y = selected["x"], selected["y"]    
                try: #1.1.6 and above
                    newImg[x,y] = (255,0,0)
                except:
                    newImg.putpixel((x,y), (255,0,0))
                w = tk.Label(root, text="Selected coordinates: {},{}".format(x,y))
                w.grid(row = 4, column = 0, sticky = "E")
                w = tk.Label(root, text="Please choose corresponding nucleotide\n(1-based coordinates):")
                w.grid(row = 5, column = 0, sticky = "E")
                w = tk.Entry(root)
                selected["nt_entry"] = w
                w.grid(row = 5, column = 1, sticky = "W")
                w.focus_set()
                w = tk.Button(root, text="OK", command=submitSelected)
                w.grid(row = 5, column = 1)


            newsize = (originalImg.size[0]*int(zoom), originalImg.size[1]*int(zoom))
            newImg = newImg.resize(newsize, Image.NEAREST)
            img = ImageTk.PhotoImage(newImg)
            imgDisplay.configure(image = img)
            imgDisplay.image = img
Example #6
def get_partners_huwknd(op_number):
    """
    List of partner mentioning of OP in Huwknd table
    """
    print("get_partners_huwknd()")

    npartners = 0
    partners = []

    huwknd_qs = get_huwknd(op_number)

    if huwknd_qs is None:
        print("Huwknd does not contain partners for OP %d", op_number)
    else:
        nhuwknds = huwknd_qs.count()
        print("Huwknd has %d entries for OP %s" % (nhuwknds, op_number))
        for huwknd in huwknd_qs:
            mar_date = "%02d/%02d/%04d" % (int(huwknd.hdag), int(huwknd.hmaand), int(huwknd.hjaar))
            gebsex = huwknd.gebsex

            if gebsex == 'm':  # OP is man
                partnersex = 'v'
                familyname = huwknd.anmhv  # familyname wife
                firstname1 = huwknd.vrn1hv  # firstname1 wife
                firstname2 = huwknd.vrn2hv  # firstname2 wife
                firstname3 = huwknd.vrn3hv  # firstname3 wife
            elif gebsex == 'v':  # OP is woman
                partnersex = 'm'
                familyname = huwknd.anmhm  # familyname husband
                firstname1 = huwknd.vrn1hm  # firstname1 husband
                firstname2 = huwknd.vrn2hm  # firstname2 husband
                firstname3 = huwknd.vrn3hm  # firstname3 husband
            else:
                # unknown sex code: skip, otherwise the name fields below would be undefined
                continue

            if familyname is None: familyname = ""
            if firstname1 is None: firstname1 = ""
            if firstname2 is None: firstname2 = ""
            if firstname3 is None: firstname3 = ""

            fullname = familyname
            fullname += ", "
            fullname += firstname1
            if firstname2 != "":
                fullname += " "
                fullname += firstname2
            if firstname3 != "":
                fullname += " "
                fullname += firstname3

            if gebsex == 'm' or gebsex == 'v':
                partner = {
                    "mar_date": mar_date,
                    "sex": partnersex,
                    "fullname": fullname
                }
                npartners += 1
                print("partner:", partner)
                partners.append(partner)
        print("Huwknd has %d partner entries for OP %s" % (npartners, op_number))

    return partners
Example #7
    def __init__(self, delimiter=None, comments='#', autostrip=True, encoding=None):
        delimiter = _decode_line(delimiter)
        comments = _decode_line(comments)

        self.comments = comments

        # Delimiter is a character
        if (delimiter is None) or isinstance(delimiter, basestring):
            delimiter = delimiter or None
            _handyman = self._delimited_splitter
        # Delimiter is a list of field widths
        elif hasattr(delimiter, '__iter__'):
            _handyman = self._variablewidth_splitter
            idx = np.cumsum([0] + list(delimiter))
            delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])]
        # Delimiter is a single integer
        elif int(delimiter):
            (_handyman, delimiter) = (
                    self._fixedwidth_splitter, int(delimiter))
        else:
            (_handyman, delimiter) = (self._delimited_splitter, None)
        self.delimiter = delimiter
        if autostrip:
            self._handyman = self.autostrip(_handyman)
        else:
            self._handyman = _handyman
        self.encoding = encoding
Example #8
def Waveform(addr, count, isTA, write=False, label=None):
	header = (WFM << 4) | (write & 0x1)
	count = int(count)
	count = ((count // ADDRESS_UNIT)-1) & 0x000fffff # 20 bit count
	addr = (addr // ADDRESS_UNIT) & 0x00ffffff # 24 bit addr
	payload = (PLAY << WFM_OP_OFFSET) | ((int(isTA) & 0x1) << TA_PAIR_BIT) | (count << 24) | addr
	return Instruction(header, payload, label)
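
The low 24 bits of the payload hold the address and the next 20 bits hold the count, so both fields can be recovered by masking and shifting. A standalone sketch with a made-up packed value (the opcode and TA-pair bits in the upper positions depend on constants defined elsewhere in the source):

payload = (0x3ff << 24) | 0x00abcd
addr_units = payload & 0x00ffffff           # 24-bit address field (in ADDRESS_UNIT units)
count_units = (payload >> 24) & 0x000fffff  # 20-bit count field (stored as count // ADDRESS_UNIT - 1)
print(hex(addr_units), count_units)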
Example #9
	def _parse_response(response):
		tokens = ['']
		fresh = True
		quoted = False
		just_unquoted = False
		for c in response:
			if c == '"':
				if just_unquoted:
					tokens[-1] += '"' # False alarm, this is an escaped ".
					fresh = False
				just_unquoted = quoted
				quoted = not quoted
			elif c == ' ' and not quoted:
				try:
					tokens[-1] = int(tokens[-1], 0)
				except ValueError:
					pass
				tokens.append('')
				fresh = True
				just_unquoted = False
			else:
				tokens[-1] += c
				fresh = False
				just_unquoted = False
		try:
			tokens[-1] = int(tokens[-1], 0)
		except ValueError:
			pass
		if fresh:
			tokens.pop()
		return tokens
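
Tracing the tokenizer above on a small made-up response (assuming the function is reachable as a plain function): unquoted fields that look like numbers are converted with int(..., 0), quoted fields stay strings.

tokens = _parse_response('OK 3 0x10 "hello world"')
# expected, per the logic above: ['OK', 3, 16, 'hello world']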
Example #10
    def test_typed_set(self):

        my_set = caom_util.TypedSet(str, )
        with self.assertRaises(AssertionError):
            my_set.add(float(1.0))
            my_set.add(int(1))
            my_set.add(bool(1))

        self.assertRaises(AssertionError, caom_util.TypedSet, str, float(1.0))

        my_set = caom_util.TypedSet(str, "Test1")
        self.assertEqual(1, len(my_set))
        self.assertEqual("Test1", my_set.pop())

        my_set.add("Test1")
        my_set.add("Test2")
        self.assertEqual(2, len(my_set), "set length")
        with self.assertRaises(AssertionError):
            my_set.add(float(1.0))
            my_set.add(int(1))
            my_set.add(bool(1))
            my_set.add("Test1")

        my_set = caom_util.TypedSet(str, )
        my_set.add("Test1")
        my_set.add("Test1")
        self.assertTrue(len(my_set) == 1)
Example #11
def find_new_gid(sysuser, preferred_gid=None):
    defs = get_defs()
    # TODO: Catch errors
    if not sysuser:
        gid_min = int(defs.get("GID_MIN", 1000))
        gid_max = int(defs.get("GID_MAX", 60000))
    else:
        gid_min = int(defs.get("SYS_GID_MIN", 101))
        gid_max = int(defs.get("GID_MIN", 1000))
        gid_max = int(defs.get("SYS_GID_MAX", gid_max))

    if preferred_gid and gid_min < preferred_gid < gid_max:
        try:
            grp.getgrgid(preferred_gid)
        except KeyError:
            return preferred_gid

    if sysuser:
        for uid in range(gid_max, gid_min, -1):
            try:
                grp.getgrgid(uid)
            except KeyError:
                return uid
    else:
        for uid in range(gid_min, gid_max):
            try:
                grp.getgrgid(uid)
            except KeyError:
                return uid

    syslog.syslog(syslog.LOG_WARNING, "no more available GID on the system")
Example #12
def register_tasks(namespaces, default_namespace={}, dry_run_suffix=''):
    """Return a Luigi task class after parsed Luigi task metadata.

    :param dict namespaces: Task namespaces.
    :param dict default_namespace: Default namespaces.
    :param unicode dry_run_suffix: Suffix to be added to file created during dry run.
    :rtype: iterable
    """

    for task_name, namespace in namespaces.items():
        action_namespace = default_namespace.copy()
        action_namespace.update(namespace)
        task_keys = ['target_pattern', 'sources_repls', 'action_template', 'SHELL']
        task_namespace = {k: action_namespace[k] for k in task_keys if k in action_namespace}
        task_namespace['sources_repls'] = task_namespace['sources_repls'].split()
        # luigi attributes
        task_namespace['resources'] = {k.partition('_')[2]: int(v) for k, v in namespace.items()
                                       if k.startswith('RESOURCES_')}
        task_namespace.update(
            {k: int(namespace[k]) for k in ['priority', 'disabled', 'worker_timeout']
             if k in namespace})
        yield ReRuleTask.factory(
                task_name, dry_run_suffix=dry_run_suffix, action_namespace=action_namespace,
                **task_namespace
        )
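
A sketch of the kind of namespace mapping the parsing above expects (all names made up); the resources/priority extraction below mirrors the comprehensions in the function:

namespace = {
    'target_pattern': 'out/%.txt',
    'sources_repls': 'src/%.txt',
    'action_template': 'cp {SOURCES} {TARGET}',
    'RESOURCES_cpu': '2',
    'priority': '10',
}
resources = {k.partition('_')[2]: int(v) for k, v in namespace.items()
             if k.startswith('RESOURCES_')}
priority = int(namespace['priority'])
print(resources, priority)  # {'cpu': 2} 10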
Example #13
    def parse_date(self, agetd, entry):
        m = self.age_pattern.search(agetd.text)
        days = None
        hours = None
        if m:
            days = int(m.group("days1"))
            hours = int(m.group("days2")) * 2.4
        else:
            p = re.compile(r"(?P<hours>\d+) hours?")
            m = p.search(agetd.text)
            if m:
                days = 0
                hours = int(m.group("hours"))
        if hours is not None:
            pubdate = arrow.utcnow().replace(days=-days, hours=-1)  # hours because of timezone change below
            if hours > 0:
                pubdate = pubdate.replace(hours=-hours)
            pubdate = pubdate.to("+01:00")  # nzbindex server time, I guess?
            entry.epoch = pubdate.timestamp
            entry.pubdate_utc = str(pubdate)
            entry.age_days = (arrow.utcnow() - pubdate).days
            entry.age = str(entry.age_days) + "d"
            entry.age_precise = True  # Precise to 2.4 hours, should be enough for duplicate detection
            entry.pubDate = pubdate.format("ddd, DD MMM YYYY HH:mm:ss Z")

        else:
            self.error("Found no age info in %s" % str(agetd))
            raise IndexerResultParsingRowException("Unable to parse age")
Example #14
    def test_init(self):
        dimension = wcs.Dimension2D(int(1), int(2))
        ref_coord = wcs.Coord2D(wcs.RefCoord(float(9.0), float(10.0)),
                                wcs.RefCoord(float(11.0), float(12.0)))
        cd11 = float(1.1)
        cd12 = float(1.2)
        cd21 = float(2.1)
        cd22 = float(2.2)

        self.assertRaises(TypeError, wcs.CoordFunction2D, None,
                          ref_coord, cd11, cd12, cd21, cd22)
        self.assertRaises(TypeError, wcs.CoordFunction2D, dimension,
                          None, cd11, cd12, cd21, cd22)
        self.assertRaises(TypeError, wcs.CoordFunction2D, dimension,
                          ref_coord, None, cd12, cd21, cd22)
        self.assertRaises(TypeError, wcs.CoordFunction2D, dimension,
                          ref_coord, cd11, None, cd21, cd22)
        self.assertRaises(TypeError, wcs.CoordFunction2D, dimension,
                          ref_coord, cd11, cd12, None, cd22)
        self.assertRaises(TypeError, wcs.CoordFunction2D, dimension,
                          ref_coord, cd11, cd12, cd21, None)

        function = wcs.CoordFunction2D(dimension, ref_coord,
                                       cd11, cd12, cd21, cd22)
        self.assertEqual(function.dimension, dimension)
        self.assertEqual(function.ref_coord, ref_coord)
        self.assertEqual(function.cd11, cd11)
        self.assertEqual(function.cd12, cd12)
        self.assertEqual(function.cd21, cd21)
        self.assertEqual(function.cd22, cd22)
Example #15
 def initializeVisibilityMatrix(self):
     global VISIBILITY_MATRIX_CACHE
     if reduce(str.__add__, self.layoutText) not in VISIBILITY_MATRIX_CACHE:
         from .game import Directions
         vecs = [(-0.5, 0), (0.5, 0), (0, -0.5), (0, 0.5)]
         dirs = [
             Directions.NORTH,
             Directions.SOUTH,
             Directions.WEST,
             Directions.EAST]
         vis = Grid(
             self.width,
             self.height,
             {Directions.NORTH: set(),
              Directions.SOUTH: set(),
              Directions.EAST: set(),
              Directions.WEST: set(),
              Directions.STOP: set()})
         for x in range(self.width):
             for y in range(self.height):
                 if self.walls[x][y] == False:
                     for vec, direction in zip(vecs, dirs):
                         dx, dy = vec
                         nextx, nexty = x + dx, y + dy
                         while (nextx + nexty) != int(nextx) + int(nexty) or not self.walls[int(nextx)][int(nexty)]:
                             vis[x][y][direction].add((nextx, nexty))
                             nextx, nexty = nextx + dx, nexty + dy  # step outward along the ray; restarting from (x, y) would never terminate
         self.visibility = vis
         VISIBILITY_MATRIX_CACHE[reduce(str.__add__, self.layoutText)] = vis
     else:
         self.visibility = VISIBILITY_MATRIX_CACHE[
             reduce(str.__add__, self.layoutText)]
Example #16
	def pre_process (self, rec):
		"""
		Clean flanking whitespace, ensure rec has all fields and parse out structured new fields.
		"""
		# make sure that every field in the record & strip flanking wspace
		for f in consts.ALL_NAMES:
			if f in rec.keys():
				rec[f] = str (rec[f]).strip()
			else:
				rec[f] = ''

		# now parse out the structured / metadata fields
		try:
			rec['tags'] = self.parse_tags_str (rec.get ('tags', ''))
			rec['repeat'] = self.parse_repeat_str (rec.get ('repeat', ''))
		except:
			print ("Rec %s has problems with its repeat or tag fields" % rec[Column.variable.value])
			raise

		# normalise integer validation bounds to plain integer strings
		if rec[Column.text_validation_type.value] == 'integer':
			min = rec[Column.text_validation_min.value]
			if min:
				rec[Column.text_validation_min.value] = str ("%d" % int (min))
			max = rec[Column.text_validation_max.value]
			if max:
				rec[Column.text_validation_max.value] = str ("%d" % int (max))
		return rec
Example #17
    def test_init(self):
        self.assertRaises(TypeError, wcs.CoordAxis1D, None)
        self.assertRaises(TypeError, wcs.CoordAxis1D, int(1))

        axis = wcs.Axis("ctype", "cunit")
        axis_1d = wcs.CoordAxis1D(axis)
        self.assertEqual(axis_1d.axis, axis)
        with self.assertRaises(TypeError):
            axis_1d.error = str("s")
            axis_1d.bounds = str("s")
            axis_1d.function = str("s")
            axis_1d.range = str("s")

        error = wcs.CoordError(float(1.0), float(2.0))
        axis_1d.error = error
        self.assertEqual(axis_1d.error, error)

        start = wcs.RefCoord(float(1.0), float(2.0))
        end = wcs.RefCoord(float(3.0), float(4.0))
        coord_range = wcs.CoordRange1D(start, end)
        axis_1d.range = coord_range
        self.assertEqual(axis_1d.range, coord_range)

        bounds = wcs.CoordBounds1D()
        axis_1d.bounds = bounds
        self.assertEqual(axis_1d.bounds, bounds)

        naxis = int(1)
        delta = float(2.5)
        ref_coord = wcs.RefCoord(float(1.0), float(2.0))
        function = wcs.CoordFunction1D(naxis, delta, ref_coord)
        axis_1d.function = function
        self.assertEqual(axis_1d.function, function)
Example #18
    def write_entry(self, key, value, encoding):
        if encoding == "hex2bin":
            if len(value) % 2 != 0:
                raise InputError("%s: Invalid data length. Should be multiple of 2." % key)
            value = binascii.a2b_hex(value)

        if encoding == "base64":
            value = binascii.a2b_base64(value)

        if encoding == "string":
            if type(value) == bytes:
                value = value.decode()
            value += '\0'

        encoding = encoding.lower()
        varlen_encodings = ["string", "binary", "hex2bin", "base64"]
        primitive_encodings = ["u8", "i8", "u16", "u32", "i32"]

        if encoding in varlen_encodings:
            try:
                self.cur_page.write_varlen_data(key, value, encoding, self.namespace_idx,self)
            except PageFullError:
                new_page = self.create_new_page()
                new_page.write_varlen_data(key, value, encoding, self.namespace_idx,self)
        elif encoding in primitive_encodings:
            try:
                self.cur_page.write_primitive_data(key, int(value), encoding, self.namespace_idx,self)
            except PageFullError:
                new_page = self.create_new_page()
                new_page.write_primitive_data(key, int(value), encoding, self.namespace_idx,self)
        else:
            raise InputError("%s: Unsupported encoding" % encoding)
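
A standalone sketch of the value conversions performed above, using only the standard library (the page-writing calls themselves need the surrounding class):

import binascii

hex_value = "0123456789abcdef"
assert len(hex_value) % 2 == 0             # odd-length hex strings are rejected above
raw = binascii.a2b_hex(hex_value)          # hex2bin -> 8 raw bytes

blob = binascii.a2b_base64("aGVsbG8=")     # base64 -> b'hello'

text = "hello"
text += '\0'                               # string values are NUL-terminated before writing
print(len(raw), blob, repr(text))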
Example #19
 def makeTranscript(self,root):
     id=root["extra"]["ID"]
     strand=root["strand"]
     transcript=Transcript(id,strand)
     root["object"]=transcript
     transcript.substrate=root["substrate"]
     transcript.source=root["source"]
     transcript.begin=int(root["begin"])
     transcript.end=int(root["end"])
     children=root.get("children",None)
     if(children is None): return transcript
     for child in children:
         obj=self.labelStructure(child)
         if(obj is None): continue
         if(type(obj)==Exon):
             childType=child["type"]
             if(childType=="CDS" or
                re.search("-exon",childType)): transcript.addExon(obj)
             elif(childType=="exon"): transcript.addRawExon(obj)
             elif(re.search("UTR",childType)): transcript.addUTR(obj)
             obj.transcript=transcript
     transcript.parseRawExons()
     transcript.setExonTypes()
     transcript.setUTRtypes()
     transcript.sortExons()
     transcript.adjustOrders()
     extra=root["extra"]
     transcript.extraFields=""
     for key in extra:
         transcript.extraFields+=key+"="+extra[key]+";"
     return transcript
Example #20
def loadCounts(indiv,hap,filename,changes,hash):
    transcripts=changes.keys()
    for transcript in transcripts:
        if(changes[transcript]=="cryptic-site"):
            if(not rex.find("ALT\d+_(\S+)_(\d+)",transcript)):
                raise Exception(transcript)
            if(int(rex[2])!=hap): continue
            baseTrans=rex[1]+"_"+rex[2]
            if(not expressed.get(rex[1],False)): continue
            rec=hash.get(indiv+baseTrans,None)
            if(not rec): 
                rec=hash[indiv+baseTrans]={"numSites":0}
            rec["numSites"]+=1
    with open(filename,"rt") as fh:
        for line in fh:
            fields=line.split()
            if(len(fields)!=2): continue
            (altID,count)=fields
            if(not changes.get(altID,None)): continue
            type=changes[altID]
            if(type!="cryptic-site"): continue
            if(not rex.find("ALT\d+_(\S+)_(\d+)",altID)):
                raise Exception(altID)
            baseTrans=rex[1]+"_"+rex[2]
            if(hash[indiv+baseTrans].get("supported",None) is None):
                hash[indiv+baseTrans]["supported"]=0
            if(int(count)>=MIN_COUNT):
                hash[indiv+baseTrans]["supported"]+=1
Example #21
def _get_terminal_size_linux():
    def ioctl_GWINSZ(fd):
        try:
            import fcntl
            import termios
            cr = struct.unpack('hh',
                               fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
            return cr
        except Exception as e:
            log.error(e)
            pass

    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            cr = ioctl_GWINSZ(fd)
            os.close(fd)
        except Exception as e:
            log.error(e)
            pass
    if not cr:
        try:
            cr = (os.environ['LINES'], os.environ['COLUMNS'])
        except Exception as e:
            log.error(e)
            return None
    return int(cr[1]), int(cr[0])
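
A usage sketch, assuming the module-level imports (os, struct) and the log object the excerpt relies on are available:

size = _get_terminal_size_linux()
if size is not None:
    columns, lines = size  # the function returns (columns, lines)
    print("terminal: %d columns x %d lines" % (columns, lines))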
Example #22
def _parse_percents(male_percent_str, female_percent_str):
    if not (male_percent_str or female_percent_str):
        # Neither specified. 50% each.
        male_percent = 50
        female_percent = 50
    else:
        r = re.compile('^\d+$')
        male_percent = None
        female_percent = None
        if male_percent_str:
            if not r.match(male_percent_str):
                _die_usage('Bad male percent: {0}'.format(male_percent_str))
            male_percent = int(male_percent_str)

        if female_percent_str:
            if not r.match(female_percent_str):
                _die_usage('Bad female percent: {0}'.format(female_percent_str))
            female_percent = int(female_percent_str)


        if not female_percent:
            # We have a male value, but not a female one.
            female_percent = 100 - male_percent
        if not male_percent:
            # We have a female value, but not a male one.
            male_percent = 100 - female_percent

    if (male_percent + female_percent) != 100:
        _die_usage("Male and female percentages don't add up to 100.")

    return (male_percent, female_percent)
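
Expected behaviour, following the branches above (a sketch only; _die_usage and the re import come from the surrounding module):

print(_parse_percents(None, None))  # (50, 50)  -- neither given
print(_parse_percents('60', None))  # (60, 40)  -- female derived from male
print(_parse_percents(None, '25'))  # (75, 25)  -- male derived from female
_parse_percents('70', '40')         # aborts: percentages don't add up to 100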
Example #23
    def update_usage(self, line=None):
        """
        Check usage percentage for this mountpoint.
        Returns dictionary with usage details.
        """
        if self.filesystem in PSEUDO_FILESYSTEMS:
            return {}

        if line is None:
            parser = ShellCommandParser()
            try:
                stdout, stderr = parser.execute('df', '-Pk', self.mountpoint)
            except ShellCommandParserError:
                raise FileSystemError('Error getting usage for {0}'.format(self.mountpoint))

            header, usage = stdout.split('\n', 1)
            try:
                usage = ' '.join(usage.split('\n'))
            except ValueError:
                pass

        else:
            usage = ' '.join(line.split('\n'))

        fs, size, used, free, percent, mp = [x.strip() for x in usage.split()]
        percent = percent.rstrip('%')

        self.usage = {
            'mountpoint': self.mountpoint,
            'size': int(size),
            'used': int(used),
            'free': int(free),
            'percent': int(percent),
        }
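
A standalone sketch of the field splitting above, using a made-up df -Pk output line:

line = '/dev/sda1  103081248  48123456  49700000  50%  /'
fs, size, used, free, percent, mp = [x.strip() for x in line.split()]
usage = {
    'mountpoint': mp,
    'size': int(size),
    'used': int(used),
    'free': int(free),
    'percent': int(percent.rstrip('%')),
}
print(usage)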
Example #24
    def get_package_stats(self, pid=None, root=None, owner=None):
        qry = ("SELECT p.pid, SUM(f.size) AS sizetotal, COUNT(f.fid) AS linkstotal, sizedone, linksdone "
               "FROM packages p JOIN files f ON p.pid = f.package AND f.dlstatus > 0 {0} LEFT OUTER JOIN "
               "(SELECT p.pid AS pid, SUM(f.size) AS sizedone, COUNT(f.fid) AS linksdone "
               "FROM packages p JOIN files f ON p.pid = f.package {0} AND f.dlstatus in (5,6) GROUP BY p.pid) s ON s.pid = p.pid "
               "GROUP BY p.pid")

        # status in (finished, skipped, processing)

        if root is not None:
            self.c.execute(qry.format(
                "AND (p.root=:root OR p.pid=:root)"), locals())
        elif pid is not None:
            self.c.execute(qry.format("AND p.pid=:pid"), locals())
        elif owner is not None:
            self.c.execute(qry.format("AND p.owner=:owner"), locals())
        else:
            self.c.execute(qry.format(""))

        data = {}
        for r in self.c.fetchall():
            data[r[0]] = PackageStats(
                r[2] if r[2] else 0,
                r[4] if r[4] else 0,
                int(r[1]) if r[1] else 0,
                int(r[3]) if r[3] else 0,
            )

        return data
Example #25
    def write_data(self, data, dstart=None, reshape_order='C'):
        """Write ``data`` to `file`.

        Parameters
        ----------
        data : `array-like`
            Data that should be written to `file`.
        dstart : non-negative int, optional
            Offset in bytes of the start position of the written data.
            By default, it is taken to be `header_size`.
        reshape_order : {'C', 'F', 'A'}, optional
            Value passed as ``order`` parameter to `numpy.reshape`.
            Reshaping is only done in case the whole data block is read.

        See Also
        --------
        write_header
        """
        data = np.asarray(data).reshape(-1, order=reshape_order)
        if dstart is None:
            dstart = int(self.header_size)
        elif dstart < 0:
            raise ValueError('`dstart` must be non-negative, got {}'
                             ''.format(dstart))
        else:
            dstart = int(dstart)

        if dstart < self.header_size:
            raise ValueError('invalid `dstart`, resulting in absolute '
                             '`dstart` < `header_size` ({} < {})'
                             ''.format(dstart, self.header_size))

        self.file.seek(dstart)
        data.tofile(self.file)
Example #26
    def update_usage(self, line=None):
        """
        Check usage percentage for this mountpoint.
        Returns dictionary with usage details.
        """

        if line is None:
            parser = ShellCommandParser()
            try:
                stdout, stderr = parser.execute('df', '-k', self.mountpoint)
            except ShellCommandParserError as e:
                raise FileSystemError('Error checking filesystem usage: {0}'.format(e))

            header, usage = stdout.split('\n', 1)

        else:
            usage = line

        m = RE_DF.match(usage)
        if not m:
            raise FileSystemError('Error matching df output line: {0}'.format(usage))

        self.usage = {
            'mountpoint': self.mountpoint,
            'size': int(m.group(2)),
            'used': int(m.group(3)),
            'free': int(m.group(4)),
            'percent': int(m.group(5))
        }
Example #27
    def getFeatures(self, state, action):
        # extract the grid of food and wall locations and get the ghost
        # locations
        food = state.getFood()
        walls = state.getWalls()
        ghosts = state.getGhostPositions()

        features = util.Counter()

        features["bias"] = 1.0

        # compute the location of pacman after he takes the action
        x, y = state.getPacmanPosition()
        dx, dy = Actions.directionToVector(action)
        next_x, next_y = int(x + dx), int(y + dy)

        # count the number of ghosts 1-step away
        features["#-of-ghosts-1-step-away"] = sum((next_x, next_y) in Actions.getLegalNeighbors(g, walls)
                                                  for g in ghosts)

        # if there is no danger of ghosts then add the food feature
        if not features["#-of-ghosts-1-step-away"] and food[next_x][next_y]:
            features["eats-food"] = 1.0

        dist = closestFood((next_x, next_y), food, walls)
        if dist is not None:
            # make the distance a number less than one otherwise the update
            # will diverge wildly
            features["closest-food"] = old_div(float(dist), \
                (walls.width * walls.height))
        features.divideAll(10.0)
        return features
Example #28
def submitted_jobs_user(username=None, path=None):
    """
    returns info on submitted jobs for given user
    as a list of dictionaries
    """
    if username is None:
        username = get_current_user()
    output = subprocess.Popen(['condor_q', username, "-long"],
                              stdout=subprocess.PIPE).communicate()[0]
    jobs = output.strip().split("\n\n")
    joblist = []
    for job in jobs:
        j = re.findall("ClusterId = (.*)\n", job)
        if len(j) == 0 and len(jobs) <= 1:
            return []
        job_id = j[0]
        directory = re.findall('Iwd = "(.*)"\n', job)[0]
        run_id = int(re.findall('UserLog = ".*/([0-9]*).log"\n', job)[0])
        status = int(re.findall('\nJobStatus = ([0-9]*)\n', job)[0])
        if path is not None and not directory.startswith(path):
            continue
        joblist.append(
            dict(job_exp_id=job_id,
                 directory=directory,
                 run_exp_id=run_id,
                 status=status))
    return joblist
Example #29
    def loadGFF_transcript(self,fields,line,transcriptBeginEnd,GFF,
                           transcripts,readOrder,genes):
        begin=int(fields[3])-1
        end=int(fields[4])
        rex=Rex()
        if(rex.find('transcript_id[:=]?\s*"?([^\s";]+)"?',line)):
            transcriptId=rex[1]
            transcriptBeginEnd[transcriptId]=[begin,end]
            strand=fields[6]
            transcriptExtraFields=""
            for i in range(8,len(fields)):
                transcriptExtraFields+=fields[i]+" "
            transcript=transcripts.get(transcriptId,None)
            if(transcript is None):
                transcripts[transcriptId]=transcript= \
	                                   Transcript(transcriptId,strand)
                transcript.setStopCodons(self.stopCodons)
                transcript.readOrder=readOrder;
                readOrder+=1
                transcript.substrate=fields[0]
                transcript.source=fields[1]
                transcript.setBegin(begin)
                transcript.setEnd(end)
            geneId=None
            if(rex.find("genegrp=(\S+)",line)): geneId=rex[1]
            elif(rex.find('gene_id[:=]?\s*\"?([^\s\;"]+)\"?',line)):
                geneId=rex[1]
            if(not geneId): raise Exception("can't parse GTF: "+line)
            transcript.geneId=geneId
            gene=genes.get(geneId,None)
            if(not gene): genes[geneId]=gene=Gene(); gene.setId(geneId)
            transcript.setGene(gene)
            gene.addTranscript(transcript)
            transcript.extraFields=transcriptExtraFields
Example #30
def addThumbnailBase64():
	images = {}
	delete_all = False
	if delete_all:
		for d in getCollDrops().find({"profile_image_url_big_base64":{'$exists':True}}):
			del d["profile_image_url_big_base64"]
			del d["profile_image_url_small_base64"]
			getCollDrops().save(d, safe=True)
	cnt = getCollDrops().find({"profile_image_url_big_base64":{'$exists':False}}).count()
	print("numdrops:",cnt)			
	clog("making cache")			
	for d in getCollDrops().find({"profile_image_url_big_base64":{'$exists':True}}):
		bt = d["user"]["profile_image_url"].replace("_normal", "_reasonably_small")
		images[bt] = (d["profile_image_url_big_base64"], d["profile_image_url_small_base64"])		
	clog("making thumbnails")
	for d in getCollDrops().find({"profile_image_url_big_base64":{'$exists':False}}):
		bt = d["user"]["profile_image_url"].replace("_normal", "_reasonably_small")
		cnt -= 1;
		if cnt%50==0:
			print(cnt, bt)
		if bt not in images:
			try:
				data = urllib.request.urlopen(bt).read()
				fdata = io.BytesIO(data)  # io.BytesIO: cStringIO is Python 2 only
				i = Image.open(fdata)
			except (Exception) as e:
				print(e)
				data = urllib.request.urlopen(d["user"]["profile_image_url"]).read()
				fdata = io.BytesIO(data)
				i = Image.open(fdata)
			if old_div(float(i.size[1]), float(i.size[0]))>0.5:
				if cnt%10==0:
					print(d["screen_name"], bt)					
				nh = float(i.size[1]) / float(i.size[0]) * 80			
				i = i.resize((80, int(nh)),Image.ANTIALIAS)
				outdata = io.BytesIO()
				bts = bt.split(".")
				format = bts[len(bts)-1].lower()			
				if format=="jpg":
					format = "jpeg"
				if format!="jpeg" and format!="png" and format!="gif":
					pass
				else:
					i.save(outdata, format)
					d["profile_image_url_big_base64"] = "data:image/"+format+";base64,"+base64.encodestring(outdata.getvalue())
					nh = float(i.size[1]) / float(i.size[0]) * 30
					i = i.resize((30, int(nh)),Image.ANTIALIAS)
					outdata = io.BytesIO()
					i.save(outdata, format)		
					d["profile_image_url_small_base64"] = "data:image/"+format+";base64,"+base64.encodestring(outdata.getvalue())
					images[bt] = (d["profile_image_url_big_base64"], d["profile_image_url_small_base64"])
			else:
				print("skipping:", old_div(float(i.size[1]), float(i.size[0])), bt)
		else:
			if cnt%10==0:			
				print(d["screen_name"])
			d["profile_image_url_big_base64"] = images[bt][0]
			d["profile_image_url_small_base64"] = images[bt][1]
		getCollDrops().save(d, safe=True)
Example #31
def _multinode_transfer(method, dest, source, dst, username, ssh_private_key,
                        rls, mpt):
    # type: (str, DestinationSettings, SourceSettings, str, str,
    #        pathlib.Path, dict, int) -> None
    """Transfer data to multiple destination nodes simultaneously
    :param str method: transfer method
    :param DestinationSettings dest: destination settings
    :param SourceSettings source: source settings
    :param str dst: destination path
    :param str username: username
    :param pathlib.Path ssh_private_key: ssh private key
    :param dict rls: remote login settings
    :param int mpt: max parallel transfers per node
    """
    src = source.path
    src_incl = source.include
    src_excl = source.exclude
    psrc = pathlib.Path(src)
    # if source isn't a directory, convert it using src_incl
    if not psrc.is_dir():
        src_excl = None
        src_incl = [src]
        src = str(psrc.parent)
        psrc = psrc.parent
    # if split is specified, force to multinode_scp
    if (dest.data_transfer.split_files_megabytes is not None
            and method != 'multinode_scp'):
        logger.warning('forcing transfer method to multinode_scp with split')
        method = 'multinode_scp'
    buckets = {}
    files = {}
    rcodes = {}
    spfiles = []
    spfiles_count = {}
    spfiles_count_lock = threading.Lock()
    for rkey in rls:
        buckets[rkey] = 0
        files[rkey] = []
        rcodes[rkey] = None
    # walk the directory structure
    # 1. construct a set of dirs to create on the remote side
    # 2. binpack files to different nodes
    total_files = 0
    dirs = set()
    if dest.relative_destination_path is not None:
        dirs.add(dest.relative_destination_path)
    for entry in util.scantree(src):
        rel = pathlib.Path(entry.path).relative_to(psrc)
        sparent = str(pathlib.Path(entry.path).relative_to(psrc).parent)
        if entry.is_file():
            srel = str(rel)
            # check filters
            if src_excl is not None:
                inc = not any([fnmatch.fnmatch(srel, x) for x in src_excl])
            else:
                inc = True
            if src_incl is not None:
                inc = any([fnmatch.fnmatch(srel, x) for x in src_incl])
            if not inc:
                logger.debug('skipping file {} due to filters'.format(
                    entry.path))
                continue
            if dest.relative_destination_path is None:
                dstpath = '{}{}'.format(dst, rel)
            else:
                dstpath = '{}{}/{}'.format(dst, dest.relative_destination_path,
                                           rel)
            # get key of min bucket values
            fsize = entry.stat().st_size
            if (dest.data_transfer.split_files_megabytes is not None
                    and fsize > dest.data_transfer.split_files_megabytes):
                nsplits = int(
                    math.ceil(fsize /
                              dest.data_transfer.split_files_megabytes))
                lpad = int(math.log10(nsplits)) + 1
                spfiles.append(dstpath)
                spfiles_count[dstpath] = nsplits
                n = 0
                curr = 0
                while True:
                    end = curr + dest.data_transfer.split_files_megabytes
                    if end > fsize:
                        end = fsize
                    key = min(buckets, key=buckets.get)
                    buckets[key] += (end - curr)
                    if n == 0:
                        dstfname = dstpath
                    else:
                        dstfname = '{}.{}{}'.format(dstpath,
                                                    _FILE_SPLIT_PREFIX,
                                                    str(n).zfill(lpad))
                    files[key].append((entry.path, dstfname, curr, end))
                    if end == fsize:
                        break
                    curr = end
                    n += 1
            else:
                key = min(buckets, key=buckets.get)
                buckets[key] += fsize
                files[key].append((entry.path, dstpath, None, None))
            total_files += 1
        # add directory to create
        if sparent != '.':
            if dest.relative_destination_path is None:
                dirs.add(sparent)
            else:
                dirs.add('{}/{}'.format(dest.relative_destination_path,
                                        sparent))
    total_size = sum(buckets.values())
    if total_files == 0:
        logger.error('no files to ingress')
        return
    # create remote directories via ssh
    if len(dirs) == 0:
        logger.debug('no remote directories to create')
    else:
        logger.debug('creating remote directories: {}'.format(dirs))
        dirs = ['mkdir -p {}'.format(x) for x in list(dirs)]
        dirs.insert(0, 'cd {}'.format(dst))
        _rls = next(iter(rls.values()))
        ip = _rls.remote_login_ip_address
        port = _rls.remote_login_port
        del _rls
        mkdircmd = ('ssh -T -x -o StrictHostKeyChecking=no '
                    '-o UserKnownHostsFile={} -i {} -p {} {}@{} {}'.format(
                        os.devnull, ssh_private_key, port, username, ip,
                        util.wrap_commands_in_shell(dirs)))
        rc = util.subprocess_with_output(mkdircmd,
                                         shell=True,
                                         suppress_output=True)
        if rc == 0:
            logger.info('remote directories created on {}'.format(dst))
        else:
            logger.error('remote directory creation failed')
            return
        del ip
        del port
    logger.info(
        'ingress data: {0:.4f} MiB in {1} files to transfer, using {2} max '
        'parallel transfers per node'.format(total_size / _MEGABYTE,
                                             total_files, mpt))
    logger.info('begin ingressing data from {} to {}'.format(src, dst))
    nodekeys = list(buckets.keys())
    threads = []
    start = datetime.datetime.now()
    for i in range(0, len(buckets)):
        nkey = nodekeys[i]
        thr = threading.Thread(
            target=_multinode_thread_worker,
            args=(method, mpt, nkey, rcodes, files[nkey], spfiles_count,
                  spfiles_count_lock, rls[nkey].remote_login_ip_address,
                  rls[nkey].remote_login_port, username, ssh_private_key,
                  dest.data_transfer.scp_ssh_extra_options,
                  dest.data_transfer.rsync_extra_options))
        threads.append(thr)
        thr.start()
    for i in range(0, len(buckets)):
        threads[i].join()
    diff = datetime.datetime.now() - start
    del threads
    success = True
    for nkey in rcodes:
        if rcodes[nkey] != 0:
            logger.error('data ingress failed to node: {}'.format(nkey))
            success = False
    if success:
        logger.info(
            'finished ingressing {0:.4f} MB of data in {1} files from {2} to '
            '{3} in {4:.2f} sec ({5:.3f} Mbit/s)'.format(
                total_size / _MEGABYTE, total_files, src, dst,
                diff.total_seconds(),
                (total_size * 8 / 1e6) / diff.total_seconds()))
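
The per-node file assignment above is a greedy least-loaded-bucket heuristic; a minimal standalone sketch with made-up node keys and file sizes:

buckets = {'node0': 0, 'node1': 0, 'node2': 0}  # bytes assigned so far
files = {key: [] for key in buckets}
for name, fsize in [('a.bin', 700), ('b.bin', 500), ('c.bin', 300), ('d.bin', 200)]:
    key = min(buckets, key=buckets.get)         # pick the node with the least data queued
    buckets[key] += fsize
    files[key].append(name)
print(buckets)  # {'node0': 700, 'node1': 500, 'node2': 500}
print(files)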
Example #32
import xlrd
import pandas as pd

path = r'C:\Users\nailt\Desktop\data-mining-git\table.xlsx'
wb = xlrd.open_workbook(path)
sheet_names = wb.sheet_names()

for sheet in wb.sheets():
    nrows = sheet.nrows
    ncols = sheet.ncols
    dataframe = []
    for row in range(nrows):
        row_carrier = []
        for col in range(ncols):
            value = sheet.cell(row,col).value
            try:
                value = str(int(value))
            except ValueError:
                pass
            finally:
                row_carrier.append(value)
        dataframe.append(row_carrier)

adres_list = []
for i in range(1,nrows-1):
    if dataframe[i][0] != dataframe[i+1][0]:
        adres_list.append(dataframe[i][0])
    else:
        pass
dataframe[1]
df = pd.DataFrame(adres_list)
df.to_csv('adres_list.csv',index = False)
Example #33
def train(hyp, opt, device):
    save_dir, epochs, batch_size, total_batch_size, weights, rank = \
    Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
    do_semi = opt.do_semi
    # Directories
    wdir = save_dir / 'weights'
    wdir.mkdir(parents=True, exist_ok=True)
    last = wdir / 'last.pt'
    best = wdir / 'best.pt'
    results_file = save_dir / 'results.txt'

    # Save run settings
    with open(save_dir / 'hyp.yaml', 'w') as f:
        yaml.dump(hyp, f, sort_keys=False)
    with open(save_dir / 'opt.yaml', 'w') as f:
        yaml.dump(vars(opt), f, sort_keys=False)

    # Configure
    plots = not opt.evolve  #create plots
    cuda = device.type != 'cpu'
    init_seeds(2 + rank)
    with open(opt.data) as f:
        data_dict = yaml.load(f, Loader=yaml.SafeLoader)
    nc = 1 if opt.single_cls else int(data_dict['nc'])  #number of classes
    names = ['item'] if opt.single_cls and len(
        data_dict['names']) != 1 else data_dict['names']
    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (
        len(names), nc, opt.data)

    # Model
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(rank):
            attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  #load checkpoint
        model = Model(opt.cfg or ckpt['model'].yaml,
                      ch=3,
                      nc=nc,
                      anchors=hyp.get('anchors')).to(device)  #create
        exclude = [
            'anchor'
        ] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [
        ]  #exclude keys
        state_dict = ckpt['model'].float().state_dict()  # to FP32
        state_dict = intersect_dicts(state_dict,
                                     model.state_dict(),
                                     exclude=exclude)  #intersect
        model.load_state_dict(state_dict, strict=False)  #load

    else:
        model = Model(opt.cfg, ch=3, nc=nc,
                      anchors=hyp.get('anchors')).to(device)

    with torch_distributed_zero_first(rank):
        check_dataset(data_dict)  #check
    train_path = data_dict['train']
    test_path = data_dict['val']

    # Optimizer
    nbs = 64
    accumulate = max(round(nbs / total_batch_size),
                     1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs

    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in model.named_modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
            pg2.append(v.bias)  # biases
        if isinstance(v, nn.BatchNorm2d):
            pg0.append(v.weight)  # no decay
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
            pg1.append(v.weight)  # apply decay

    if opt.adam:
        optimizer = optim.Adam(pg0,
                               lr=hyp['lr0'],
                               betas=(hyp['momentum'],
                                      0.999))  # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0,
                              lr=hyp['lr0'],
                              momentum=hyp['momentum'],
                              nesterov=True)

    optimizer.add_param_group({
        'params': pg1,
        'weight_decay': hyp['weight_decay']
    })  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    del pg0, pg1, pg2

    if opt.linear_lr:
        lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp[
            'lrf']  # linear
    else:
        lf = one_cycle(1, hyp['lrf'], epochs)
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)

    # EMA
    ema = ModelEMA(model) if rank in [-1, 0] else None

    # Resume
    start_epoch, best_fitness = 0, 0.0

    if pretrained:
        # optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']

        # EMA
        if ema and ckpt.get('ema'):
            ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
            ema.updates = ckpt['updates']

        # Results
        if ckpt.get('training_results') is not None:
            results_file.write_text(
                ckpt['training_results'])  # write results.txt

        # Epochs
        start_epoch = ckpt['epoch'] + 1
        if opt.resume:
            assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (
                weights, epochs)
        if epochs < start_epoch:
            epochs += ckpt['epoch']
        del ckpt, state_dict

    # Image sizes
    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
    nl = model.model[
        -1].nl  # number of detection layers (used for scaling hyp['obj'])
    imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size
                         ]  # verify imgsz are gs-multiples

    # DP mode
    if cuda and rank == -1 and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and rank != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(
            device)

    # Trainloader
    if do_semi:
        dataloader, dataset, unlabeldataloader = create_dataloader(
            train_path,
            imgsz,
            batch_size,
            gs,
            opt,
            hyp=hyp,
            augment=True,
            cache=opt.cache_images,
            rect=opt.rect,
            rank=rank,
            world_size=opt.world_size,
            workers=opt.workers,
            image_weights=opt.image_weights,
            quad=opt.quad,
            prefix=colorstr('train: '),
            do_semi=opt.do_semi)
    else:
        dataloader, dataset = create_dataloader(
            train_path,
            imgsz,
            batch_size,
            gs,
            opt,
            hyp=hyp,
            augment=True,
            cache=opt.cache_images,
            rect=opt.rect,
            rank=rank,
            world_size=opt.world_size,
            workers=opt.workers,
            image_weights=opt.image_weights,
            quad=opt.quad,
            prefix=colorstr('train: '),
            do_semi=opt.do_semi)

    # Train teacher model
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
    nb = len(dataloader)  # number of batches

    assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (
        mlc, nc, opt.data, nc - 1)

    # process 0
    if rank in [-1, 0]:
        testloader = create_dataloader(
            test_path,
            imgsz_test,
            batch_size * 2,
            gs,
            opt,  # testloader
            hyp=hyp,
            cache=opt.cache_images and not opt.notest,
            rect=True,
            rank=-1,
            world_size=opt.world_size,
            workers=opt.workers,
            pad=0.5,
            prefix=colorstr('val: '),
            do_semi=False)[0]

        if not opt.resume:
            labels = np.concatenate(dataset.labels, 0)
            c = torch.tensor(labels[:, 0])  # classes
            # Anchors
            if not opt.noautoanchor:
                check_anchors(dataset,
                              model=model,
                              thr=hyp['anchor_t'],
                              imgsz=imgsz)
            model.half().float()  # pre-reduce anchor precision

    # DDP mode
    if cuda and rank != -1:
        model = DDP(model,
                    device_ids=[opt.local_rank],
                    output_device=opt.local_rank,
                    find_unused_parameters=any(
                        isinstance(layer, nn.MultiheadAttention)
                        for layer in model.modules()))

    # Model parameters
    hyp['box'] *= 3. / nl  # scale to layers
    hyp['cls'] *= nc / 80. * 3. / nl  # scale to classes and layers
    hyp['obj'] *= (imgsz / 640)**2 * 3. / nl  # scale to image size and layers
    hyp['label_smoothing'] = opt.label_smoothing
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.gr = 1.0  # iou loss ratio (obj_loss = 1.0 or iou)
    model.class_weights = labels_to_class_weights(
        dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

    # Train teacher model --> burn in
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb),
             1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0
               )  # P, R, mAP@0.5, mAP@0.5:0.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = amp.GradScaler(enabled=cuda)
    compute_loss = ComputeLoss(model)  # init loss class
    burnin_epochs = epochs // 2  # integer, since range() below needs an int

    # burn in
    for epoch in range(start_epoch,
                       burnin_epochs):  # epoch-------------------------
        model.train()
        nb = len(dataloader)
        mloss = torch.zeros(4, device=device)  # mean loss
        if rank != -1:
            dataloader.sampler.set_epoch(epoch)
        pbar = enumerate(dataloader)

        if rank in [-1, 0]:
            pbar = tqdm(pbar, total=nb)
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float(
            ) / 255.0  # uint8 to float32, 0-255 to 0.0-1.0

            # Warm up
            if ni <= nw:
                xi = [0, nw]
                accumulate = max(
                    1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [
                        hyp['warmup_bias_lr'] if j == 2 else 0.0,
                        x['initial_lr'] * lf(epoch)
                    ])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(
                            ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

            # Forward
            with amp.autocast(enabled=cuda):
                pred = model(imgs)  # forward
                loss, loss_item = compute_loss(
                    pred, targets.to(device))  # loss scaled by batch_size
                if rank != -1:
                    loss *= opt.world_size  # gradient averaged between device in DDP mode
                if opt.quad:
                    loss *= 4.

            # Backward
            scaler.scale(loss).backward()

            # Optimize
            if ni % accumulate == 0:
                scaler.step(optimizer)
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)

            # print
            if rank in [-1, 0]:
                mloss = (mloss * i + loss_item) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9
                                 if torch.cuda.is_available() else 0)  # (GB)
                s = ('%10s' * 2 +
                     '%10.4g' * 6) % ('%g/%g' % (epoch, epochs - 1), mem,
                                      *mloss, targets.shape[0], imgs.shape[-1])
                pbar.set_description(s)

        scheduler.step()

        # DDP process 0 or single-GPU
        if rank in [-1, 0]:
            # mAP
            ema.update_attr(model,
                            include=[
                                'yaml', 'nc', 'hyp', 'gr', 'names', 'stride',
                                'class_weights'
                            ])
            final_epoch = epoch + 1 == epochs
            if not opt.notest or final_epoch:  # Calculate mAP

                results, maps, times = test.test(data_dict,
                                                 batch_size=batch_size * 2,
                                                 imgsz=imgsz_test,
                                                 model=ema.ema,
                                                 single_cls=opt.single_cls,
                                                 dataloader=testloader,
                                                 save_dir=save_dir,
                                                 verbose=nc < 50
                                                 and final_epoch,
                                                 plots=plots and final_epoch,
                                                 compute_loss=compute_loss)

        fi = fitness(np.array(results).reshape(
            1, -1))  # weighted combination of [P, R, mAP@0.5, mAP@0.5:0.95]
        if fi > best_fitness:
            best_fitness = fi

        if (not opt.nosave) or (final_epoch and not opt.evolve):  # if save
            ckpt = {
                'epoch': epoch,
                'best_fitness': best_fitness,
                'training_results': results_file.read_text(),
                'model': deepcopy(model.module if is_parallel(model) else model).half(),
                'ema': deepcopy(ema.ema).half(),
                'updates': ema.updates,
                'optimizer': optimizer.state_dict()
            }
            if best_fitness == fi:
                torch.save(ckpt, best)
            del ckpt

        # end epoch ----------------------------------------------------------------------------
    # end warm up

    # get pseudo labels
    # STAC
    # first apply weak augmentation to the unlabeled dataset, then use the teacher net to predict the pseudo labels
    # then apply strong augmentation to the unlabeled dataset, use the student net to get the logits, and compute the unlabeled loss

    model.eval()
    img = []
    target = []
    Path = []
    imgsz = opt.img_size
    for idx, batch in tqdm(enumerate(unlabeldataloader),
                           total=len(unlabeldataloader)):
        imgs0, _, path, _ = batch  # images are still uint8 here; converted to float below

        with torch.no_grad():
            pred = model(imgs0.to(device, non_blocking=True).float() /
                         255.0)[0]

        gn = torch.tensor(imgs0.shape)[[3, 2, 3, 2]]
        pred = non_max_suppression(pred,
                                   opt.conf_thres,
                                   opt.iou_thres,
                                   classes=opt.classes,
                                   agnostic=opt.agnostic_nms)

        for index, pre in enumerate(pred):
            predict_number = len(pre)
            if predict_number == 0:
                continue
            Class = pre[:, 5].view(predict_number, 1).cpu()
            XYWH = (xyxy2xywh(pre[:, :4])).cpu()
            XYWH /= gn
            pre = torch.cat((torch.zeros(predict_number, 1), Class, XYWH),
                            dim=1)
            img.append(imgs0[index])
            target.append(pre)
            Path.append(path[index])

    unlabeldataset = semiDataset(img, target, Path)
    del img, target, Path
    model.train()
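
A minimal, self-contained sketch of the np.interp warm-up used at the top of this training loop: over the first nw iterations the gradient-accumulation count and each parameter group's learning rate are ramped linearly between their warm-up and target values. All numbers below are illustrative assumptions, not values taken from the snippet above.

# Minimal sketch of the np.interp warm-up schedule (assumed values).
import numpy as np

nbs = 64                # nominal batch size (assumed)
total_batch_size = 16   # actual batch size (assumed)
nw = 1000               # number of warm-up iterations (assumed)
xi = [0, nw]            # interpolation end points

for ni in (0, 250, 500, 1000):
    # gradient-accumulation count ramps from 1 up to nbs / total_batch_size
    accumulate = max(1, round(float(np.interp(ni, xi, [1, nbs / total_batch_size]))))
    # a non-bias learning rate ramps from 0.0 up to its target (0.01 assumed)
    lr = float(np.interp(ni, xi, [0.0, 0.01]))
    print(ni, accumulate, round(lr, 5))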
Beispiel #34
0
    return os.path.exists(h_path)


def add_version_header(tc):
    tc["headers"].append("version")
    return tc


feature_test_macros = sorted(
    [
        add_version_header(x) for x in [
            # C++14 macros
            {
                "name": "__cpp_lib_integer_sequence",
                "values": {
                    "c++14": int(201304)
                },
                "headers": ["utility"],
            },
            {
                "name": "__cpp_lib_exchange_function",
                "values": {
                    "c++14": int(201304)
                },
                "headers": ["utility"],
            },
            {
                "name": "__cpp_lib_tuples_by_type",
                "values": {
                    "c++14": int(201304)
                },
Beispiel #35
0
 def isinf(self):
     """ Returns True if the element is infinity."""
     return int(_C.G2_ELEM_is_at_infinity(self.group.bpg, self.elem)) == 1
Beispiel #36
0
    def process_query_result(self, html, searchRequest, maxResults=None):
        self.debug("Started processing results")

        entries = []
        countRejected = self.getRejectedCountDict()
        logger.debug("Using HTML parser %s" %
                     config.settings.searching.htmlParser)
        soup = BeautifulSoup(html, config.settings.searching.htmlParser)
        main_table = soup.find(id="results").find('table')

        if "No results found" in soup.text:
            return IndexerProcessingResult(
                entries=[],
                queries=[],
                total=0,
                total_known=True,
                has_more=False,
                rejected=self.getRejectedCountDict())
        if not main_table or not main_table.find("tbody"):
            self.error("Unable to find main table in NZBIndex page: %s..." %
                       html[:500])
            self.debug(html[:500])
            raise IndexerResultParsingException(
                "Unable to find main table in NZBIndex page", self)

        items = main_table.find("tbody").find_all('tr')

        for row in items:
            try:
                entry = self.parseRow(row)
            except IndexerResultParsingRowException:
                continue
            accepted, reason, ri = self.accept_result(entry, searchRequest,
                                                      self.supportedFilters)
            if accepted:
                entries.append(entry)
            else:
                countRejected[ri] += 1
                self.debug("Rejected search result. Reason: %s" % reason)
        try:
            page_links = main_table.find("tfoot").find_all("tr")[1].find_all(
                'a')
            if len(page_links) == 0:
                total = len(entries)
                has_more = False
            else:
                pagecount = int(page_links[-2].text)
                currentpage = int(
                    main_table.find("tfoot").find_all("tr")[1].find(
                        "b").text)  #Don't count "next"
                has_more = pagecount > currentpage
                total = self.limit * pagecount  #Good enough
        except Exception:
            self.exception("Error while trying to find page count")
            total = len(entries)
            has_more = False

            self.debug("Finished processing results")
        return IndexerProcessingResult(entries=entries,
                                       queries=[],
                                       total=total,
                                       total_known=True,
                                       has_more=has_more,
                                       rejected=countRejected)
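
A small standalone sketch of the tfoot pagination logic used above, with made-up HTML standing in for a real NZBIndex results page (bs4 is assumed to be available; the page structure is an assumption for illustration only).

# Made-up HTML mimicking the tfoot pagination row parsed above.
from bs4 import BeautifulSoup

html = """
<table id="results">
  <tbody></tbody>
  <tfoot>
    <tr></tr>
    <tr><b>2</b> <a>1</a> <a>3</a> <a>4</a> <a>next</a></tr>
  </tfoot>
</table>
"""
main_table = BeautifulSoup(html, "html.parser").find(id="results")
page_links = main_table.find("tfoot").find_all("tr")[1].find_all("a")
pagecount = int(page_links[-2].text)   # last numbered page; [-1] is "next"
currentpage = int(main_table.find("tfoot").find_all("tr")[1].find("b").text)
print(pagecount, currentpage, pagecount > currentpage)   # 4 2 True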
Beispiel #37
0
    def _generate_standard_design(self, infolist, functional_runs=None,
                                  realignment_parameters=None, outliers=None):
        """ Generates a standard design matrix paradigm given information about
            each run
        """
        sessinfo = []
        output_units = 'secs'
        if 'output_units' in self.inputs.traits():
            output_units = self.inputs.output_units

        for i, info in enumerate(infolist):
            sessinfo.insert(i, dict(cond=[]))
            if isdefined(self.inputs.high_pass_filter_cutoff):
                sessinfo[i]['hpf'] = \
                    float(self.inputs.high_pass_filter_cutoff)

            if hasattr(info, 'conditions') and info.conditions is not None:
                for cid, cond in enumerate(info.conditions):
                    sessinfo[i]['cond'].insert(cid, dict())
                    sessinfo[i]['cond'][cid]['name'] = info.conditions[cid]
                    scaled_onset = scale_timings(info.onsets[cid],
                                                 self.inputs.input_units,
                                                 output_units,
                                                 self.inputs.time_repetition)
                    sessinfo[i]['cond'][cid]['onset'] = scaled_onset
                    scaled_duration = scale_timings(info.durations[cid],
                                                    self.inputs.input_units,
                                                    output_units,
                                                    self.inputs.time_repetition)
                    sessinfo[i]['cond'][cid]['duration'] = scaled_duration
                    if hasattr(info, 'amplitudes') and info.amplitudes:
                        sessinfo[i]['cond'][cid]['amplitudes'] = \
                            info.amplitudes[cid]

                    if hasattr(info, 'tmod') and info.tmod and \
                            len(info.tmod) > cid:
                        sessinfo[i]['cond'][cid]['tmod'] = info.tmod[cid]

                    if hasattr(info, 'pmod') and info.pmod and \
                            len(info.pmod) > cid:
                        if info.pmod[cid]:
                            sessinfo[i]['cond'][cid]['pmod'] = []
                            for j, name in enumerate(info.pmod[cid].name):
                                sessinfo[i]['cond'][cid]['pmod'].insert(j, {})
                                sessinfo[i]['cond'][cid]['pmod'][j]['name'] = \
                                    name
                                sessinfo[i]['cond'][cid]['pmod'][j]['poly'] = \
                                    info.pmod[cid].poly[j]
                                sessinfo[i]['cond'][cid]['pmod'][j]['param'] = \
                                    info.pmod[cid].param[j]

            sessinfo[i]['regress'] = []
            if hasattr(info, 'regressors') and info.regressors is not None:
                for j, r in enumerate(info.regressors):
                    sessinfo[i]['regress'].insert(j, dict(name='', val=[]))
                    if hasattr(info, 'regressor_names') and \
                            info.regressor_names is not None:
                        sessinfo[i]['regress'][j]['name'] = \
                            info.regressor_names[j]
                    else:
                        sessinfo[i]['regress'][j]['name'] = 'UR%d' % (j + 1)
                    sessinfo[i]['regress'][j]['val'] = info.regressors[j]
            sessinfo[i]['scans'] = functional_runs[i]

        if realignment_parameters is not None:
            for i, rp in enumerate(realignment_parameters):
                mc = realignment_parameters[i]
                for col in range(mc.shape[1]):
                    colidx = len(sessinfo[i]['regress'])
                    sessinfo[i]['regress'].insert(colidx, dict(name='', val=[]))
                    sessinfo[i]['regress'][colidx]['name'] = 'Realign%d' % (col + 1)
                    sessinfo[i]['regress'][colidx]['val'] = mc[:, col].tolist()

        if outliers is not None:
            for i, out in enumerate(outliers):
                numscans = 0
                for f in filename_to_list(sessinfo[i]['scans']):
                    shape = load(f, mmap=NUMPY_MMAP).shape
                    if len(shape) == 3 or shape[3] == 1:
                        iflogger.warning('You are using 3D instead of 4D '
                                         'files. Are you sure this was '
                                         'intended?')
                        numscans += 1
                    else:
                        numscans += shape[3]

                for j, scanno in enumerate(out):
                    colidx = len(sessinfo[i]['regress'])
                    sessinfo[i]['regress'].insert(colidx, dict(name='', val=[]))
                    sessinfo[i]['regress'][colidx]['name'] = 'Outlier%d' % (j + 1)
                    sessinfo[i]['regress'][colidx]['val'] = \
                        np.zeros((1, numscans))[0].tolist()
                    sessinfo[i]['regress'][colidx]['val'][int(scanno)] = 1
        return sessinfo
Beispiel #38
0
'''
Created on Jul 19, 2018

@author: Yan
'''
from builtins import int
print('Enter birth Year')
birthYear = int(input('It must be an integer\n'))


def ageCalculator(birthYear):
    age = 2018 - birthYear
    print('your age is: ', age)
    return age


def test_correct_age_is_calculated(calculatedAge):
    assert currentAge == calculatedAge
    print('\nAssertion made for: ', currentAge, ' years old!')
    return True


currentAge = ageCalculator(birthYear)
testAge = test_correct_age_is_calculated(currentAge)
Beispiel #39
0
def f_plane_wind_test(physics, aro_exec, nx, ny, dx, dy, dt, nTimeSteps):

    layers = 1
    grid = aro.Grid(nx, ny, layers, dx, dy)

    rho0 = 1035.

    def wind_x(X, Y, *arg):
        wind_x = np.zeros(Y.shape, dtype=np.float64)
        wind_x[int(grid.nx / 2), int(grid.ny / 2)] = 1e-5

        if not arg:
            plt.figure()
            plt.pcolormesh(X / 1e3, Y / 1e3, wind_x)
            plt.colorbar()
            plt.savefig('wind_x.png')
            plt.close()
        return wind_x

    def wind_y(X, Y, *arg):
        wind_y = np.zeros(X.shape, dtype=np.float64)
        wind_y[int(grid.nx / 2), int(grid.ny / 2)] = 1e-5

        if not arg:
            plt.figure()
            plt.pcolormesh(X / 1e3, Y / 1e3, wind_y)
            plt.colorbar()
            plt.savefig('wind_y.png')
            plt.close()
        return wind_y

    def dbl_periodic_wetmask(X, Y):
        return np.ones(X.shape, dtype=np.float64)

    with opt.working_directory(
            p.join(self_path,
                   "physics_tests/f_plane_{0}_wind".format(physics))):

        sub.check_call(["rm", "-rf", "output/"])
        drv.simulate(initHfile=[400.],
                     zonalWindFile=[wind_x],
                     meridionalWindFile=[wind_y],
                     valgrind=False,
                     nx=nx,
                     ny=ny,
                     exe=aro_exec,
                     dx=dx,
                     dy=dy,
                     wetMaskFile=[dbl_periodic_wetmask],
                     dt=dt,
                     dumpFreq=int(dt * nTimeSteps / 50),
                     nTimeSteps=nTimeSteps)

        hfiles = sorted(glob.glob("output/snap.h.*"))
        ufiles = sorted(glob.glob("output/snap.u.*"))
        vfiles = sorted(glob.glob("output/snap.v.*"))

        # expect the momentum to grow according to u*h*rho0 = delta_t * wind
        # F = m * a
        # m * v = h * rho0 * xlen * ylen * v
        #       = m * a * dt
        #       = F * dt
        #       = wind * dx * dy * dt

        momentum = np.zeros(len(hfiles), dtype=np.float64)
        model_iteration = np.zeros(len(hfiles), dtype=np.float64)

        momentum_expected = np.zeros(len(hfiles), dtype=np.float64)

        volume = np.zeros(len(hfiles))

        for counter, ufile in enumerate(ufiles):

            h = aro.interpret_raw_file(hfiles[counter], nx, ny, layers)
            u = aro.interpret_raw_file(ufile, nx, ny, layers)
            v = aro.interpret_raw_file(vfiles[counter], nx, ny, layers)

            model_iteration[counter] = float(ufile[-10:])

            # plt.figure()
            # plt.pcolormesh(grid.xp1,grid.y,u[:,:,0].transpose())
            # plt.colorbar()
            # plt.savefig('u.{0}.png'.format(ufile[-10:]),dpi=150)
            # plt.close()

            # plt.figure()
            # plt.pcolormesh(grid.x,grid.y,h[:,:,0].transpose())
            # plt.colorbar()
            # plt.savefig('h.{0}.png'.format(ufile[-10:]),dpi=150)
            # plt.close()

            momentum[counter] = dx * dy * rho0 * (
                np.sum(np.absolute(h * (u[:, :, 1:] + u[:, :, :-1]) / 2.)) +
                np.sum(np.absolute(h * (v[:, 1:, :] + v[:, :-1, :]) / 2.)))

            momentum_expected[counter] = 2. * dx * dy * 1e-5 * (
                model_iteration[counter] + 2) * dt

            volume[counter] = np.sum(dx * dy * h)

            # plt.figure()
            # plt.pcolormesh(grid.xp1, grid.y, np.transpose(u[:,:,0]))
            # plt.colorbar()
            # plt.savefig('output/u.{0}.png'.format(model_iteration[counter]),dpi=100)
            # plt.close()

        opt.assert_volume_conservation(nx, ny, layers, 1e-9)

        plt.figure()
        plt.plot(model_iteration * dt / (30 * 86400),
                 momentum_expected,
                 '-',
                 alpha=1,
                 label='Expected momentum')
        plt.plot(model_iteration * dt / (30 * 86400),
                 momentum,
                 '-',
                 alpha=1,
                 label='Simulated momentum')
        plt.legend()
        plt.xlabel('Time (months)')
        plt.ylabel('Momentum')
        plt.savefig('f_plane_momentum_test.png', dpi=150)
        plt.close()

        plt.figure()
        plt.plot(model_iteration, momentum / momentum_expected)
        plt.xlabel('timestep')
        plt.ylabel('simulated/expected')
        plt.title('final ratio = {0}'.format(
            str(momentum[-1] / momentum_expected[-1])))
        plt.savefig('ratio.png')
        plt.close()

        plt.figure()
        plt.plot(model_iteration,
                 100. * (momentum - momentum_expected) / momentum_expected)
        plt.xlabel('timestep')
        plt.ylabel('percent error')
        plt.ylim(-4, 4)
        plt.savefig('percent_error.png')
        plt.close()

        plt.figure()
        plt.plot(model_iteration, momentum - momentum_expected)
        plt.xlabel('timestep')
        plt.ylabel('simulated - expected')
        plt.savefig('difference.png')
        plt.close()

        plt.figure()
        plt.plot(model_iteration, volume)
        plt.ylabel('Volume')
        plt.xlabel('timestep')
        plt.ylim(np.min(volume), np.max(volume))
        plt.savefig('volume.png')
        plt.close()

        percent_error = 100. * (momentum -
                                momentum_expected) / momentum_expected

        return percent_error[-1]
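
As a quick check of the balance sketched in the comments above (momentum grows roughly as wind stress times area times elapsed time, with a factor of 2 because the forcing is applied in both x and y), here is a tiny standalone computation; the grid spacing and timestep values are assumed, not taken from the test.

# Illustrative momentum estimate, momentum ~ 2 * wind * dx * dy * t (assumed values).
dx = dy = 10e3          # grid spacing in metres (assumed)
dt = 100.0              # timestep in seconds (assumed)
wind_stress = 1e-5      # point forcing used in wind_x / wind_y above
n_steps = 500

expected_momentum = 2.0 * wind_stress * dx * dy * dt * n_steps
print("expected momentum after %d steps: %.3e kg m/s" % (n_steps, expected_momentum))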
Beispiel #40
0
 def set_int_custom_property(self, key: Text, value: int):
     """Set a custom property of int type."""
     self._artifact.custom_properties[key].int_value = builtins.int(value)
    def configFeAsic(self):
        print("CONFIG ASICs")

        # #global config varibles
        feasicLeakageVal = int( self.feasicLeakage ) #0 = 500pA, 1 = 100pA
        feasicLeakagex10Val = int( self.feasicLeakagex10 ) #0 = x1, 1 = x10
        acdcVal = int( self.feasicAcdc ) #DC = 0, AC = 1
        
        #channel specific variables
        testVal = int( self.feasicEnableTestInput )
        baseVal = int( self.feasicBaseline ) #0 = 900mV, 1 = 200mV
        gainVal = int( self.feasicGain )
        shapeVal = int( self.feasicShape )
        bufVal = int( self.feasicBuf ) #0 = OFF, 1 = ON

        if (testVal < 0 ) or (testVal > 1):
            return
        if (baseVal < 0 ) or (baseVal > 1):
            return
        if (gainVal < 0 ) or (gainVal > 3):
            return
        if (shapeVal < 0 ) or (shapeVal > 3):
            return
        if (acdcVal < 0 ) or (acdcVal > 1):
            return
        if (bufVal < 0 ) or (bufVal > 1):
            return
        if (feasicLeakageVal < 0 ) or (feasicLeakageVal > 1 ):
            return
        if (feasicLeakagex10Val < 0) or (feasicLeakagex10Val > 1):
            return

        #gain
        gainArray = [0,2,1,3] #00=4.7, 10=7.8, 01=14, 11=25
        gainValCorrect = gainArray[gainVal]
        
        #shape
        shapeArray = [2,0,3,1] #00=1.0, 10=0.5, 01=3.0, 11=2.0
        shapeValCorrect = shapeArray[shapeVal]

        #COTS Register Settings
        sts = testVal
        snc = baseVal
        sg = gainValCorrect
        st = shapeValCorrect
        smn = 0 #Output monitor enabled: not currently an option in femb_python so keep at 0 for now
        sdf = bufVal
        chn_reg = ((sts&0x01)<<7) + ((snc&0x01)<<6) + ((sg&0x03)<<4) + ((st&0x03)<<2)  + ((smn&0x01)<<1) + ((sdf&0x01)<<0)

        #COTS Global Register Settings
        slk0 = feasicLeakageVal
        stb1 = 0 #Monitors not currently used in femb_python
        stb = 0 #Monitors not currently used in femb_python
        s16 = 0 #High filter in channel 16 disabled
        slk1 = feasicLeakagex10Val
        sdc = acdcVal
        swdac = 0 #For pulser, set elsewhere
        dac = 0 #For pulser, set elsewhere
        global_reg = ((slk0&0x01)<<0) + ((stb1&0x01)<<1) + ((stb&0x01)<<2)+ ((s16&0x01)<<3) + ((slk1&0x01)<<4) + ((sdc&0x01)<<5) +((00&0x03)<<6)
        dac_reg = (((dac&0x01)//0x01)<<7)+(((dac&0x02)//0x02)<<6)+\
                  (((dac&0x04)//0x04)<<5)+(((dac&0x08)//0x08)<<4)+\
                  (((dac&0x10)//0x10)<<3)+(((dac&0x20)//0x20)<<2)+\
                  (((swdac&0x03))<<0)
        
        for chip in range(self.NASICS):
            for chn in range(self.NASICCH):
                if self.useLArIATmap:
                    key = "wib{:d}_femb{:d}_chip{:d}_chan{:02d}".format(self.wibNum,self.fembNum,chip+1,chn) #Note map has chips 1-8, not 0-7
                    if self.WireDict[key][0] == "X":
                        snc = 0 #set baseline for collection
                    elif self.WireDict[key][0] == "U":
                        snc = 1 #set baseline for induction

                chn_reg = ((sts&0x01)<<7) + ((snc&0x01)<<6) + ((sg&0x03)<<4) + ((st&0x03)<<2)  + ((smn&0x01)<<1) + ((sdf&0x01)<<0)
                chn_reg_bool = []
                for j in range(8):
                    chn_reg_bool.append ( bool( (chn_reg>>j)%2 ))
                start_pos = (8*16+16)*chip + (16-chn)*8
                self.fe_regs[start_pos-8 : start_pos] = chn_reg_bool

            global_reg_bool = []
            for j in range(8):
                global_reg_bool.append ( bool( (global_reg>>j)%2 ) )
            for j in range(8):
                global_reg_bool.append ( bool( (dac_reg>>j)%2 ) )

            start_pos = (8*16+16)*chip + 16*8
            self.fe_regs[start_pos : start_pos+16] = global_reg_bool

        #Convert bits to 36 32-bit register words
        for chip in [0,2,4,6]:
            chip_bits_len = 8*(16+2)
            chip_fe_regs0 = self.fe_regs[   chip*chip_bits_len: (chip+1)* chip_bits_len]
            chip_fe_regs1 = self.fe_regs[   (chip+1)*chip_bits_len: (chip+2)* chip_bits_len]
            chip_regs = []
            for onebit in chip_fe_regs0:
                chip_regs.append(onebit)
            for onebit in chip_fe_regs1:
                chip_regs.append(onebit)
            len32 = len(chip_regs)//32
            if (len32 != 9):
                print("ERROR FE register mapping")
            else:
                for i in range(len32):
                    if ( i*32 <= len(chip_regs) ):
                        bits32 = chip_regs[i*32: (i+1)*32]
                        self.fe_REGS[int(chip/2*len32 + i) ] = (sum(v<<j for j, v in enumerate(bits32)))


        #turn off HS data before register writes
        self.femb.write_reg_bits(9 , 0, 0x1, 0 )
        print("HS link turned off")
        time.sleep(2)

        #run the SPI programming
        self.doAsicConfig()

        #turn HS link back on
        print("HS link turned back on")
        time.sleep(2)
        self.femb.write_reg_bits(9 , 0, 0x1, 1 )
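
The channel register above packs six small fields into a single byte with shifts and masks; the following is a standalone sketch of the same packing with an explicit unpack check. The field values below are made up for illustration.

# Standalone sketch of the chn_reg bit packing used above (example field values).
sts, snc, sg, st, smn, sdf = 1, 0, 2, 3, 0, 1
chn_reg = ((sts & 0x01) << 7) + ((snc & 0x01) << 6) + ((sg & 0x03) << 4) + \
          ((st & 0x03) << 2) + ((smn & 0x01) << 1) + ((sdf & 0x01) << 0)

# Unpack to confirm the round trip.
assert (chn_reg >> 7) & 0x01 == sts
assert (chn_reg >> 6) & 0x01 == snc
assert (chn_reg >> 4) & 0x03 == sg
assert (chn_reg >> 2) & 0x03 == st
assert (chn_reg >> 1) & 0x01 == smn
assert (chn_reg >> 0) & 0x01 == sdf
print(format(chn_reg, '08b'))   # 10101101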
    def __init__(self):
        #declare basic system parameters
        self.NFEMBS = 4
        self.NASICS = 8
        self.NASICCH = 16

        #declare board specific registers
        self.FEMB_VER = "WIB_SBND"
        self.REG_RESET = 0
        self.REG_ASIC_RESET = 1
        self.REG_ASIC_SPIPROG = 2
        self.REG_SOFT_ADC_RESET = 1

        self.REG_LATCHLOC_3_TO_0 = 4
        self.REG_LATCHLOC_7_TO_4 = 14

        self.REG_FPGA_TP_EN = 16
        self.REG_ASIC_TP_EN = 16
        self.REG_DAC_SELECT = 16
        self.REG_TP = 5

        self.CLK_SELECT = 6
        self.CLK_SELECT2 = 15

        self.REG_SEL_ASIC = 7
        self.REG_SEL_ASIC_LSB = 8

        self.REG_WIB_MODE = 8
        self.REG_ADC_DISABLE = 8

        self.REG_HS_DATA = 9
        self.REG_HS = 17

        self.INT_TP_EN = 18
        self.EXT_TP_EN = 18

        self.REG_SPI_BASE = 0x200
        self.REG_SPI_RDBACK_BASE = 0x250
        
        self.REG_TEST_PAT = 3
        self.REG_TEST_PAT_DATA = 0x01230000

        #COTS shifts
        self.fe1_sft = 0x00000000
        self.fe2_sft = 0x00000000
        self.fe3_sft = 0x00000000
        self.fe4_sft = 0x00000000
        self.fe5_sft = 0x00000000
        self.fe6_sft = 0x00000000
        self.fe7_sft = 0x00000000
        self.fe8_sft = 0x00000000

        #COTS phases
        self.fe1_pha = 0x00000000
        self.fe2_pha = 0x00000000
        self.fe3_pha = 0x00000000
        self.fe4_pha = 0x00000000
        self.fe5_pha = 0x00000000
        self.fe6_pha = 0x00000000
        self.fe7_pha = 0x00000000
        self.fe8_pha = 0x00000000

                
        #internal variables
        self.fembNum = 0
        self.wibNum = 0
        self.useExtAdcClock = True
        self.isRoomTemp = False
        self.doReSync = True
        self.spiStatus = 0x0
        self.syncStatus = 0x0
        self.CLKSELECT_val_RT = 0xFF
        self.CLKSELECT2_val_RT = 0xFF
        self.CLKSELECT_val_CT = 0xEF
        self.CLKSELECT2_val_CT = 0xEF
        self.REG_LATCHLOC_3_TO_0_val = 0x04040404
        self.REG_LATCHLOC_7_TO_4_val = 0x04040404
        self.fe_regs = [0x00000000]*(16+2)*8*8
        self.fe_REGS = [0x00000000]*(8+1)*4
        self.useLArIATmap = True

        #initialize FEMB UDP object
        self.femb = FEMB_UDP()
        self.femb.UDP_PORT_WREG = 32000 #WIB PORTS
        self.femb.UDP_PORT_RREG = 32001
        self.femb.UDP_PORT_RREGRESP = 32002
        self.femb.doReadBack = False #WIB register interface is unreliable

        #ASIC config variables
        self.feasicLeakage = 0 #0 = 500pA, 1 = 100pA
        self.feasicLeakagex10 = 0 #0 = pA, 1 = pA*10
        self.feasicAcdc = 0 #AC = 0, DC = 1
        self.feasicBaseline = 1 #0 = 200mV, 1 = 900mV        
        self.feasicEnableTestInput = 0 #0 = disabled, 1 = enabled
        self.feasicGain = 2 #4.7,7.8,14,25
        self.feasicShape = 1 #0.5,1,2,3
        self.feasicBuf = 0 #0 = OFF, 1 = ON

        #Read in LArIAT mapping if desired

        if self.useLArIATmap:
            self.cppfr = CPP_FILE_RUNNER()            
            with open(self.cppfr.filename('configuration/configs/LArIAT_pin_mapping.map'), "rb") as fp:
                self.lariatMap = pickle.load(fp)
                
            #APA Mapping
            va = self.lariatMap
            va_femb = []
            for vb in va:
                if int(vb[9]) in (0,1,2,3,4) :
                    va_femb.append(vb)
            apa_femb_loc = []
            for chn in range(128):
                for vb in va_femb:
                    if int(vb[8]) == chn:
                        if (vb[1].find("Co")) >= 0 :#collection wire
                            chninfo = [ "X" + vb[0], vb[8], int(vb[6]), int(vb[7]), int(vb[9]), int(vb[10])]
                        elif (vb[1].find("In")) >= 0 : #induction wire
                            chninfo = [ "U" + vb[0], vb[8], int(vb[6]), int(vb[7]), int(vb[9]), int(vb[10])]
                        apa_femb_loc.append(chninfo)
            for chn in range(128):
                fl_w = True
                fl_i = 0
                for tmp in apa_femb_loc:
                    if int(tmp[1]) == chn:
                        fl_w = False
                        break
                if (fl_w):
                    chninfo = [ "V" + format(fl_i, "03d"), format(chn, "03d"), chn//16 , format(chn%15, "02d"), apa_femb_loc[0][4], apa_femb_loc[0][5]]
                    apa_femb_loc.append(chninfo)
                    fl_i = fl_i + 1

            self.All_sort = []
            self.X_sort = []
            self.V_sort = []
            self.U_sort = []
            for i in range(128):
                for chn in apa_femb_loc:
                    if int(chn[1][0:3]) == i :
                        self.All_sort.append(chn)
    
                    for chn in apa_femb_loc:
                        if chn[0][0] == "X" and int(chn[0][1:3]) == i :
                            self.X_sort.append(chn)
                    for chn in apa_femb_loc:
                        if chn[0][0] == "V" and int(chn[0][1:3]) == i :
                            self.V_sort.append(chn)
                    for chn in apa_femb_loc:
                        if chn[0][0] == "U" and int(chn[0][1:3]) == i :
                            self.U_sort.append(chn)

            self.WireDict = {}
            for line in self.All_sort:
                key = "wib{:d}_femb{:d}_chip{:d}_chan{:02d}".format(line[5],line[4],line[2],line[3])
                self.WireDict[key] = line[0]
Beispiel #43
0
 def _ReturnSessionIdValue(self, InputString):
     #Session Id:       0xF08A4
     return int(InputString.partition(":")[2].strip(), base=0)
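
A hypothetical usage of the base=0 parsing above: base 0 makes int() honour the 0x prefix, so the hexadecimal session id in the comment parses directly.

# Hypothetical input line in the format noted in the comment above.
line = "Session Id:       0xF08A4"
session_id = int(line.partition(":")[2].strip(), base=0)
print(session_id)        # 985252
print(hex(session_id))   # 0xf08a4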
Beispiel #44
0
 def __init__(self, dtype_or_func=None, default=None, missing_values=None,
              locked=False):
     # Convert unicode (for Py3)
     if isinstance(missing_values, unicode):
         missing_values = asbytes(missing_values)
     elif isinstance(missing_values, (list, tuple)):
         missing_values = asbytes_nested(missing_values)
     # Defines a lock for upgrade
     self._locked = bool(locked)
     # No input dtype: minimal initialization
     if dtype_or_func is None:
         self.func = str2bool
         self._status = 0
         self.default = default or False
         dtype = np.dtype('bool')
     else:
         # Is the input a np.dtype ?
         try:
             self.func = None
             dtype = np.dtype(dtype_or_func)
         except TypeError:
             # dtype_or_func must be a function, then
             if not hasattr(dtype_or_func, '__call__'):
                 errmsg = ("The input argument `dtype` is neither a"
                           " function nor a dtype (got '%s' instead)")
                 raise TypeError(errmsg % type(dtype_or_func))
             # Set the function
             self.func = dtype_or_func
             # If we don't have a default, try to guess it or set it to
             # None
             if default is None:
                 try:
                     default = self.func(asbytes('0'))
                 except ValueError:
                     default = None
             dtype = self._getdtype(default)
         # Set the status according to the dtype
         _status = -1
         for (i, (deftype, func, default_def)) in enumerate(self._mapper):
             if np.issubdtype(dtype.type, deftype):
                 _status = i
                 if default is None:
                     self.default = default_def
                 else:
                     self.default = default
                 break
         if _status == -1:
             # We never found a match in the _mapper...
             _status = 0
             self.default = default
         self._status = _status
         # If the input was a dtype, set the function to the last we saw
         if self.func is None:
             self.func = func
         # If the status is 1 (int), change the function to
         # something more robust.
         if self.func == self._mapper[1][1]:
             if issubclass(dtype.type, np.uint64):
                 self.func = np.uint64
             elif issubclass(dtype.type, np.int64):
                 self.func = np.int64
             else:
                 self.func = lambda x: int(float(x))
     # Store the list of strings corresponding to missing values.
     if missing_values is None:
         self.missing_values = set([asbytes('')])
     else:
         if isinstance(missing_values, bytes):
             missing_values = missing_values.split(asbytes(","))
         self.missing_values = set(list(missing_values) + [asbytes('')])
     #
     self._callingfunction = self._strict_call
     self.type = self._dtypeortype(dtype)
     self._checked = False
     self._initial_default = default
Beispiel #45
0
        #  c:  0xC03DDDA6174963AD10224BADDBCF7ED9EA5E3DAE91941CB428D2EC060B4F290A
        # u1:  0x113BE17918E856E4D6EC2EE04F5E9B3CB599B82AC879C8E32A0140C290D32659
        # u2:  0x2976F786AE6333E125C0DFFD6C16D37E8CED5ABEDB491BCCA21C75B307D0B318
        # u1G: 0x51e4e6ed6f4b1db33b0d21b8bd30fb732f1d999c4e27bb1800eba20813ad3e86
        #      0x93101a9fa0d5c7c680400b03d3becb9130dd8f9f4d9b034360a74829dc1201ab
        # u2Q: 0xeaca8440897333e259d0f99165611b085d6e10a9bfd371c451bc0aea1aeb99c3
        #      0x57c5c95ea9f491c0fd9029a4089a2e6df47313f915f3e39e9f12e03ab16521c2
        #  + : 0x0623b4159c7112125be51716d1e706d68e52f5b321da68d8b86b3c7c7019a9da
        #    : 0x1029094ccc466a534df3dbb7f588b283c9bef213633750aeff021c4c131b7ce5
        # SIG: 3045
        #      0220
        #       0623b4159c7112125be51716d1e706d68e52f5b321da68d8b86b3c7c7019a9da
        #      0221
        #       008dffe3c592a0c7e5168dcb3d4121a60ee727082be4fbf79eae564929156305fc

        msg = int(
            0xba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad)
        sig = int(
            0x304502200623b4159c7112125be51716d1e706d68e52f5b321da68d8b86b3c7c7019a9da0221008dffe3c592a0c7e5168dcb3d4121a60ee727082be4fbf79eae564929156305fc
        )
        msg = msg.to_bytes(32, 'big')
        sig = sig.to_bytes(0x47, 'big')

        signer = ECDSA()
        assert (signer.verify(msg, sig, pu_key))

        #  k:     0xe5a8d1d529971c10ca2af378444fb544a211707892c8898f91dcb171584e3db9
        # kG:     0x4f123ed9de853836447782f0a436508d34e6609083cf97c9b9cd69673d8f04a5
        #         0x50b57473f987f2d7c4715827dbd7b23c3088645d5f898aa66e4ef2778591d643
        # kinv:    0x0F2DD0361F61F683957CF708FB54DBC0B6B97F9EDF28604983E6F492117C154C
        # kinv.d   0xFF68C89B97C63273EB5F787FBC0C33DA02BA4C883AB09E3D381197E9E3964E8C
        # kinv.d.x 0x5A20058782FB81C8F98C30D9D441B7196C22939B144918CF519FB155180664B1
Beispiel #47
0
    def __init__(self,
                 server_address,
                 RequestHandlerClass,
                 bind_and_activate=True):
        self.remove_file = None
        self.mode = None
        self.listen_fd = get_listen_fd()

        if self.listen_fd:
            server_address = self.listen_fd
            self.address_family = socket.AF_UNIX
            self.socket = socket.fromfd(self.listen_fd, socket.AF_UNIX,
                                        socket.SOCK_STREAM)

        elif server_address.startswith("unix:"):
            self.address_family = socket.AF_UNIX
            address = server_address[5:]
            m = address.rfind(';mode=')
            if m != -1:
                self.mode = address[m + 6:]
                address = address[:m]

            if address[0] == '@':
                address = address.replace('@', '\0', 1)
                self.mode = None
            else:
                self.remove_file = address

            server_address = address
            self.socket = socket.socket(self.address_family, self.socket_type)
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

        elif server_address.startswith("tcp:"):
            address = server_address[4:]
            p = address.rfind(':')
            if p != -1:
                port = int(address[p + 1:])
                address = address[:p]
            else:
                raise ConnectionError("Invalid address 'tcp:%s'" % address)
            address = address.replace('[', '')
            address = address.replace(']', '')

            try:
                res = socket.getaddrinfo(address,
                                         port,
                                         proto=socket.IPPROTO_TCP,
                                         flags=socket.AI_NUMERICHOST)
            except TypeError:
                res = socket.getaddrinfo(address, port, self.address_family,
                                         self.socket_type, socket.IPPROTO_TCP,
                                         socket.AI_NUMERICHOST)

            af, socktype, proto, canonname, sa = res[0]
            self.address_family = af
            self.socket_type = socktype
            self.socket = socket.socket(self.address_family, self.socket_type)
            server_address = sa[0:2]

        else:
            raise ConnectionError("Invalid address '%s'" % server_address)

        BaseServer.__init__(self, server_address, RequestHandlerClass)

        if bind_and_activate:
            try:
                self.server_bind()
                self.server_activate()
            except:
                self.server_close()
                raise
Beispiel #48
0
    parser.add_argument("trail_size",
                        help="Display a trail of diameter x while rendering")
    parser.add_argument("--performance_test",
                        help="don't render anything, just calculate",
                        action="store_true")
    args = parser.parse_args()

    system = None
    if args.simulation_type == 'BarnesHut':
        system = RenderableBarnesHutSystem()
    elif args.simulation_type == 'BruteForce':
        system = RenderableBruteForceSystem()
    elif args.simulation_type == 'ApoapsisExample':
        system = RenderableApoapsisExampleSystem()
    elif args.simulation_type == 'HohmannExampleA':
        system = RenderableHohmannTransferExampleSystemA()
    elif args.simulation_type == 'HohmannExampleB':
        system = RenderableHohmannTransferExampleSystemB()
    elif args.simulation_type == 'HohmannExampleC':
        system = RenderableHohmannTransferExampleSystemC()
    else:
        exit(1)

    system.start_the_bodies(int(args.bodies))
    renderer = SystemRenderer(system,
                              frames=int(args.frames),
                              trail_size=int(args.trail_size),
                              performance_test=args.performance_test)

    renderer.run()
Beispiel #49
0
def intOrFloat(string):
    '''Not sure if your string is formatted as an int or a float? Use intOrFloat instead!'''
    try:
        return int(string)
    except ValueError:
        return float(string)
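
A brief usage note for intOrFloat above: plain integers stay int, everything else falls through to float, and a string valid as neither still raises ValueError from the float call.

# Assumed usage of intOrFloat defined above.
print(intOrFloat("42"))     # 42 (int)
print(intOrFloat("3.14"))   # 3.14 (float)
print(intOrFloat("1e3"))    # 1000.0 (rejected by int(), accepted by float())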
Beispiel #50
0
def depinContent(u, PageSize,floadName):
    urls = u
    pagesize = int(PageSize)
    for i in range(pagesize):
        foladPath = floadName+"/"+str(i+1)
        createfolad(foladPath)
        Content = sendUrl(getMainUrl(urls,i+1,False)).content.decode('GBK')
        #UI
        if(floadName == 'UML'):
          child = "UML"
        if(floadName == '建模'):
            child = "UI"
        if(floadName == '需求'):
            child = "requirement"
        if(floadName == '设计'):
            child = "swdesign"
        if(floadName == '编码、构建与集成'):
            child = "bmgjjc"
        if(floadName == '测试'):
            child = "test"
        if(floadName == '界面'):
            child = "UI"
        if(floadName == '产品管理'):
            child = "productmana"
        if(floadName == '项目管理'):
            child = "xmgl"
        if(floadName == '研发管理'):
            child = "productmana"
        if(floadName == '配置管理'):
            child = "pzgl"
        if(floadName == '质量管理'):
            child = "zlgl"
        if(floadName == '过程改进'):
            child = "process"
        if(floadName == '大数据'):
            child = "bigdata"
        if(floadName == '数据库'):
            child = "datebase"
        if(floadName == '数据仓库'):
            child = "datecangku"
        if(floadName == '数据挖掘'):
            child = "datecangku"
        if(floadName == '企业架构'):
            child = "qiyejiagou"
        if(floadName == 'IT规划与治理'):
            child = "ITguihua"
        if(floadName == '运营管理'):
            child = "yunyinggl"
        if(floadName == 'IT运维'):
            child = "itil"    
        if(floadName == 'DevOps'):
            child = "devops"       
        if(floadName == '安全'):
            child = "safe"         
        if(floadName == 'JAVA'):
            child = "java"      
        if(floadName == '.net'):
            child = "net"     
        if(floadName == 'c,c++'):
            child = "C"         
        if(floadName == 'web开发'):
            child = "web"         
        if(floadName == '移动端开发'):
            child = "phone"                    
        if(floadName == '嵌入式开发'):
            child = "qrskf"       
        if(floadName == 'IT人员培养'):
            child = "itpeiyang"            
        if(floadName == '云计算'):
            child = "yjs"       
        if(floadName == '网络技术'):
            child = "wljs"        
        if(floadName == '办公'):
            child = "office"        
        if(floadName == '人工智能'):
            child = "ai"      
        if(floadName == 'python'):
            child = "python"    
        if(floadName == '微服务'):
            child = "wfw"      
        if(floadName == '学生'):
            child = ""  
            return         
        getContent(Content,foladPath, floadName,child)
Beispiel #51
0
def parse_target(target_name):
    try:
        host, ip = target_name.split(":")
        return host, int(ip)
    except ValueError:
        raise ValueError("Target format is HOST:PORT")
Beispiel #52
0
from builtins import int
cadena = input("Dame una cadena: ")
i = int(input("Dame un numero: "))
j = int(input("Dame otro numero: "))

subcadena = ''
for k in range(i, j):
    subcadena += cadena[k]

print("La subcadena entre %d y %d es %s" % (i, j, subcadena))
Beispiel #53
0
 def isone(self):
     """ Return zero if the element is one. """
     return int(_C.GT_ELEM_is_unity(self.group.bpg, self.elem)) == 1
Beispiel #54
0
 def eq(self, other):
     """ Returns True if elements are equal. """
     resp = _C.GT_ELEM_cmp(self.elem, other.elem)
     return (int(resp) == 0)
Beispiel #55
0
    def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans):
        """Generates a regressor for a sparse/clustered-sparse acquisition
        """
        bplot = False
        if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
            bplot = True
            import matplotlib
            matplotlib.use(config.get('execution', 'matplotlib_backend'))
            import matplotlib.pyplot as plt

        TR = np.round(self.inputs.time_repetition * 1000)  # in ms
        if self.inputs.time_acquisition:
            TA = np.round(self.inputs.time_acquisition * 1000)  # in ms
        else:
            TA = TR  # in ms
        nvol = self.inputs.volumes_in_cluster
        SCANONSET = np.round(self.inputs.scan_onset * 1000)
        total_time = TR * (nscans - nvol) / nvol + TA * nvol + SCANONSET
        SILENCE = TR - TA * nvol
        dt = TA / 10.0
        durations = np.round(np.array(i_durations) * 1000)
        if len(durations) == 1:
            durations = durations * np.ones((len(i_onsets)))
        onsets = np.round(np.array(i_onsets) * 1000)
        dttemp = gcd(TA, gcd(SILENCE, TR))
        if dt < dttemp:
            if dttemp % dt != 0:
                dt = float(gcd(dttemp, dt))

        if dt < 1:
            raise Exception('Time multiple less than 1 ms')
        iflogger.info('Setting dt = %d ms\n', dt)
        npts = int(np.ceil(total_time / dt))
        times = np.arange(0, total_time, dt) * 1e-3
        timeline = np.zeros((npts))
        timeline2 = np.zeros((npts))
        if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf:
            hrf = spm_hrf(dt * 1e-3)
        reg_scale = 1.0
        if self.inputs.scale_regressors:
            boxcar = np.zeros(int(50.0 * 1e3 / dt))
            if self.inputs.stimuli_as_impulses:
                boxcar[int(1.0 * 1e3 / dt)] = 1.0
                reg_scale = float(TA / dt)
            else:
                boxcar[int(1.0 * 1e3 / dt):int(2.0 * 1e3 / dt)] = 1.0

            if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf:
                response = np.convolve(boxcar, hrf)
                reg_scale = 1.0 / response.max()
                iflogger.info('response sum: %.4f max: %.4f', response.sum(),
                              response.max())
            iflogger.info('reg_scale: %.4f', reg_scale)

        for i, t in enumerate(onsets):
            idx = int(np.round(t / dt))
            if i_amplitudes:
                if len(i_amplitudes) > 1:
                    timeline2[idx] = i_amplitudes[i]
                else:
                    timeline2[idx] = i_amplitudes[0]
            else:
                timeline2[idx] = 1

            if bplot:
                plt.subplot(4, 1, 1)
                plt.plot(times, timeline2)

            if not self.inputs.stimuli_as_impulses:
                if durations[i] == 0:
                    durations[i] = TA * nvol
                stimdur = np.ones((int(durations[i] / dt)))
                timeline2 = np.convolve(timeline2, stimdur)[0:len(timeline2)]
            timeline += timeline2
            timeline2[:] = 0

        if bplot:
            plt.subplot(4, 1, 2)
            plt.plot(times, timeline)

        if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf:
            timeline = np.convolve(timeline, hrf)[0:len(timeline)]
            if isdefined(self.inputs.use_temporal_deriv) and \
                    self.inputs.use_temporal_deriv:
                # create temporal deriv
                timederiv = np.concatenate(([0], np.diff(timeline)))

        if bplot:
            plt.subplot(4, 1, 3)
            plt.plot(times, timeline)
            if isdefined(self.inputs.use_temporal_deriv) and \
                    self.inputs.use_temporal_deriv:
                plt.plot(times, timederiv)
        # sample timeline
        timeline2 = np.zeros((npts))
        reg = []
        regderiv = []
        for i, trial in enumerate(np.arange(nscans) / nvol):
            scanstart = int((SCANONSET + trial * TR + (i % nvol) * TA) / dt)
            scanidx = scanstart + np.arange(int(TA / dt))
            timeline2[scanidx] = np.max(timeline)
            reg.insert(i, np.mean(timeline[scanidx]) * reg_scale)
            if isdefined(self.inputs.use_temporal_deriv) and \
                    self.inputs.use_temporal_deriv:
                regderiv.insert(i, np.mean(timederiv[scanidx]) * reg_scale)

        if isdefined(self.inputs.use_temporal_deriv) and \
                self.inputs.use_temporal_deriv:
            iflogger.info('orthogonalizing derivative w.r.t. main regressor')
            regderiv = orth(reg, regderiv)

        if bplot:
            plt.subplot(4, 1, 3)
            plt.plot(times, timeline2)
            plt.subplot(4, 1, 4)
            plt.bar(np.arange(len(reg)), reg, width=0.5)
            plt.savefig('sparse.png')
            plt.savefig('sparse.svg')

        if regderiv:
            return [reg, regderiv]
        else:
            return reg
Beispiel #56
0
 def iszero(self):
     """ Return True if the element is zero."""
     return int(_C.GT_ELEM_is_zero(self.elem)) == 1
Beispiel #57
0
def merge_lines(lines, listpt, P):
    thresh = copy.deepcopy(P["sgmnt_threshold"]['value'])
    img_size = copy.deepcopy(P["img_size"])
    # lines format: y1, x1, y2, x2, length, slope, alpha, index, start_pt, end_pt
    # listpt = squeeze_arr(listpt)

    # All lines that can be merged. Merging lines
    # will group them together and then add the grouping to the list
    out = [[n] for n in range(0, lines.shape[0])]
    new_lp = listpt.tolist()

    # Get unique start and end points. These are what we check
    unique_pts = sort(unique(lines[:, 8:10]))

    for index, ptx in enumerate(unique_pts):
        # Test each combination of lines with this
        # point to see which ones we can merge.
        # Formula is combinations w/o repetitions (choose 2)
        pairs = list(combinations(list(where(lines == ptx)[0]), 2))
        # print(pairs)

        # Go to next iteration if there's no combinations
        if not pairs:
            continue
        for i, curr_pair in enumerate(pairs):
            pt1, pt2, alph1, alph2, temp1, temp2 = relevant_lines(
                curr_pair, lines)
            # Check that the lines are within the threshold and not coincident
            if abs(alph1 - alph2) > thresh or compare(temp1, temp2):
                continue

            # Get the unique start and end points of the two lines
            lind1, lind2 = sort([
                int(i) for i in list(
                    filter(lambda e: e not in [ptx], chain(temp1 + temp2)))
            ])
            y1, x1 = unravel_index([lind1], img_size, order='F')
            y2, x2 = unravel_index([lind2], img_size, order='F')
            # print('y1', y1, 'x1', x1, 'y2', y2, 'x2', x2)
            # print(unravel_index([lind1], img_size, order='F'))
            slope, line_len, alpha = math_stuff(x1, y1, x2, y2)
            # print('slope', slope)

            # Intersection point is in the middle of the new line
            if min(alph1, alph2) <= alpha <= max(alph1, alph2):
                lines = delete(lines, max(pt1, pt2), axis=0)
                lines = delete(lines, min(pt1, pt2), axis=0)
                val1 = out[pt1]
                val2 = out[pt2]
                del out[max(pt1, pt2)]
                del out[min(pt1, pt2)]

                # Update both lists to reflect the addition of the merged line.
                lines = append(lines, [[
                    int(y1),
                    int(x1) + 1,
                    int(y2),
                    int(x2) + 1, line_len, slope, alpha, 0, lind1, lind2
                ]],
                               axis=0)
                out.append([val1, val2])

                # listpt = merge_listpoints(listpt, pt1, pt2, lind1, lind2)
                listpt = merge_lp(new_lp, pt1, pt2, lind1, lind2)
                # Merged lines, so don't check the other pairs
                break
            else:
                continue

    return lines, array(listpt), array(out)
Beispiel #58
0
def rsaEncrypt(text, pubKey, modulus):
    if isinstance(text, str):  # hexlify needs bytes on Python 3
        text = text.encode()
    text = text[::-1]
    # three-argument pow keeps the intermediate value reduced modulo the modulus
    rs = pow(int(binascii.hexlify(text), 16), int(pubKey, 16), int(modulus, 16))
    return format(rs, 'x').zfill(256)
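
The three-argument pow used above keeps every intermediate value reduced modulo the key, which is what makes modular exponentiation with realistic key sizes feasible. A tiny check with toy numbers (not a real RSA key) showing the two forms agree:

# Toy check: pow(a, b, m) equals (a ** b) % m but never materialises the huge power.
a, b, m = 12345, 101, 99991
assert pow(a, b, m) == (a ** b) % m
print(pow(a, b, m))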
Beispiel #59
0
def check_input_args(input_filename=None,
                     output_filename=None,
                     input_part_size=None,
                     is_key_gen=None,
                     encrypt_mode=None,
                     key_file=None,
                     version_no=None,
                     print_arg_str=None,
                     print_encrypt_arg_str=None,
                     output_dir=None):

    global version, is_encrypt_data, input_size, key_gen

    version = version_no
    is_encrypt_data = encrypt_mode
    key_gen = is_key_gen
    input_size = input_part_size

    if not output_dir == os.getcwd() and (key_file
                                          and os.path.isabs(key_file)):
        sys.exit(
            "Error. Cannot provide --outdir argument as --keyfile is absolute path."
        )

    if not os.path.isdir(output_dir):
        distutils.dir_util.mkpath(output_dir)

    if is_encrypt_data.lower() == 'true':
        is_encrypt_data = True
    elif is_encrypt_data.lower() == 'false':
        is_encrypt_data = False

    if version == 'v1':
        version = Page.VERSION1
    elif version == 'v2':
        version = Page.VERSION2

    if key_gen.lower() == 'true':
        key_gen = True
    elif key_gen.lower() == 'false':
        key_gen = False

    if key_gen:
        if all(arg is not None
               for arg in [input_filename, output_filename, input_size]):
            if not is_encrypt_data:
                sys.exit("--encrypt argument is missing or set to false.")
        elif any(arg is not None
                 for arg in [input_filename, output_filename, input_size]):
            sys.exit(print_arg_str)
    else:
        if not (input_filename and output_filename and input_size):
            sys.exit(print_arg_str)

        if is_encrypt_data and not key_gen and not key_file:
            sys.exit(print_encrypt_arg_str)

        if not is_encrypt_data and key_file:
            sys.exit(
                "Invalid. Cannot give --keyfile as --encrypt is set to false.")

    if key_file:
        key_file_name, key_file_ext = os.path.splitext(key_file)
        if key_file_ext:
            if not key_file_ext == '.bin':
                sys.exit(
                    "--keyfile argument can be a filename with no extension or .bin extension only"
                )

    # If only one of the arguments - input_filename, output_filename, input_size is given
    if ((any(arg is None for arg in [input_filename, output_filename, input_size])) is True) and \
            ((all(arg is None for arg in [input_filename, output_filename, input_size])) is False):
        sys.exit(print_arg_str)

    if input_size:
        # Set size
        input_size = int(input_size, 0)

        if input_size % 4096 != 0:
            sys.exit("Size of partition must be multiple of 4096")

        # Update size as a page needs to be reserved of size 4KB
        input_size = input_size - Page.PAGE_PARAMS["max_size"]

        if input_size < (2 * Page.PAGE_PARAMS["max_size"]):
            sys.exit("Minimum NVS partition size needed is 0x3000 bytes.")
 def count(self, elem):
     """Count the number of times elem appears in the count."""
     if not self.step:
         return _coconut.float("inf") if elem == self.start else 0
     return int(elem in self)