Example #1
    def __call__(self):
        "Return the locations of the ticks"
        b = self._transform.base

        vmin, vmax = self.axis.get_view_interval()
        vmin, vmax = self._transform.transform_point((vmin, vmax))
        if vmax < vmin:
            vmin, vmax = vmax, vmin
        numdec = math.floor(vmax) - math.ceil(vmin)

        if self._subs is None:
            if numdec > 10:
                subs = np.array([1.0])
            elif numdec > 6:
                subs = np.arange(2.0, b, 2.0)
            else:
                subs = np.arange(2.0, b)
        else:
            subs = np.asarray(self._subs)

        stride = 1
        while numdec / stride + 1 > self.numticks:
            stride += 1

        decades = np.arange(math.floor(vmin), math.ceil(vmax) + stride, stride)
        if len(subs) > 1 or subs[0] != 1.0:
            ticklocs = []
            for decade in decades:
                ticklocs.extend(subs * (np.sign(decade) * b ** np.abs(decade)))
        else:
            ticklocs = np.sign(decades) * b ** np.abs(decades)
        return np.array(ticklocs)
Example #2
def main():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
    from django.core.management import execute_from_command_line
    from ot_logbook.models import Activity, Subcategory, Category, Location, \
        Equipment, ActivityData
    from django.contrib.auth.models import User

    argparser = argparse.ArgumentParser(description='fix elevation.')

    argparser.add_argument('user', help='user name to bind data')

    args = argparser.parse_args()
    
    # Check user
    try:
        user = User.objects.get(username=args.user)
        print 'User : ', user
    except User.DoesNotExist:
        sys.stderr.write('User does not exist.\n')
        sys.exit(1)

    downloader = SRTMDownloader()
    downloader.loadFileList()

    activitydata = ActivityData()
    elevation_null = ActivityData.objects.filter(elevation__isnull = True)
    for row in elevation_null[:2]:
        tile = downloader.getTile(int(floor(row.lat)),int(floor(row.lon)))
        print "%s Tile lat,lon : %i,%i srtm lat,lon : %f,%f : elevation : %f" % (row.datetime,floor(row.lat),floor(row.lon),row.lat,row.lon,tile.getAltitudeFromLatLon(row.lat,row.lon))
        row.elevation = tile.getAltitudeFromLatLon(row.lat,row.lon)
        row.save()
    print elevation_null.count()
    del activitydata
Example #3
def colorfy(toolname):
	print("working")

	t = Tool.objects.filter(tool_name__iexact=toolname).first()

	if t is None:
		return "ERROR - Can't find TOOL! " + toolname
	else:
		# parse return
		numerator = t.quantity_available
		denominator = t.quantity
		intensity = math.floor(255*1)

		try:
			n = numerator / denominator
		except ZeroDivisionError:
			n=0

		# now we colorfy it.
		r = math.floor(intensity - (intensity*n)) 
		g = math.floor((intensity*n))
		b = 0
		print(r,g,b)
		c = "color: rgb(" + str(r) + "," + str(g) + "," + str(b) +");"
		part1 = givemediv(45, c) + str(numerator) + "</div>"
		part2 = givemediv(10, "color: black;") + "/" + "</div>"
		part3 = givemediv(45, "color: rgb(0, " + str(intensity) + ", 0);" ) + str(denominator) + "</div>"

		return part1 + part2 + part3
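
A standalone sketch of the red-to-green colouring logic above (the name ratio_rgb and its defaults are illustrative, not from the original):

import math

def ratio_rgb(available, total, intensity=255):
    # the availability fraction drives the hue: 0 -> pure red, 1 -> pure green
    try:
        n = available / total
    except ZeroDivisionError:
        n = 0
    return (math.floor(intensity - intensity * n), math.floor(intensity * n), 0)

print(ratio_rgb(3, 4))  # (63, 191, 0)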

		
Example #4
def downloadChunks(url):
    """Helper to download large files
        the only arg is a url
       this file will go to a temp directory
       the file will also be downloaded
       in chunks and print out how much remains
    """

    baseFile = path.basename(url)

    #make the downloaded file group-writable
    umask(0002)

    try:
        temp_path='/tmp'
        file = path.join(temp_path,baseFile)

        req = urllib2.urlopen(url)
        total_size = int(req.info().getheader('Content-Length').strip())
        downloaded = 0
        CHUNK = 256 * 10240
        with open(file, 'wb') as fp:
            while True:
                chunk = req.read(CHUNK)
                downloaded += len(chunk)
                print math.floor((downloaded / float(total_size)) * 100)
                if not chunk: break
                fp.write(chunk)
    except urllib2.HTTPError, e:
        print "HTTP Error:",e.code , url
        return False
Example #5
 def calculate_median(self,
                      minutes=1):
     """
     Calculates the median of all values in the collection
     after (now - minutes)
     """
     now = utc_now()
     created_after = now - timedelta(minutes=minutes)
     queryset = self.filter_query(created_after=created_after)
     count = queryset.count()
     # If there is no value within the past 'minutes' window, return None
     if count == 0:
         return None
     # Pick the center element in the collection
     # if the collection has a count which is odd
     elif (count % 2) != 0:
         return queryset.skip(math.floor(count/2))\
             .limit(1)\
             .all()[0].value
     # Pick the center 2 elements in the collection and calculate the avg
     # if the collection has a count which is even
     elif (count % 2) == 0:
         center_elements = queryset.skip(math.floor(count/2) - 1)\
             .limit(2)\
             .all()
         median = (center_elements[0].value + center_elements[1].value)/2.0
         return median
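
A list-based sketch of the same centre-element selection (the queryset above relies on its backend for ordering; here we sort explicitly):

import math

def median(values):
    values = sorted(values)
    count = len(values)
    if count == 0:
        return None
    if count % 2 != 0:
        # odd count: the single centre element
        return values[math.floor(count / 2)]
    # even count: average of the two centre elements
    centre = values[count // 2 - 1:count // 2 + 1]
    return (centre[0] + centre[1]) / 2.0

print(median([3, 1, 2]))     # 2
print(median([4, 1, 3, 2]))  # 2.5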
Example #6
	def render2(a, b, vol, pos, knum, endamp = .25, sm = 10):
	    b2 = (1. - pause) * b
	    l=waves2(a, b2)
	    ow=""
	    q=int(l[0]*l[1])

	    lf = log(a)
	    t = (lf-3.) / (8.5-3.)
	    volfac = 1. + .8 * t * cos(pi/5.3*(lf-3.))
	    snd_len = int((10.-lf)*q)
	    if lf < 4: snd_len *= 2
	    x = np.arange(snd_len)
	    s = x / float(q)

	    ls = np.log(1. + s)
	    kp_len = int(l[0])
	    kps1 = np.zeros(snd_len)
	    kps2 = np.zeros(snd_len)
	    kps1[:kp_len] = np.random.normal(size = kp_len)

	    for t in range(kp_len):
	        kps2[t] = kps1[t:t+sm].mean()
	    delt = float(l[0])
	    li = int(floor(delt))
	    hi = int(ceil(delt))
	    ifac = delt % 1
	    delt2 = delt * (floor(delt) - 1) / floor(delt)
	    ifac2 = delt2 % 1
	    falloff = (4./lf*endamp)**(1./l[1])
	    for t in range(hi, snd_len):
	        v1 = ifac * kps2[t-hi]   + (1.-ifac) * kps2[t-li]
	        v2 = ifac2 * kps2[t-hi+1] + (1.-ifac2) * kps2[t-li+1]
	        kps2[t] += .5 * (v1 + v2) * falloff
	    data[pos:pos+snd_len] += kps2*vol*volfac
Example #7
def get_grid_position(x, y, dim_x, dim_y, bounds):
    r""" Gives the position of point on a grid
    
    Devides the domain as given by the boundaries into
    a grid of dimension :math:`\mathtt{dim_x}\times\mathtt{dim_y}`
    and gives the grid-coordinates the point falls into.
    
    Parameters
    ----------
    x, y : float
        Coordinates of a point within the domain.
    dim_x, dim_y : int
        Dimensions of the 2d grid.
    bounds : array_like
        An array_like object of dimension (2,2)
        describing the boundaries of the domain,
        e.g. ((min_x, max_x), (min_y, max_y))
    
    Returns
    -------
    grid_x, grid_y : int
        Coordinates of the tile the point falls into.
    """
    grid_x = min(math.floor(abs(x - bounds[0][0]) / 
                        abs(bounds[0][0] - bounds[0][1]) * dim_x), dim_x-1)
    grid_y = min(math.floor(abs(y - bounds[1][0]) / 
                        abs(bounds[1][0] - bounds[1][1]) * dim_y), dim_y-1)
    return (grid_x, grid_y)
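
A quick check of get_grid_position, assuming it is in scope:

bounds = ((0.0, 10.0), (0.0, 5.0))
print(get_grid_position(2.5, 4.9, 4, 2, bounds))   # (1, 1)
print(get_grid_position(10.0, 5.0, 4, 2, bounds))  # (3, 1), clamped to the last tile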
Example #8
	def segment_units(self, sfid, verb=False):
		"""
		This function takes the list of unit breakpoints, plus the raw metadata, and assembles 'cooked' segments in the corpus segtable.
		
		Note: currently ignores the amplitude scalars (aside from generating stats)...
		
		"""
		segmented = self.get_sorted_units_list(sfid)
		raw_amps, raw_mfccs, raw_chromas = self.get_raw_metadata(sfid)
		amps, reheated = [], []
		
		if verb: print 'raw: ', raw_amps
		amps_stripped = np.nan_to_num(raw_amps)
		if verb: print 'amps_stripped: ', amps_stripped
		mfccs_stripped = np.nan_to_num(raw_mfccs)
		if verb: print 'mfccs_stripped: ', mfccs_stripped
		chromas_stripped = np.nan_to_num(raw_chromas)
		if verb: print 'chromas_stripped: ', chromas_stripped

		for relid, sfu in enumerate(segmented):

			offset = int(math.floor(sfu.onset / self.HOP_SECS))
			dur = int(math.floor(sfu.dur / self.HOP_SECS))
			if verb: print '[[', offset, '|', dur, ']]'
			self.sftree.nodes[sfid].add_metadata_for_relid(relid, powers=self.feat.powers.proc_funcs[0](amps_stripped, offset, dur))
			# WHY ARE THE FUNCTION SIGNATURES DIFFERENT FOR OFFSET AND DUR???
			self.sftree.nodes[sfid].add_metadata_for_relid(relid, mfccs=self.feat.proc_funcs[0](mfccs_stripped[offset:(offset+dur)]))
			if verb: print self.feat.proc_funcs[1]
			if verb: print mfccs_stripped[offset:(offset+dur)]
			self.sftree.nodes[sfid].add_metadata_for_relid(relid, mfcc_vars=self.feat.proc_funcs[1](mfccs_stripped[offset:(offset+dur)]))
			self.sftree.nodes[sfid].add_metadata_for_relid(relid, chromas=self.feat.proc_funcs[0](chromas_stripped[offset:(offset+dur)]))
			self.sftree.nodes[sfid].add_metadata_for_relid(relid, chroma_vars=self.feat.proc_funcs[1](chromas_stripped[offset:(offset+dur)]))
Example #9
def bucketSort(array, bucketSize=1):
    if len(array) == 0:
        return array

    # Determine minimum and maximum values
    minValue = array[0]
    maxValue = array[0]
    for i in range(1, len(array)):
        if array[i] < minValue:
            minValue = array[i]
        elif array[i] > maxValue:
            maxValue = array[i]

    # Initialize buckets
    bucketCount = math.floor((maxValue - minValue) / bucketSize) + 1
    bucketCount = int(bucketCount)
    buckets = []
    for i in range(0, bucketCount):
        buckets.append([])

    # Distribute input array values into buckets
    for i in range(0, len(array)):
        buckets[int(math.floor((array[i] - minValue) / bucketSize))].append(array[i])

    # Sort buckets and place back into input array
    array = []
    for i in range(0, len(buckets)):
        insertSort(buckets[i])
        for j in range(0, len(buckets[i])):
            array.append(buckets[i][j])

    return array
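
A usage sketch, assuming the insertSort helper referenced above is a plain in-place insertion sort:

import math

def insertSort(arr):
    # in-place insertion sort, as bucketSort expects
    for i in range(1, len(arr)):
        key = arr[i]
        j = i - 1
        while j >= 0 and arr[j] > key:
            arr[j + 1] = arr[j]
            j -= 1
        arr[j + 1] = key

print(bucketSort([5, 3, 8, 1, 9, 2], bucketSize=3))  # [1, 2, 3, 5, 8, 9]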
Example #10
 def pos_to_coords(self, pos):
     """Take pixel positions and turn them into coordinates (from 0 to n_rows ** 2 - 1)."""
     x, y = pos
     coords = (int(math.floor(x/self.tile_line_size)), int(math.floor(y/self.tile_line_size)))
     logger.debug('PygameBoard.pos_to_coords')
     logger.debug('{}, {}, {}, {}'.format('Coords', coords, type(coords), [type(x) for x in coords]))
     return coords
Example #11
    def format_seconds(self, elapsed):
        hours = math.floor(elapsed / 3600)
        remainder = elapsed % 3600
        minutes = math.floor(remainder / 60)
        seconds = remainder % 60

        return "%.2d:%.2d:%.2d" % (hours, minutes, seconds)
Example #12
 def format(self, record):
     '''format a record for display'''
     returns = []
     line_len = self.width
     if type(record.msg) in types.StringTypes:
         for line in logging.Formatter.format(self, record).split('\n'):
             if len(line) <= line_len:
                 returns.append(line)
             else:
                 inner_lines = int(math.floor(float(len(line)) / line_len))+1
                 for i in xrange(inner_lines):
                     returns.append("%s" % (line[i*line_len:(i+1)*line_len]))
     elif type(record.msg) == types.ListType:
         if not record.msg:
             return ''
         # getMessage() must be called so that arguments are substituted; eval() is used to turn the string back into a list
         msgdata = eval(record.getMessage())
         msgdata.sort()
         msgwidth = self.width
         columnWidth = max([len(str(item)) for item in msgdata])
         columns = int(math.floor(float(msgwidth) / (columnWidth+2)))
         lines = int(math.ceil(float(len(msgdata)) / columns))
         for lineNumber in xrange(lines):
             indices = [idx for idx in [(colNum * lines) + lineNumber
                                        for colNum in range(columns)] if idx < len(msgdata)]
             format = (len(indices) * (" %%-%ds " % columnWidth))
             returns.append(format % tuple([msgdata[idx] for idx in indices]))
     #elif type(record.msg) == lxml.etree._Element:
     #    returns.append(str(xml_print(record.msg)))
     else:
         returns.append(str(record.getMessage()))
     if record.exc_info:
         returns.append(self.formatException(record.exc_info))
     return '\n'.join(returns)
Example #13
def int_to_base_array(x, base):
	"""
	Pass the number you wish to format, plus a base number (e.g. for Arabic numerals, 10).
	You will receive a big-endian array of numbers representing each glyph of your formatted number.
	
	Examples:
		int_to_base_array(255,10) returns [2,5,5]
		int_to_base_array(255,2) returns [1,1,1,1,1,1,1,1]
	
	Don't try base-1!
	
	@type	x:		integer
	@param	x:		the integer you wish to convert to a base array
	
	@type	base:	integer
	@param	base:	the base-number of the array you wish to convert to
	
	@rtype:			list
	@return:		A list of integers representing each glyph of your base-X numeral, in big-endian format.
	"""
	arr = []
	modulo = base
	while modulo <= x:
		arr.append(int(math.floor((x % modulo)/(modulo/base))))
		modulo = modulo * base
	arr.append(int(math.floor((x % modulo)/(modulo/base))))	# And then once more
	
	arr.reverse()
	return arr
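
The docstring's examples, run against the function above:

print(int_to_base_array(255, 10))  # [2, 5, 5]
print(int_to_base_array(255, 2))   # [1, 1, 1, 1, 1, 1, 1, 1]
print(int_to_base_array(7, 10))    # [7] -- single digits skip the while loop entirely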
Example #14
    def findMousePos(self, pos):
        # put hit position in window relative coords
        x = pos[0] - self.rect[0] - 2
        y = pos[1] - self.rect[1] - 5
        
        font_width, font_height = getRenderer().getTextSize( ' ', self.font )
        
        col = max( math.floor( (float(x) / float(font_width)) ) + self.scroll[0], 0 )
        row = max( math.floor( (float(y) / float(font_height)) ) + self.scroll[1], 0 )
        
        r, c, i = 0, 0, 0
        while r != row:
            try: i = self.text.index( '\n', i ) + 1
            except ValueError: break
            r += 1
        
        while c < col:
            if i >= len( self.text) or self.text[i] == '\n':
                break
            elif self.text[i] == '\t':
                if ( c + 4 > col ): break
                c += 8
            else:
                c += 1
            i += 1

        return min( max( i, 0 ), len( self.text ) )
Example #15
    def divideRect( self, x, y, width, height, c1, c2, c3, c4 ):
        new_width = math.floor( width / 2 )
        new_height = math.floor( height / 2 )

        if ( width > 1 or height > 1 ):
            # average of all the points and normalize in case of "out of bounds" during displacement
            mid = self.normalize( self.normalize( ( ( c1 + c2 + c3 + c4 ) / 4 ) + self.displace( new_width + new_height ) ) )

            # midpoint of the edges is the average of its two end points
            edge1 = self.normalize( ( c1 + c2 ) / 2 )
            edge2 = self.normalize( ( c2 + c3 ) / 2 )
            edge3 = self.normalize( ( c3 + c4 ) / 2 )
            edge4 = self.normalize( ( c4 + c1 ) / 2 )

            # recursively go down the rabbit hole
            self.divideRect( x, y, new_width, new_height, c1, edge1, mid, edge4 )
            self.divideRect( x + new_width, y, new_width, new_height, edge1, c2, edge2, mid )
            self.divideRect( x + new_width, y + new_height, new_width, new_height, mid, edge2, c3, edge3 )
            self.divideRect( x, y + new_height, new_width, new_height, edge4, mid, edge3, c4 )

        else:
            c = ( c1 + c2 + c3 + c4 ) / 4

            self.heightmap[x][y] = c

            if ( width == 2 ):
                self.heightmap[x + 1][y] = c
            if ( height == 2 ):
                self.heightmap[x][y + 1] = c
            if ( width == 2 and height == 2 ):
                self.heightmap[x + 1][y + 1] = c
Example #16
    def SetValue(self, value):
        """ Sets the FloatSpin value. """

        if not self._textctrl or not self.InRange(value):
            return

        if self._snapticks and self._increment != 0.0:

            finite, snap_value = self.IsFinite(value)

            if not finite: # FIXME What To Do About A Failure?

                if (snap_value - floor(snap_value) < ceil(snap_value) - snap_value):
                    value = self._defaultvalue + floor(snap_value)*self._increment
                else:
                    value = self._defaultvalue + ceil(snap_value)*self._increment

        strs = ("%100." + str(self._digits) + self._textformat[1])%value
        strs = strs.strip()
        strs = self.ReplaceDoubleZero(strs)

        if value != self._value or strs != self._textctrl.GetValue():

            self._textctrl.SetValue(strs)
            self._textctrl.DiscardEdits()
            self._value = value
Example #17
 def getElementAtPoint(self, point):
     '''
     Returns a leaf Element which contains the point.
     '''
     # First, we check that the point does fall inside the mesh.
     if not self.boundingBox.containsPoint(point):
         return None
     
     # Since the root elements in the mesh have a fixed size and spatial arrangement, it's simple to 
     # figure out which root element a point is in without having to poll any other elements.
     
     # Start by converting the point in (x,y) in global units into a relative coord (i,j) measured in cell counts.
     relativeLocation = (point - self.bottomLeft).scaledBy(1/self.maxCellSize)
     i = math.floor(relativeLocation.x)
     j = math.floor(relativeLocation.y)
     
     # Figure out which element that is.
     e = self.elements[i*self.verticalCellCount+j]
          
     # As a safety net, we check that the element does contain the point. If it doesn't, we risk infinite 
     # recursion. There's probably something wrong if that happens, so let's raise an exception.
     if e.boundingBox.containsPoint(point):
         return e.getElementAtPoint(point)
     else:
         print("Need to query an element",e,"for a point ",point,", but it's the wrong element.")
         raise Exception("Fatal Error: Parent mesh attempted to query an element for a point it did not contain.")
Example #18
    def interpolate_cartesian(self, start_pos, start_rot, end_pos, end_rot, pos_spacing, rot_spacing, num_steps = 0):

        #normalize quaternion rotations just in case
        norm_start_rot = self.normalize_vect(start_rot)
        norm_end_rot = self.normalize_vect(end_rot)

        #the desired wrist translation
        diff = [x-y for (x,y) in zip(end_pos, start_pos)]

        if num_steps == 0:
            #compute how far the wrist translates
            pos_move = self.vect_norm(diff)

            #compute how far the wrist rotates
            rot_move = self.quat_angle(norm_start_rot, norm_end_rot)

            #compute the number of steps to move no more than pos_spacing and rot_spacing in each step
            #(min 2, start and end)
            num_steps_pos = math.floor(pos_move/pos_spacing)+1
            num_steps_rot = math.floor(rot_move/rot_spacing)+1
            num_steps = int(max([num_steps_pos, num_steps_rot])+1)  
        
        #interpolate
        steps = []
        for stepind in range(num_steps):
            fraction = float(stepind)/(num_steps-1)  #add both start (0) and end (1)
            rot = list(tf.transformations.quaternion_slerp(norm_start_rot, norm_end_rot, fraction))
            pos = list(numpy.array(diff)*fraction + numpy.array(start_pos))
            steps.append((pos, rot))
            #print "fraction: %5.3f"%fraction, "pos:", pplist(pos), "rot:", pplist(rot)

        return steps
Example #19
 def setAppearance(self):
     """
     A setter for the appearance of the tower.
     """
     self.towerDiv = avg.DivNode(size=util.towerDivSize, pos=(self.pos.x - util.towerDivSize[0]//2, self.pos.y-util.towerDivSize[1]//2))
     
     #sets the explosion radius
     self.towerCircle = avg.CircleNode(fillopacity=0.3, strokewidth=0, fillcolor=self.team.color, r=self.towerDiv.size.x//2, pos=(self.towerDiv.size.x//2,self.towerDiv.size.y//2), parent=self.towerDiv)
     
     
     #sets the fancy snow balls
     
     for i in xrange(5):
         radius = self.towerDiv.size[0]//10
         xPos = random.randint(0 + math.floor(radius), math.ceil(self.towerDiv.size.x - radius))
         yPos = random.randint(0 + math.floor(radius), math.ceil(self.towerDiv.size.y - radius))
         
         snowball = avg.CircleNode(fillopacity=0.5, strokewidth=0, filltexhref=os.path.join(getMediaDir(__file__, "resources"), "snowflakes.png"), r=radius, pos=(xPos,yPos), parent=self.towerDiv)
         
         self.snowballAnim(xPos,yPos,snowball)
         
     
     self.tower = avg.RectNode(fillopacity=1, strokewidth=0, size=util.towerSize, pos=(self.pos.x  - util.towerSize[0] // 2, self.pos.y - util.towerSize[1] // 2))
     
     
     if self.team.name == "Team2":
         self.tower.filltexhref = os.path.join(getMediaDir(__file__, "resources"), "iceball.png")
     else:
         self.tower.filltexhref = os.path.join(getMediaDir(__file__, "resources"), "iceball.png")
Example #20
    def finish_initializing(self, builder):   # pylint: disable=E1002

        self.sm = SessionManager('gecos-firstart')
        self.sm.start()

        iconfile = config.get_data_file('media', '%s' % ('wizard1.png',))
        self.set_icon_from_file(iconfile)

        screen = Gdk.Screen.get_default()
        sw = math.floor(screen.width() - screen.width() / 8)
        sh = math.floor(screen.height() - screen.height() / 9)
        self.resize(sw, sh)

        self.ui.btnTest.set_visible(False)
        self.ui.btnTest.set_sensitive(False)

        self.show_browser()
        self.block()

        self.dbusclient = DBusClient()
        self.dbusclient.connect('state-changed', self.on_dbusclient_state_changed)

        try:
            self.dbusclient.start()
            self.dbusclient.user_login()
            state = self.dbusclient.get_state(reply_handler=self.reply_handler, error_handler=self.error_handler)

        except Exception as e:
            self.unblock()
Example #21
 def _drawTester(self, dt):  
     #test box delete 
     time.sleep(dt)
     (r,c,t,s) = self.world.spArray[0]
     self.update_idletasks()
     self._dbox(r,c)
     time.sleep(dt)
     (r,c,t,s) = self.world.spArray[4]
     self.update_idletasks()
     self._dbox(r,c)
     time.sleep(dt)
     (r,c,t,s) = self.world.spArray[23]
     self.update_idletasks()
     self._dbox(r,c)
     time.sleep(dt)
     (r,c,t,s) = self.world.spArray[2]
     self.update_idletasks()
     self._dbox(r,c)
     time.sleep(dt)
     (r,c,t,s) = self.world.spArray[12]
     self.update_idletasks()
     self._dbox(r,c)
     time.sleep(dt)
     (r,c,t,s) = self.world.spArray[6]
     self.update_idletasks()
     self._dbox(r,c)
     #test cannon ball redraw
     for i in range(50):
         time.sleep(dt)
         (r,c) = (floor(6*i*dt-.5*(i*dt)**2), floor(5*i*dt))
         bln = not i%12
         self._ushot(r,c,bln)
         self._pupdate(15, (float(i)/49)*45)
         self.update_idletasks()
         print (float(i)/49)*45
Example #22
def stftFiltering(x, fs, w, N, H, filter):
# apply a filter to a sound by using the STFT
# x: input sound, w: analysis window, N: FFT size, H: hop size
# filter: magnitude response of filter with frequency-magnitude pairs (in dB)
# returns y: output sound
	M = w.size                                     # size of analysis window
	hM1 = int(math.floor((M+1)/2))                 # half analysis window size by rounding
	hM2 = int(math.floor(M/2))                     # half analysis window size by floor
	x = np.append(np.zeros(hM2),x)                 # add zeros at beginning to center first window at sample 0
	x = np.append(x,np.zeros(hM1))                 # add zeros at the end to analyze last sample
	pin = hM1                                      # initialize sound pointer in middle of analysis window       
	pend = x.size-hM1                              # last sample to start a frame
	w = w / sum(w)                                 # normalize analysis window
	y = np.zeros(x.size)                           # initialize output array
	while pin<=pend:                               # while sound pointer is smaller than last sample      
	#-----analysis-----  
		x1 = x[pin-hM1:pin+hM2]                      # select one frame of input sound
		mX, pX = DFT.dftAnal(x1, w, N)               # compute dft
	#------transformation-----
		mY = mX + filter                             # filter input magnitude spectrum
	#-----synthesis-----
		y1 = DFT.dftSynth(mY, pX, M)                # compute idft
		y[pin-hM1:pin+hM2] += H*y1                  # overlap-add to generate output sound
		pin += H                                    # advance sound pointer
	y = np.delete(y, range(hM2))                  # delete half of first window which was added in stftAnal
	y = np.delete(y, range(y.size-hM1, y.size))   # delete the zeros added at the end for the analysis
	return y
Example #23
    def from_jd(cls, jd):
        """
            Convert a Julian day number to a year/month/day tuple
            of this calendar (matching jQuery calendars algorithm)

            @param jd: the Julian day number
        """

        jd = math.floor(jd) + 0.5

        depoch = jd - cls.to_jd(475, 1, 1)

        cycle = math.floor(depoch / 1029983)
        cyear = math.fmod(depoch, 1029983)

        if cyear != 1029982:
            aux1 = math.floor(cyear / 366)
            aux2 = math.fmod(cyear, 366)
            ycycle = math.floor(((2134 * aux1) + (2816 * aux2) + 2815) / 1028522) + aux1 + 1
        else:
            ycycle = 2820

        year = ycycle + (2820 * cycle) + 474
        if year <= 0:
            year -= 1

        yday = jd - cls.to_jd(year, 1, 1) + 1
        if yday <= 186:
            month = math.ceil(yday / 31)
        else:
            month = math.ceil((yday - 6) / 30)

        day = jd - cls.to_jd(year, month, 1) + 1

        return (int(year), int(month), int(day))
Example #24
 def optimize_for_oom(oom):
     self.colorbar_step = math.floor(step / 10**oom)*10**oom
     self.colorbar_vmin = math.floor(vmin / 10**oom)*10**oom
     self.colorbar_vmax = (self.colorbar_vmin
                           + self.colorbar_step * (number_of_ticks - 1))
     self.colorbar_locs = (np.arange(0, number_of_ticks) * self.colorbar_step
                           + self.colorbar_vmin)
Example #25
    def plotLimits(self, xy):
        ymax = -1.e10
        ymin = 1.e10
        for x, y in xy:
            if y > ymax: ymax = y
            if y < ymin: ymin = y

        dy = abs(ymax - ymin)
        if dy < 0.2*ymin:
            ymin = ymin*.9
            ymax = ymax*1.1
            dy = abs(ymax - ymin)
        else:
            ymin = ymin - 0.1*dy
            ymax = ymax + 0.1*dy
            dy = abs(ymax - ymin)

        p10 = math.floor(math.log10(0.1*dy))
        fctr = math.pow(10.0, p10)
        mm = [2.0, 2.5, 2.0]
        i = 0
        while dy/fctr > 5:
            fctr = mm[i % 3]*fctr
            i = i + 1
        ymin = fctr*math.floor(ymin/fctr)
        ymax = fctr*(math.floor(ymax/fctr + 1))
        return (ymin, ymax, fctr)
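
Since self is unused, the body above also works as a plain function; a worked example of the rounding:

print(plotLimits(None, [(0, 1.0), (1, 4.2), (2, 2.7)]))  # (0.0, 5.0, 1.0)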
Example #26
def _write_float(f, x):
	import math
	if x < 0:
		sign = 0x8000
		x = x * -1
	else:
		sign = 0
	if x == 0:
		expon = 0
		himant = 0
		lomant = 0
	else:
		fmant, expon = math.frexp(x)
		if expon > 16384 or fmant >= 1:		# Infinity or NaN
			expon = sign|0x7FFF
			himant = 0
			lomant = 0
		else:					# Finite
			expon = expon + 16382
			if expon < 0:			# denormalized
				fmant = math.ldexp(fmant, expon)
				expon = 0
			expon = expon | sign
			fmant = math.ldexp(fmant, 32)
			fsmant = math.floor(fmant)
			himant = long(fsmant)
			fmant = math.ldexp(fmant - fsmant, 32)
			fsmant = math.floor(fmant)
			lomant = long(fsmant)
	_write_short(f, expon)
	_write_long(f, himant)
	_write_long(f, lomant)
Example #27
def tile(lng, lat, zoom, truncate=False):
    """Get the tile containing a longitude and latitude

    Parameters
    ----------
    lng, lat : float
        A longitude and latitude pair in decimal degrees.
    zoom : int
        The web mercator zoom level.
    truncate : bool, optional
        Whether or not to truncate inputs to limits of web mercator.

    Returns
    -------
    Tile
    """
    if truncate:
        lng, lat = truncate_lnglat(lng, lat)
    lat = math.radians(lat)
    n = 2.0 ** zoom
    xtile = int(math.floor((lng + 180.0) / 360.0 * n))

    try:
        ytile = int(math.floor((1.0 - math.log(
            math.tan(lat) + (1.0 / math.cos(lat))) / math.pi) / 2.0 * n))
    except ValueError:
        raise InvalidLatitudeError(
            "Y can not be computed for latitude {} radians".format(lat))
    else:
        return Tile(xtile, ytile, zoom)
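
A self-contained check of the same web-mercator math (without the Tile class or input truncation):

import math

def tile_xy(lng, lat, zoom):
    lat = math.radians(lat)
    n = 2.0 ** zoom
    xtile = int(math.floor((lng + 180.0) / 360.0 * n))
    ytile = int(math.floor((1.0 - math.log(
        math.tan(lat) + (1.0 / math.cos(lat))) / math.pi) / 2.0 * n))
    return xtile, ytile

print(tile_xy(-122.4194, 37.7749, 12))  # (655, 1583), the tile over San Francisco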
Example #28
 def from_context_stroke(self, context):
   '''
   Get the DCS bounds of the path in the graphics context.
   Stroke, that is, as inked, not as ideal path.
   '''
   # extents of rect in UCS, aligned with axis
   ulx, uly, lrx, lry = context.stroke_extents() 
   # Two other corners of the rect
   llx = ulx
   lly = lry
   urx = lrx
   ury = uly
   # Four points in DCS, corners of rect NOT axis aligned,
   # and no relationships known between points in DCS
   p1xd, p1yd = context.user_to_device(ulx, uly)
   p2xd, p2yd = context.user_to_device(llx, lly)
   p3xd, p3yd = context.user_to_device(lrx, lry)
   p4xd, p4yd = context.user_to_device(urx, ury)
   # DCS bounds are min and max of device coords of all four points.
   # Snap to outside pixel using floor, ceiling.
   # Convert to int
   minxi = int(math.floor(min(p1xd, p3xd, p2xd, p4xd)))
   minyi = int(math.floor(min(p1yd, p3yd, p2yd, p4yd)))
   maxxi =  int(math.ceil(max(p1xd, p3xd, p2xd, p4xd)))
   maxyi =  int(math.ceil(max(p1yd, p3yd, p2yd, p4yd)))
   width = maxxi - minxi
   height = maxyi - minyi
   self = Bounds(minxi, minyi, width, height)
   # !!! Cannot assert not is_null: line width tiny or other factors
   # may yield empty stroke extents.
   return self
Example #29
 def from_extents(self, ulx, uly, lrx, lry):
   '''
   Set value from an extent tuple.
   For example, cairo extents, floats, inked, converted to DCS.
   
   !!! ulx may be greater than lrx, etc. due to transformations
   
   !!! Extents may be either path (ideal) or stroke (inked).
   The ideal extent of a line can have zero width or height.
   
   !!! User may zoom out enough that bounds approach zero,
   even equal zero?
   
   !!! Parameters are float i.e. fractional.
   Bounds are snapped to the outside pixel boundary.
   '''
   # Snap to integer boundaries and order on the number line.
   # !!! Note int(floor()) not just int()
   minxi = int(math.floor(min(ulx, lrx)))
   minyi = int(math.floor(min(uly, lry)))
   maxxi = int(math.ceil(max(ulx, lrx)))
   maxyi = int(math.ceil(max(uly, lry)))
   width = maxxi - minxi
   height = maxyi - minyi
   # width or height or both can be zero, for example setting transform on empty model
   
   # snap float rect to outside integral pixel
   ## self = gdk.Rectangle(minxi, minyi, width, height)
   self = Bounds(minxi, minyi, width, height)
   # not assert x,y positive
   # assert self.width >= 0  # since abs used
   if self.is_null():
     print "!!!!!!!!!!!! Null bounds", self
   return self
Example #30
 def _hintIter(self, monkey):
     angle = 45
     dir = 0
     dA = 1
     pow = self.world.P[1]
     x_m = monkey[1]
     y_m = monkey[0]
     
     y = x_m*math.tan(angle*(math.pi/180)) - (self.world.g/2) * (x_m/(pow * math.cos(angle*(math.pi/180))))**2
     if floor(y) < y_m:
         return "Not in range"
     while(floor(y) != y_m and angle >= 0 and angle <= 90):
         if floor(y) < y_m:
             if dir == -1:
                 dA = dA / 2.0
             angle += dA
             theta = angle * (math.pi / 180)
             y = x_m*math.tan(theta) - self.world.g * 0.5 * (x_m**2) / (pow**2) / ((math.cos(theta))**2)
             dir = 1
         else:
             if dir == 1:
                 dA = dA / 2.0
             angle -= dA
             theta = angle * (math.pi / 180)
             y = x_m*math.tan(theta) - self.world.g * 0.5 * (x_m**2) / (pow**2) / ((math.cos(theta))**2)
             dir = -1                
     if floor(y) == y_m:
         return angle
     else:
         return "Angle not found"
Example #31
 def calc_change_pct(self, iteracao, max_iter):
     ans = math.floor(self.bits * 0.7 * math.floor(
         (max_iter - iteracao) / 100.0) / 10.0)
     if ans > 1.0:
         return float(ans)
     return 1.0
Example #32
def training_worker(graph_manager, task_parameters, user_batch_size,
                    user_episode_per_rollout):
    try:
        # initialize graph
        graph_manager.create_graph(task_parameters)

        # save initial checkpoint
        graph_manager.save_checkpoint()

        # training loop
        steps = 0

        graph_manager.setup_memory_backend()
        graph_manager.signal_ready()

        # To handle SIGTERM
        door_man = utils.DoorMan()

        #         print('---------------- hook.out_dir ----------------')
        #         print(hook.out_dir)

        #         print('---------------- hook.dry_run ----------------')
        #         print(hook.dry_run)

        #         print('---------------- hook.save_config ----------------')
        #         print(hook.save_config)

        #         print('---------------- hook.include_regex ----------------')
        #         print(hook.include_regex)

        #         print('---------------- hook.include_collections ----------------')
        #         print(hook.include_collections)

        #         print('---------------- hook.save_all ----------------')
        #         print(hook.save_all)

        #         print('---------------- hook.include_workers ----------------')
        #         print(hook.include_workers)

        for level in graph_manager.level_managers:
            for agent in level.agents.values():
                for item in agent.networks.items():
                    name = item[0]
                    network = item[1]

                    print("NETWORK:")
                    print(name)
                    print(network)

                    if network.global_network is not None:
                        network.global_network.optimizer = graph_manager.smdebug_hook.wrap_optimizer(
                            network.global_network.optimizer)

                    if network.online_network is not None:
                        network.online_network.optimizer = graph_manager.smdebug_hook.wrap_optimizer(
                            network.online_network.optimizer)

                    if network.target_network is not None:
                        network.target_network.optimizer = graph_manager.smdebug_hook.wrap_optimizer(
                            network.target_network.optimizer)

        while steps < graph_manager.improve_steps.num_steps:
            # Collect profiler information only if IS_PROFILER_ON is true
            with utils.Profiler(
                    s3_bucket=PROFILER_S3_BUCKET,
                    s3_prefix=PROFILER_S3_PREFIX,
                    output_local_path=TRAINING_WORKER_PROFILER_PATH,
                    enable_profiling=IS_PROFILER_ON):
                graph_manager.phase = core_types.RunPhase.TRAIN
                graph_manager.fetch_from_worker(
                    graph_manager.agent_params.algorithm.
                    num_consecutive_playing_steps)
                graph_manager.phase = core_types.RunPhase.UNDEFINED

                episodes_in_rollout = graph_manager.memory_backend.get_total_episodes_in_rollout()

                for level in graph_manager.level_managers:
                    for agent in level.agents.values():
                        agent.ap.algorithm.num_consecutive_playing_steps.num_steps = episodes_in_rollout
                        agent.ap.algorithm.num_steps_between_copying_online_weights_to_target.num_steps = episodes_in_rollout

                if graph_manager.should_train():
                    # Make sure we have enough data for the requested batches
                    rollout_steps = graph_manager.memory_backend.get_rollout_steps()
                    if any(v <= 0 for v in rollout_steps.values()):
                        log_and_exit(
                            "No rollout data retrieved from the rollout worker",
                            SIMAPP_TRAINING_WORKER_EXCEPTION,
                            SIMAPP_EVENT_ERROR_CODE_500)

                    # Set the batch size to the closest power of 2 such that we have at least two batches, this prevents coach from crashing
                    # as batch size less than 2 causes the batch list to become a scalar which causes an exception
                    episode_batch_size = (user_batch_size
                                          if min(rollout_steps.values()) > user_batch_size
                                          else 2 ** math.floor(math.log(min(rollout_steps.values()), 2)))
                    for level in graph_manager.level_managers:
                        for agent in level.agents.values():
                            agent.ap.network_wrappers[
                                'main'].batch_size = episode_batch_size

                    steps += 1

                    graph_manager.phase = core_types.RunPhase.TRAIN
                    graph_manager.train()
                    graph_manager.phase = core_types.RunPhase.UNDEFINED

                    # Check for Nan's in all agents
                    rollout_has_nan = False
                    for level in graph_manager.level_managers:
                        for agent in level.agents.values():
                            if np.isnan(agent.loss.get_mean()):
                                rollout_has_nan = True

                    if rollout_has_nan:
                        log_and_exit(
                            "NaN detected in loss function, aborting training.",
                            SIMAPP_TRAINING_WORKER_EXCEPTION,
                            SIMAPP_EVENT_ERROR_CODE_500)

                    if graph_manager.agent_params.algorithm.distributed_coach_synchronization_type == DistributedCoachSynchronizationType.SYNC:
                        graph_manager.save_checkpoint()
                    else:
                        graph_manager.occasionally_save_checkpoint()

                    # Clear any data stored in signals that is no longer necessary
                    graph_manager.reset_internal_state()

                for level in graph_manager.level_managers:
                    for agent in level.agents.values():
                        agent.ap.algorithm.num_consecutive_playing_steps.num_steps = user_episode_per_rollout
                        agent.ap.algorithm.num_steps_between_copying_online_weights_to_target.num_steps = user_episode_per_rollout


#                         for item in agent.networks.items():
#                             name = item[0]
#                             network = item[1]

#                             print("NETWORK:")
#                             print(name)
#                             print(network)

#                             print("--------------------------global_network--------------------------")
#                             print(network.global_network)
#                             print("--------------------------online_network--------------------------")
#                             print(network.online_network)
#                             print("--------------------------target_network--------------------------")
#                             print(network.target_network)

#                             if network.global_network is not None:
#                                 hook.add_to_collection("losses", network.global_network.total_loss)
#                                 smdebug_collection.add(network.global_network.total_loss)

#                             if network.online_network is not None:
#                                 hook.add_to_collection("losses", network.online_network.total_loss)
#                                 smdebug_collection.add(network.online_network.total_loss)

#                             if network.target_network is not None:
#                                 hook.add_to_collection("losses", network.target_network.total_loss)
#                                 smdebug_collection.add(network.target_network.total_loss)

                if door_man.terminate_now:
                    log_and_exit(
                        "Received SIGTERM. Checkpointing before exiting.",
                        SIMAPP_TRAINING_WORKER_EXCEPTION,
                        SIMAPP_EVENT_ERROR_CODE_500)
                    graph_manager.save_checkpoint()
                    break

    except ValueError as err:
        if utils.is_error_bad_ckpnt(err):
            log_and_exit("User modified model: {}".format(err),
                         SIMAPP_TRAINING_WORKER_EXCEPTION,
                         SIMAPP_EVENT_ERROR_CODE_400)
        else:
            log_and_exit("An error occured while training: {}".format(err),
                         SIMAPP_TRAINING_WORKER_EXCEPTION,
                         SIMAPP_EVENT_ERROR_CODE_500)
    except Exception as ex:
        log_and_exit("An error occured while training: {}".format(ex),
                     SIMAPP_TRAINING_WORKER_EXCEPTION,
                     SIMAPP_EVENT_ERROR_CODE_500)
    finally:
        graph_manager.data_store.upload_finished_file()
Example #33
def q(n):
    p(int(math.floor(n * 100)))
Example #34
def make_circuit(n:int,f) -> QuantumCircuit:
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[4])  # number=21
    prog.x(input_qubit[2]) # number=26

    Zf = build_oracle(n, f)

    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(1):  # note: 'repeat' is computed above but unused in this variant
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0])  # number=1
        prog.h(input_qubit[1])  # number=2
        prog.h(input_qubit[2])  # number=7
        prog.h(input_qubit[3])  # number=8
        prog.y(input_qubit[3]) # number=25


        prog.x(input_qubit[0])  # number=9
        prog.h(input_qubit[1])  # number=32
        prog.cz(input_qubit[0],input_qubit[1])  # number=33
        prog.h(input_qubit[1])  # number=34
        prog.cx(input_qubit[0],input_qubit[1])  # number=35
        prog.cx(input_qubit[0],input_qubit[1])  # number=38
        prog.x(input_qubit[1])  # number=39
        prog.cx(input_qubit[0],input_qubit[1])  # number=40
        prog.cx(input_qubit[0],input_qubit[1])  # number=37
        prog.cx(input_qubit[0],input_qubit[1])  # number=30
        prog.cx(input_qubit[0],input_qubit[2])  # number=22
        prog.x(input_qubit[2])  # number=23
        prog.y(input_qubit[3]) # number=27
        prog.cx(input_qubit[0],input_qubit[2])  # number=24
        prog.x(input_qubit[3])  # number=12
        prog.cx(input_qubit[1],input_qubit[2]) # number=31

        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])

        prog.x(input_qubit[0])  # number=13
        prog.x(input_qubit[1])  # number=14
        prog.x(input_qubit[2])  # number=15
        prog.x(input_qubit[3])  # number=16


        prog.h(input_qubit[0])  # number=17
        prog.h(input_qubit[1])  # number=18
        prog.h(input_qubit[2])  # number=19
        prog.h(input_qubit[3])  # number=20

        prog.h(input_qubit[0])  
        prog.h(input_qubit[1])
        prog.h(input_qubit[2])
        prog.h(input_qubit[3])


    # circuit end

    for i in range(n):
        prog.measure(input_qubit[i], classical[i])


    return prog
Example #35
def make_circuit(n:int,f) -> QuantumCircuit:
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[0]) # number=3
    prog.rx(-1.3603096190043806,input_qubit[2]) # number=28
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[4])  # number=21

    Zf = build_oracle(n, f)

    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0])  # number=1
        prog.h(input_qubit[1])  # number=2
        prog.h(input_qubit[2])  # number=7
        prog.h(input_qubit[3])  # number=8
        prog.h(input_qubit[3]) # number=34
        prog.cz(input_qubit[4],input_qubit[3]) # number=35
        prog.h(input_qubit[3]) # number=36


        prog.h(input_qubit[0])  # number=38
        prog.cz(input_qubit[1],input_qubit[0])  # number=39
        prog.h(input_qubit[0])  # number=40
        prog.x(input_qubit[0])  # number=32
        prog.cx(input_qubit[1],input_qubit[0])  # number=33
        prog.cx(input_qubit[0],input_qubit[1])  # number=24
        prog.x(input_qubit[1])  # number=25
        prog.x(input_qubit[1]) # number=41
        prog.cx(input_qubit[0],input_qubit[1])  # number=26
        prog.x(input_qubit[2])  # number=11
        prog.cx(input_qubit[2],input_qubit[3]) # number=30
        prog.x(input_qubit[3])  # number=12
        prog.h(input_qubit[2]) # number=42

        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])

        prog.x(input_qubit[0])  # number=13
        prog.x(input_qubit[1])  # number=14
        prog.x(input_qubit[2])  # number=15
        prog.x(input_qubit[4]) # number=46
        prog.x(input_qubit[3])  # number=16


        prog.h(input_qubit[0])  # number=17
        prog.h(input_qubit[1])  # number=18
        prog.cx(input_qubit[0],input_qubit[2]) # number=43
        prog.x(input_qubit[2]) # number=44
        prog.h(input_qubit[2]) # number=47
        prog.cz(input_qubit[0],input_qubit[2]) # number=48
        prog.h(input_qubit[2]) # number=49
        prog.rx(-1.9697785938008003,input_qubit[1]) # number=37
        prog.h(input_qubit[2])  # number=19
        prog.h(input_qubit[3])  # number=20


    prog.x(input_qubit[1]) # number=22
    prog.x(input_qubit[1]) # number=23
    # circuit end

    for i in range(n):
        prog.measure(input_qubit[i], classical[i])


    return prog
Example #36
def weather(arg):
    owm = pyowm.OWM(WeatherAPIKey)  # You MUST provide a valid API key
    fc = owm.three_hours_forecast('Nizhny Novgorod,ru')
    f = fc.get_forecast()
    get_weather_info = False
    day_counter = 0
    current_date = ""
    cleared = False
    temp_Log = ""
    Log = ""
    for weather in f:
        
        print(weather.get_reference_time('date'))
        if ((arg == "завтра") and (day_counter == 1) and (not cleared)):  # "завтра" = tomorrow
            Log = ""
            cleared = True
           
        if(current_date != str(weather.get_reference_time('date').date()).split('-')[2]):
            temp_Log = temp_Log + (str(weather.get_reference_time('date').date()).split('-')[2] + " " + months[str(weather.get_reference_time('date').date()).split('-')[1]] + ":\n")
            current_date = str(weather.get_reference_time('date').date()).split('-')[2]
        
        if (str(weather.get_reference_time('date').time()).split(':')[0] == "09"):
            temp_Log = temp_Log + ("\t🌝 Утро: ")
            get_weather_info = True
 
        if (str(weather.get_reference_time('date').time()).split(':')[0] == "15"):
            temp_Log = temp_Log + ("\t🌞 День: ")
            get_weather_info = True
                
        if (str(weather.get_reference_time('date').time()).split(':')[0] == "21"):
            temp_Log = temp_Log + ("\t🌛 Вечер: ")
            get_weather_info = True
            day_counter = day_counter + 1
 
        if (str(weather.get_reference_time('date').time()).split(':')[0] == "03"):
            temp_Log = temp_Log + ("\t🌚 Ночь: ")
            get_weather_info = True
 
        if (get_weather_info):   
            wind = "{0} {1}".format(weather.get_wind()['speed'], winddirArray[int(math.floor(weather.get_wind()['deg'] + 0.5) % 16)])
            humidity = weather.get_humidity()
            temp = weather.get_temperature('celsius')['temp']
            status = weather.get_status()
            try:
                weatherResult = "{0}°C {1} {2}, wind {3} m/s, humidity {4}%".format(temp, status,emojisDict[status], wind, humidity)
                temp_Log = temp_Log + weatherResult + "\n"
            except TypeError:
                weatherResult = "{0}°C {1}, wind {2} m/s, humidity {3}%".format(temp, status, wind,humidity)
                temp_Log = temp_Log + weatherResult + "\n"
           
            if (str(weather.get_reference_time('date').time()).split(':')[0] == "21"):
                Log = Log + temp_Log
                temp_Log = ""
           
            if ( (arg == "сегодня" or arg == "") and day_counter == 1):
                return Log,""
 
            if (arg == "завтра" and day_counter == 2):
                return Log,""
            get_weather_info = False
 

    if(arg == "прогноз"):
        return Log,""
Example #37
def check_need_delay():
    global times_processed, start_time
    up_time = time.time() - start_time
    expected_times_processed = math.floor(up_time / PROCESS_INTERVAL)
    return times_processed - expected_times_processed > -2
Example #38
def step_decay(epoch):
    lrate = base_lr * math.pow(gamma, math.floor(epoch / 20))
    print("Epoch:", epoch, "Learning rate:", lrate)
    return lrate
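
With illustrative values for the globals step_decay reads (base_lr and gamma are assumptions, not from the original):

import math

base_lr, gamma = 0.01, 0.5
for epoch in (0, 19, 20, 40):
    step_decay(epoch)  # learning rate: 0.01, 0.01, 0.005, 0.0025 -- halved every 20 epochs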
Example #39
def is_square(num: int):
    # floor(sqrt(num)) squared equals num exactly when num is a perfect square
    return int(math.floor(math.sqrt(num)) ** 2) == num
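
A quick check (exact for small inputs; for very large integers math.isqrt avoids floating-point error):

print([n for n in range(1, 30) if is_square(n)])  # [1, 4, 9, 16, 25]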
Example #40
    def add_scatter(
        self,
        name: str,
        data: Union[Dict, DataFrame],
        mapping: Dict = {
            "x": "x",
            "y": "y",
            "z": "z",
            "c": "c",
            "cs": "cs",
            "s": "s",
            "labels": "labels",
        },
        colormap: Union[str, Colormap, List[str], List[Colormap]] = "plasma",
        shader: str = "sphere",
        point_scale: float = 1.0,
        max_point_size: float = 100.0,
        fog_intensity: float = 0.0,
        saturation_limit: Union[float, List[float]] = 0.2,
        categorical: Union[bool, List[bool]] = False,
        interactive: bool = True,
        has_legend: bool = False,
        legend_title: Union[str, List[str]] = None,
        legend_labels: Union[Dict, List[Dict]] = None,
        min_legend_label: Union[str, float, List[str], List[float]] = None,
        max_legend_label: Union[str, float, List[str], List[float]] = None,
        series_title: Union[str, List[str]] = None,
        ondblclick: Union[str, List[str]] = None,
        selected_labels: Union[List, List[List]] = None,
        label_index: Union[int, List[int]] = 0,
        title_index: Union[int, List[int]] = 0,
    ):
        """Add a scatter layer to the plot.

        Arguments:
            name (:obj:`str`): The name of the layer
            data (:obj:`dict` or :obj:`DataFrame`): A Python dict or Pandas DataFrame containing the data

        Keyword Arguments:
            mapping (:obj:`dict`, optional): The keys which contain the data in the input dict or the column names in the pandas :obj:`DataFrame`
            colormap (:obj:`str`, :obj:`Colormap`, :obj:`List[str]`, or :obj:`List[Colormap]` optional): The name of the colormap (can also be a matplotlib Colormap object). A list when visualizing multiple series
            shader (:obj:`str`, optional): The name of the shader to use for the data point visualization
            point_scale (:obj:`float`, optional): The relative size of the data points
            max_point_size (:obj:`int`, optional): The maximum size of the data points when zooming in
            fog_intensity (:obj:`float`, optional): The intensity of the distance fog
            saturation_limit (:obj:`float` or :obj:`List[float]`, optional): The minimum saturation to avoid "gray soup". A list when visualizing multiple series
            categorical (:obj:`bool` or :obj:`List[bool]`, optional): Whether this scatter layer is categorical. A list when visualizing multiple series
            interactive (:obj:`bool`, optional): Whether this scatter layer is interactive
            has_legend (:obj:`bool`, optional): Whether or not to draw a legend
            legend_title (:obj:`str` or :obj:`List[str]`, optional): The title of the legend. A list when visualizing multiple series
            legend_labels (:obj:`Dict` or :obj:`List[Dict]`, optional): A dict mapping values to legend labels. A list when visualizing multiple series
            min_legend_label (:obj:`str`, :obj:`float`, :obj:`List[str]` or :obj:`List[float]`, optional): The label used for the minimum value in a ranged (non-categorical) legend. A list when visualizing multiple series
            max_legend_label (:obj:`str`, :obj:`float`, :obj:`List[str]` or :obj:`List[float]`, optional): The label used for the maximum value in a ranged (non-categorical) legend. A list when visualizing multiple series
            series_title (:obj:`str` or :obj:`List[str]`, optional): The name of the series (used when multiple properties are supplied). A list when visualizing multiple series
            ondblclick (:obj:`str` or :obj:`List[str]`, optional): A JavaScript snippet that is executed on double-clicking on a data point. A list when visualizing multiple series
            selected_labels: (:obj:`Dict` or :obj:`List[Dict]`, optional): A list of label values to show in the selected box. A list when visualizing multiple series
            label_index: (:obj:`int` or :obj:`List[int]`, optional): The index of the label value to use as the actual label (when __ is used to specify multiple values). A list when visualizing multiple series
            title_index: (:obj:`int` or :obj:`List[int]`, optional): The index of the label value to use as the selected title (when __ is used to specify multiple values). A list when visualizing multiple series
        """
        if mapping["z"] not in data:
            data[mapping["z"]] = [0] * len(data[mapping["x"]])

        if "pandas" in type(data).__module__:
            data = data.to_dict("list")

        data_c = data[mapping["c"]]
        data_cs = data[mapping["cs"]] if mapping["cs"] in data else None

        # Check whether the color ("c") are strings
        if type(data_c[0]) is str:
            raise ValueError('Strings are not valid values for "c".')

        # In case there are multiple series defined
        n_series = 1
        if isinstance(data_c[0], Iterable):
            n_series = len(data_c)
        else:
            data_c = [data_c]

        if data_cs is not None and not isinstance(data_cs[0], Iterable):
            data_cs = [data_cs]

        # Make everything a list that isn't one (or a tuple)
        colormap = Faerun.make_list(colormap)
        saturation_limit = Faerun.make_list(saturation_limit)
        categorical = Faerun.make_list(categorical)
        legend_title = Faerun.make_list(legend_title)
        legend_labels = Faerun.make_list(legend_labels, make_list_list=True)
        min_legend_label = Faerun.make_list(min_legend_label)
        max_legend_label = Faerun.make_list(max_legend_label)
        series_title = Faerun.make_list(series_title)
        ondblclick = Faerun.make_list(ondblclick)
        selected_labels = Faerun.make_list(selected_labels, make_list_list=True)
        label_index = Faerun.make_list(label_index)
        title_index = Faerun.make_list(title_index)

        # If any argument list is shorter than the number of series,
        # repeat the last element
        colormap = Faerun.expand_list(colormap, n_series)
        saturation_limit = Faerun.expand_list(saturation_limit, n_series)
        categorical = Faerun.expand_list(categorical, n_series)
        legend_title = Faerun.expand_list(legend_title, n_series, with_none=True)
        legend_labels = Faerun.expand_list(legend_labels, n_series, with_none=True)
        min_legend_label = Faerun.expand_list(
            min_legend_label, n_series, with_none=True
        )
        max_legend_label = Faerun.expand_list(
            max_legend_label, n_series, with_none=True
        )
        series_title = Faerun.expand_list(series_title, n_series, with_value="Series")
        ondblclick = Faerun.expand_list(ondblclick, n_series, with_none=True)
        selected_labels = Faerun.expand_list(selected_labels, n_series)
        label_index = Faerun.expand_list(label_index, n_series)
        title_index = Faerun.expand_list(title_index, n_series)


        legend = [None] * n_series
        is_range = [None] * n_series
        min_c = [None] * n_series
        max_c = [None] * n_series

        for s in range(n_series):
            min_c[s] = float(min(data_c[s]))
            max_c[s] = float(max(data_c[s]))
            len_c = len(data_c[s])

            if min_legend_label[s] is None:
                min_legend_label[s] = min_c[s]

            if max_legend_label[s] is None:
                max_legend_label[s] = max_c[s]

            is_range[s] = False

            if legend_title[s] is None:
                legend_title[s] = name

            # Prepare the legend
            legend[s] = []
            if has_legend:
                legend_values = []
                if categorical[s]:
                    if legend_labels[s]:
                        legend_values = legend_labels[s]
                    else:
                        legend_values = [(i, str(i)) for i in sorted(set(data_c[s]))]
                else:
                    if legend_labels[s]:
                        legend_labels[s].reverse()
                        for value, label in legend_labels[s]:
                            legend_values.append(
                                [(value - min_c[s]) / (max_c[s] - min_c[s]), label]
                            )
                    else:
                        is_range[s] = True
                        for i, val in enumerate(np.linspace(1.0, 0.0, 99)):
                            legend_values.append(
                                [val, str(data_c[s][int(math.floor(len_c / 100 * i))])]
                            )

                cmap = None
                if isinstance(colormap[s], str):
                    cmap = plt.cm.get_cmap(colormap[s])
                else:
                    cmap = colormap[s]

                for value, label in legend_values:
                    legend[s].append([list(cmap(value)), label])

            # Normalize the data to later get the correct colour maps
            if not categorical[s]:
                data_c[s] = np.array(data_c[s])
                data_c[s] = (data_c[s] - min_c[s]) / (max_c[s] - min_c[s])

            if mapping["cs"] in data and len(data_cs) > s:
                data_cs[s] = np.array(data_cs[s])
                min_cs = min(data_cs[s])
                max_cs = max(data_cs[s])
                # Avoid zero saturation by clamping the lower bound to the saturation limit

                data_cs[s] = 1.0 - np.maximum(
                    saturation_limit[s],
                    np.array((data_cs[s] - min_cs) / (max_cs - min_cs)),
                )

            # Format numbers if parameters are indeed numbers
            if isinstance(min_legend_label[s], (int, float)):
                min_legend_label[s] = self.legend_number_format.format(
                    min_legend_label[s]
                )

            if isinstance(max_legend_label[s], (int, float)):
                max_legend_label[s] = self.legend_number_format.format(
                    max_legend_label[s]
                )

        data[mapping["c"]] = data_c
        if data_cs:
            data[mapping["cs"]] = data_cs

        self.scatters[name] = {
            "name": name,
            "shader": shader,
            "point_scale": point_scale,
            "max_point_size": max_point_size,
            "fog_intensity": fog_intensity,
            "interactive": interactive,
            "categorical": categorical,
            "mapping": mapping,
            "colormap": colormap,
            "has_legend": has_legend,
            "legend_title": legend_title,
            "legend": legend,
            "is_range": is_range,
            "min_c": min_c,
            "max_c": max_c,
            "min_legend_label": min_legend_label,
            "max_legend_label": max_legend_label,
            "series_title": series_title,
            "ondblclick": ondblclick,
            "selected_labels": selected_labels,
            "label_index": label_index,
            "title_index": title_index,
        }

        self.scatters_data[name] = data
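
The normalization above relies on two helpers, Faerun.make_list and Faerun.expand_list, whose definitions are not part of this excerpt. A minimal sketch of the semantics the comments imply (wrap scalars in a list, then pad short per-series lists by repeating the last element or a filler); an illustration, not the library's actual implementation:

def make_list(value, make_list_list=False):
    # Wrap anything that is not already a list in a list.
    if not isinstance(value, list):
        return [value]
    # Optionally nest once more, so a flat list (e.g. (value, label) tuples
    # for a single series) becomes a list with one entry per series.
    if make_list_list and len(value) > 0 and not isinstance(value[0], list):
        return [value]
    return value


def expand_list(values, n_series, with_none=False, with_value=None):
    # Pad a per-series argument list to n_series entries by repeating the
    # last element; an empty list is padded with None or a default value.
    values = list(values)
    if len(values) == 0:
        filler = None if with_none else with_value
    else:
        filler = values[-1]
    return values + [filler] * (n_series - len(values))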
Ejemplo n.º 41
def Q_Learning(Pr_des, eps_unc, N_EPISODES, SHOW_EVERY, LEARN_RATE, DISCOUNT,
               EPS_DECAY, epsilon, i_s, pa, energy_pa, pa2ts, pa_s, pa_t,
               act_num, possible_acts, possible_next_states, pick_up, delivery,
               test_n, m, n):

    wb = Workbook()
    sheet_name = 'Simulation' + str(test_n + 1)
    s1 = wb.add_sheet(sheet_name)

    s1.write(1, 0, 'Task-1')
    s1.write(6, 0, 'Task-2')
    s1.write(11, 0, 'Task-3')
    s1.write(16, 0, 'Task-4')

    s1.write(0, 1, '# of Hit')
    s1.write(0, 2, '# Avg. Reward')

    s1.write(0, 6, 'Total Run Time')
    s1.write(0, 7, 'Total Avg. Reward')

    inx = 0

    # Getting the non-accepting TS states

    QL_start_time = timeit.default_timer()

    EVERY_PATH = []
    episode_rewards = []

    # Initialize the Q-table (between -0.001 and 0)
    pa_size = []
    q_table = []
    agent_s = []
    hit_count = []
    mission_tracker = []
    ep_per_task = []
    for i in range(len(energy_pa)):
        pa_size.append(len(pa[i].g.nodes()))
        q_table.append(np.random.rand(m * n, 9) * 0.001 -
                       0.001)  # of states x # of actions
        agent_s.append(i_s[i])  # Initialize the agent's location
        hit_count.append(0)
        mission_tracker.append(0)
        ep_per_task.append([])

    ep_rewards = []
    ep_trajectories_pa = []

    agent_upt = []
    for j in range(len(energy_pa)):
        agent_upt_i = []  # rebuilt per task so each task keeps its own lookup list
        for i in range(len(pa[j].g.nodes())):
            # pa(region, 0) states and the pick-up region are valid reset points
            # (or str(pa[j].g.nodes()[i][0]) == 'r' + str(delivery[j]) if the
            # mission changes -- check here)
            if pa[j].g.nodes()[i][1] == 0 or str(
                    pa[j].g.nodes()[i][0]) == 'r' + str(pick_up[j]):
                agent_upt_i.append(pa2ts[j][i])
            else:
                agent_upt_i.append([])
        agent_upt.append(agent_upt_i)

    for episode in range(N_EPISODES):
        # if episode > 8000:
        # 	epsilon = 0

        which_pd = np.random.randint(
            len(energy_pa))  # randomly choosing the pick-up/delivery task
        mission_tracker[which_pd] = mission_tracker[which_pd] + 1
        hit = []
        for i in range(len(energy_pa)):
            hit.append(0)

        ep_traj_pa = [agent_s[which_pd]]  # Initialize the episode trajectory
        ep_rew = 0  # Initialize the total episode reward
        possible_next_states_copy = copy.deepcopy(
            possible_next_states[which_pd])
        possible_acts_copy = copy.deepcopy(possible_acts[which_pd])

        for t_ep in range(ep_len):

            k_ep = ep_len - t_ep  # Remaining episode time
            if hit[which_pd] == 0:
                #print(which_pd)
                #print(agent_s[which_pd])
                if energy_pa[which_pd][agent_s[
                        which_pd]] == 0:  # Raise the 'hit' flag if the mission is achieved
                    hit[which_pd] = 1  # re-initialize agent_s to prevent getting stuck
                    agent_s[which_pd] = agent_upt[which_pd].index(
                        pa2ts[which_pd]
                        [agent_s[which_pd]])  # Reinitiliaze the pa(region, 0)
                    hit_count[which_pd] = hit_count[which_pd] + 1
                    #break

                en_list = [
                    energy_pa[which_pd][i]
                    for i in possible_next_states_copy[agent_s[which_pd]]
                ]  # Energies of the next possible states
                not_possible_index = []
                ind_minholder = en_list.index(
                    min(en_list))  #np.argmin(np.array(en_list))
                possible_next_states_minholder = possible_next_states_copy[
                    agent_s[which_pd]][ind_minholder]
                possible_acts_minholder = possible_acts_copy[
                    agent_s[which_pd]][ind_minholder]
                for j in range(
                        len(possible_next_states_copy[agent_s[which_pd]])):
                    d_max = en_list[j] + 1
                    i_max = int(math.floor((k_ep - 1 - d_max) / 2))
                    thr_fun = 0
                    for i in range(i_max + 1):
                        thr_fun = thr_fun + np.math.factorial(k_ep) / (
                            np.math.factorial(k_ep - i) * np.math.factorial(i)
                        ) * eps_unc**i * (1 - eps_unc)**(k_ep - i)
                    if thr_fun > Pr_des or i_max < 0:  #energy_pa[possible_next_states_copy[agent_s][j]] > k_ep-2: #
                        not_possible_index.append(j)
                #print("r1pos = " + str(len(possible_next_states_copy[agent_s[which_pd]])))
                for ind in sorted(not_possible_index, reverse=True):
                    del possible_next_states_copy[agent_s[which_pd]][ind]
                    del possible_acts_copy[agent_s[which_pd]][ind]
                #print("r2pos = " + str(len(possible_next_states_copy[agent_s[which_pd]])))
            if len(possible_next_states_copy[agent_s[which_pd]]
                   ) == 0:  # not possible_next_states_copy[agent_s]: #
                possible_next_states_copy[agent_s[which_pd]].append(
                    possible_next_states_minholder)
                possible_acts_copy[agent_s[which_pd]].append(
                    possible_acts_minholder)

            if np.random.uniform() > epsilon:  # Exploit
                possible_qs = q_table[which_pd][pa2ts[which_pd][
                    agent_s[which_pd]], possible_acts_copy[agent_s[
                        which_pd]]]  # Possible Q values for each action
                next_ind = np.argmax(
                    possible_qs)  # Pick the action with max Q value
            else:  # Explore
                next_ind = np.random.randint(
                    len(possible_acts_copy[
                        agent_s[which_pd]]))  # Picking a random action
            # Taking the action
            real_agent_s = agent_s[which_pd]
            real_phy_agent = pa2ts[which_pd][agent_s[which_pd]]
            real_action = possible_acts_copy[real_agent_s][next_ind]
            if np.random.uniform() < eps_unc:
                [chosen_act, next_state] = action_uncertainity(
                    possible_acts_copy[agent_s[which_pd]][next_ind],
                    pa_s[which_pd], pa_t[which_pd], act_num[which_pd],
                    agent_s[which_pd])
                action = chosen_act
                s_a = (agent_s[which_pd], action)  # State & Action pair
                #current_q = q_table[which_pd][agent_s[which_pd], action]                             # (save the current q for the q_table update later on)
                agent_s[
                    which_pd] = next_state  # possible_next_states[agent_upt.index(pa2ts[agent_s])][next_ind]        # moving to next state  (s,a)
            else:
                action = possible_acts_copy[agent_s[which_pd]][next_ind]
                s_a = (agent_s[which_pd], action)  # State & Action pair
                #current_q = q_table[which_pd][agent_s[which_pd], action]                             # (save the current q for the q_table update later on)
                agent_s[which_pd] = possible_next_states_copy[
                    agent_s[which_pd]][next_ind]  # moving to next state  (s,a)

            ep_traj_pa.append(agent_s[which_pd])
            current_q = q_table[which_pd][real_phy_agent, real_action]
            max_future_q = np.amax(
                q_table[which_pd][pa2ts[which_pd][agent_s[which_pd]], :]
            )  # Find the max future q
            rew_obs = rewards_pa[which_pd][agent_s[
                which_pd]]  # Observe the reward of the next state
            new_q = (1 - LEARN_RATE) * current_q + LEARN_RATE * (
                rew_obs + DISCOUNT * max_future_q)
            q_table[which_pd][real_phy_agent, real_action] = new_q
            # print("episode = " + str(episode))
            for i in range(len(energy_pa)):
                if i != which_pd:
                    hypo_agent_s = agent_s[i]
                    hypo_phy_agent = pa2ts[i][agent_s[i]]
                    agent_s[i] = agent_upt[i].index(
                        pa2ts[which_pd][agent_s[which_pd]])
                    current_q = q_table[i][hypo_phy_agent, real_action]
                    max_future_q = np.amax(q_table[i][pa2ts[i][agent_s[i]], :])
                    new_q = (1 - LEARN_RATE) * current_q + LEARN_RATE * (
                        rew_obs + DISCOUNT * max_future_q
                    )  # Calculate the new q value
                    q_table[i][hypo_phy_agent,
                               real_action] = new_q  # Update the table

            ep_rew += rew_obs
        agent_s[which_pd] = agent_upt[which_pd].index(
            pa2ts[which_pd][agent_s[which_pd]])
        # for i in range(len(energy_pa)):
        # 	agent_s[i] = agent_upt[i].index(pa2ts[i][agent_s[i]])# Reinitiliaze the pa(region, 0)
        ep_rewards.append(ep_rew)
        ep_trajectories_pa.append(ep_traj_pa)
        epsilon = epsilon * EPS_DECAY
        ep_per_task[which_pd].append(ep_rew)
        if (episode + 1) % 10000 == 0:
            inx = inx + 1
            for ind in range(len(energy_pa)):
                avg_per_task = np.mean(ep_per_task[ind])
                print('Episode # ' + str(episode + 1) + ' : Task-' + str(ind) +
                      '   # of Hit=' + str(len(ep_per_task[ind])) +
                      '   Avg.=' + str(avg_per_task))
                s1.write(ind * (N_EPISODES // 10000) + inx, 1,
                         len(ep_per_task[ind]))
                s1.write(ind * (N_EPISODES // 10000) + inx, 2, avg_per_task)

        if (episode + 1) % SHOW_EVERY == 0:
            avg_rewards = np.mean(ep_rewards[episode - SHOW_EVERY + 1:episode + 1])
            print('Episode # ' + str(episode + 1) + ' : Epsilon=' +
                  str(round(epsilon, 4)) + '    Avg. reward in the last ' +
                  str(SHOW_EVERY) + ' episodes=' + str(round(avg_rewards, 2)))

    best_episode_index = ep_rewards.index(max(ep_rewards))
    # Optimal policy in pa: the trajectory of the last episode
    # (alternatively: ep_trajectories_pa[best_episode_index])
    optimal_policy_pa = ep_trajectories_pa[N_EPISODES - 1]
    optimal_policy_ts = []  # optimal policy in ts
    opt_pol = []  # optimal policy in (m, n, h) format for visualization
    for ind, val in enumerate(optimal_policy_pa):
        optimal_policy_ts.append(pa2ts[which_pd][val])
        opt_pol.append((math.floor(optimal_policy_ts[ind] / n),
                        optimal_policy_ts[ind] % n, 0))

    print('\n Trajectory at the last episode : ' + str(optimal_policy_ts))
    QL_timecost = timeit.default_timer() - QL_start_time
    success_ratio = []
    for i in range(len(energy_pa)):
        success_ratio.append(100 * hit_count[i] / mission_tracker[i])
        print("Successful Mission Ratio[%] = " + str(success_ratio[i]))
        print("Successful Missions = " + str(hit_count[i]) + " out of " +
              str(mission_tracker[i]))
    d_maxs = []
    for i in range(len(energy_pa)):
        d_maxs.append(max(energy_pa[i]))
    max_energy = max(d_maxs)

    print('\n Total time for Q-Learning : ' + str(QL_timecost) + ' seconds' +
          "\n")
    print('Action uncertainty[%] = ' + str(eps_unc * 100))
    print("Desired Minimum Success Ratio[%] = " + str(100 * Pr_des))
    print("Episode Length = " + str(ep_len) +
          "  and  Max. Energy of the System = " + str(max_energy) + "\n")

    s1.write(1, 6, QL_timecost)
    s1.write(1, 7, np.mean(ep_rewards))
    #filename = 'testing'+str(test_n+1) +'.xls'
    filename = 'testing_million_stable_one_mission.xls'
    wb.save(filename)
    return opt_pol
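
The pruning step inside the episode loop repeatedly evaluates a binomial tail sum: the probability of suffering at most i_max action failures in the remaining k_ep steps, given per-step uncertainty eps_unc. Extracted as a standalone helper (hypothetical name, same arithmetic as the loop above):

import math

def success_probability(k_ep, d_max, eps_unc):
    # d_max is the energy (steps to finish) of the candidate next state;
    # i_max is the failure budget left in the remaining k_ep steps.
    i_max = int(math.floor((k_ep - 1 - d_max) / 2))
    if i_max < 0:
        return 0.0
    return sum(
        math.factorial(k_ep)
        / (math.factorial(k_ep - i) * math.factorial(i))
        * eps_unc ** i * (1 - eps_unc) ** (k_ep - i)
        for i in range(i_max + 1)
    )

print(success_probability(60, 20, 0.10))  # close to 1.0 for these example values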
Ejemplo n.º 42
def get_msdnet_cifar10(blocks,
                       model_name=None,
                       pretrained=False,
                       root=os.path.join('~', '.torch', 'models'),
                       **kwargs):
    """
    Create MSDNet model for CIFAR-10 with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """

    assert (blocks == 22)

    num_scales = 3
    num_feature_blocks = 10
    base = 4
    step = 2
    reduction_rate = 0.5
    growth = 6
    growth_factor = [1, 2, 4, 4]
    use_bottleneck = True
    bottleneck_factor_per_scales = [1, 2, 4, 4]

    assert (reduction_rate > 0.0)
    init_layer_channels = [16 * c for c in growth_factor[:num_scales]]

    step_mode = "even"
    layers_per_subnets = [base]
    for i in range(num_feature_blocks - 1):
        layers_per_subnets.append(step if step_mode == 'even' else step * i +
                                  1)
    total_layers = sum(layers_per_subnets)

    interval = math.ceil(total_layers / num_scales)
    global_layer_ind = 0

    channels = []
    bottleneck_factors = []

    in_channels_tmp = init_layer_channels
    in_scales = num_scales
    for i in range(num_feature_blocks):
        layers_per_subnet = layers_per_subnets[i]
        scales_i = []
        channels_i = []
        bottleneck_factors_i = []
        for j in range(layers_per_subnet):
            out_scales = int(num_scales -
                             math.floor(global_layer_ind / interval))
            global_layer_ind += 1
            scales_i += [out_scales]
            scale_offset = num_scales - out_scales

            in_dec_scales = num_scales - len(in_channels_tmp)
            out_channels = [
                in_channels_tmp[scale_offset - in_dec_scales + k] +
                growth * growth_factor[scale_offset + k]
                for k in range(out_scales)
            ]
            in_dec_scales = num_scales - len(in_channels_tmp)
            bottleneck_factors_ij = bottleneck_factor_per_scales[
                in_dec_scales:][:len(in_channels_tmp)]

            in_channels_tmp = out_channels
            channels_i += [out_channels]
            bottleneck_factors_i += [bottleneck_factors_ij]

            if in_scales > out_scales:
                assert (in_channels_tmp[0] % growth_factor[scale_offset] == 0)
                out_channels1 = int(
                    math.floor(in_channels_tmp[0] /
                               growth_factor[scale_offset] * reduction_rate))
                out_channels = [
                    out_channels1 * growth_factor[scale_offset + k]
                    for k in range(out_scales)
                ]
                in_channels_tmp = out_channels
                channels_i += [out_channels]
                bottleneck_factors_i += [[]]
            in_scales = out_scales

        in_scales = scales_i[-1]
        channels += [channels_i]
        bottleneck_factors += [bottleneck_factors_i]

    net = CIFAR10MSDNet(channels=channels,
                        init_layer_channels=init_layer_channels,
                        num_feature_blocks=num_feature_blocks,
                        use_bottleneck=use_bottleneck,
                        bottleneck_factors=bottleneck_factors,
                        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError(
                "Parameter `model_name` should be properly initialized for loading pretrained model."
            )
        from .model_store import download_model
        download_model(net=net,
                       model_name=model_name,
                       local_model_store_dir_path=root)

    return net
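
A quick check of the layer schedule explains the hard-coded assert (blocks == 22): with base=4, step=2, ten feature blocks, and 'even' step mode, the per-subnet layer counts sum to exactly 22.

base, step, num_feature_blocks = 4, 2, 10
layers_per_subnets = [base] + [step] * (num_feature_blocks - 1)
print(layers_per_subnets)       # [4, 2, 2, 2, 2, 2, 2, 2, 2, 2]
print(sum(layers_per_subnets))  # 22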
Ejemplo n.º 43
def update_flags(flags):
  """Update flags with new parameters.

  Args:
    flags: All model and data parameters

  Returns:
    Updated flags

  Raises:
    ValueError: If the preprocessing mode isn't recognized.
  """

  label_count = len(
      input_data.prepare_words_list(flags.wanted_words.split(',')))
  desired_samples = int(flags.sample_rate * flags.clip_duration_ms /
                        MS_PER_SECOND)
  window_size_samples = int(flags.sample_rate * flags.window_size_ms /
                            MS_PER_SECOND)
  window_stride_samples = int(flags.sample_rate * flags.window_stride_ms /
                              MS_PER_SECOND)
  length_minus_window = (desired_samples - window_size_samples)
  if length_minus_window < 0:
    spectrogram_length = 0
  else:
    spectrogram_length = 1 + int(length_minus_window / window_stride_samples)
  if flags.preprocess == 'raw':
    average_window_width = -1
    fingerprint_width = desired_samples
    spectrogram_length = 1
  elif flags.preprocess == 'average':
    fft_bin_count = 1 + (utils.next_power_of_two(window_size_samples) / 2)
    average_window_width = int(
        math.floor(fft_bin_count / flags.feature_bin_count))
    fingerprint_width = int(
        math.ceil(float(fft_bin_count) / average_window_width))
  elif flags.preprocess == 'mfcc':
    average_window_width = -1
    fingerprint_width = flags.feature_bin_count
  elif flags.preprocess == 'micro':
    average_window_width = -1
    fingerprint_width = flags.feature_bin_count
  else:
    raise ValueError('Unknown preprocess mode "%s" (should be "mfcc",'
                     ' "average", or "micro")' % (flags.preprocess))

  fingerprint_size = fingerprint_width * spectrogram_length

  upd_flags = flags
  upd_flags.mode = Modes.TRAINING
  upd_flags.label_count = label_count
  upd_flags.desired_samples = desired_samples
  upd_flags.window_size_samples = window_size_samples
  upd_flags.window_stride_samples = window_stride_samples
  upd_flags.spectrogram_length = spectrogram_length
  upd_flags.fingerprint_width = fingerprint_width
  upd_flags.fingerprint_size = fingerprint_size
  upd_flags.average_window_width = average_window_width

  # summary logs for TensorBoard
  upd_flags.summaries_dir = os.path.join(flags.train_dir, 'logs/')
  return upd_flags
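
A worked pass through the arithmetic, assuming common speech-commands settings (16 kHz audio, 1000 ms clips, 30 ms windows, 10 ms stride, 40 feature bins with 'mfcc' preprocessing); the concrete values are illustrative, not defaults fixed by this function:

MS_PER_SECOND = 1000
sample_rate, clip_duration_ms = 16000, 1000
window_size_ms, window_stride_ms = 30.0, 10.0

desired_samples = int(sample_rate * clip_duration_ms / MS_PER_SECOND)        # 16000
window_size_samples = int(sample_rate * window_size_ms / MS_PER_SECOND)      # 480
window_stride_samples = int(sample_rate * window_stride_ms / MS_PER_SECOND)  # 160
spectrogram_length = 1 + (desired_samples - window_size_samples) // window_stride_samples  # 98
fingerprint_width = 40                                     # feature_bin_count for 'mfcc'
fingerprint_size = fingerprint_width * spectrogram_length  # 3920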
Ejemplo n.º 44
    def page(self, number, *args, **kwargs):
        """Return a standard ``Page`` instance with custom, digg-specific
        page ranges attached.
        """

        page = super(DiggPaginator, self).page(number, *args, **kwargs)
        number = int(number)  # we know this will work

        # easier access
        num_pages, body, tail, padding, margin = \
            self.num_pages, self.body, self.tail, self.padding, self.margin

        # put active page in middle of main range
        main_range = map(
            int,
            [
                math.floor(number - body / 2.0) +
                1,  # +1 = shift odd body to right
                math.floor(number + body / 2.0)
            ])
        # adjust bounds
        if main_range[0] < 1:
            main_range = map(abs(main_range[0] - 1).__add__, main_range)
        if main_range[1] > num_pages:
            main_range = map((num_pages - main_range[1]).__add__, main_range)

        # Determine leading and trailing ranges; if possible and appropriate,
        # combine them with the main range, in which case the resulting main
        # block might end up considerably larger than requested. While we
        # can't guarantee the exact size in those cases, we can at least try
        # to come as close as possible: we can reduce the other boundary to
        # max padding, instead of using half the body size, which would
        # otherwise be the case. If the padding is large enough, this will
        # of course have no effect.
        # Example:
        #     total pages=100, page=4, body=5, (default padding=2)
        #     1 2 3 [4] 5 6 ... 99 100
        #     total pages=100, page=4, body=5, padding=1
        #     1 2 3 [4] 5 ... 99 100
        # If it were not for this adjustment, both cases would result in the
        # first output, regardless of the padding value.
        if main_range[0] <= tail + margin:
            leading = []
            main_range = [1, max(body, min(number + padding, main_range[1]))]
            main_range[0] = 1
        else:
            leading = range(1, tail + 1)
        # basically the same for the trailing range, but not in ``align_left`` mode
        if self.align_left:
            trailing = []
        else:
            if main_range[1] >= num_pages - (tail + margin) + 1:
                trailing = []
                if not leading:
                    # ... but handle the special case of neither leading nor
                    # trailing ranges; otherwise, we would now modify the
                    # main range low bound, which we just set in the previous
                    # section, again.
                    main_range = [1, num_pages]
                else:
                    main_range = [
                        min(num_pages - body + 1,
                            max(number - padding, main_range[0])), num_pages
                    ]
            else:
                trailing = range(num_pages - tail + 1, num_pages + 1)

        # finally, normalize values that are out of bound; this basically
        # fixes all the things the above code screwed up in the simple case
        # of few enough pages where one range would suffice.
        main_range = [max(main_range[0], 1), min(main_range[1], num_pages)]

        # make the result of our calculations available as custom ranges
        # on the ``Page`` instance.
        page.main_range = range(main_range[0], main_range[1] + 1)
        page.leading_range = leading
        page.trailing_range = trailing
        page.page_range = reduce(
            lambda x, y: x + ((x and y) and [False]) + y,
            [page.leading_range, page.main_range, page.trailing_range])

        page.__class__ = DiggPage
        return page
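
A usage sketch matching the worked example in the comments above; the constructor keywords are assumed from the attributes the method reads, and the snippet is Python 2, like the map/reduce semantics the method relies on:

paginator = DiggPaginator(range(1000), 10, body=5, tail=2, padding=2, margin=4)
page = paginator.page(4)
print page.page_range
# [1, 2, 3, 4, 5, 6, False, 99, 100], rendered as "1 2 3 [4] 5 6 ... 99 100"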
Ejemplo n.º 45
    async def create_filter(self, flags, ctx, order_by=None):
        aggregations = []

        if "mine" in flags and flags["mine"]:
            aggregations.append({"$match": {"user_id": ctx.author.id}})

        if "bids" in flags and flags["bids"]:
            aggregations.append({"$match": {"bidder_id": ctx.author.id}})

        rarity = []
        for x in ("mythical", "legendary", "ub"):
            if x in flags and flags[x]:
                rarity += getattr(self.bot.data, f"list_{x}")
        if rarity:
            aggregations.append({"$match": {"pokemon.species_id": {"$in": rarity}}})

        for x in ("alolan", "mega", "event"):
            if x in flags and flags[x]:
                aggregations.append(
                    {"$match": {"pokemon.species_id": {"$in": getattr(self.bot.data, f"list_{x}")}}}
                )

        if "type" in flags and flags["type"]:
            all_species = [i for x in flags["type"] for i in self.bot.data.list_type(x)]
            aggregations.append({"$match": {"pokemon.species_id": {"$in": all_species}}})

        if "region" in flags and flags["region"]:
            all_species = [i for x in flags["region"] for i in self.bot.data.list_region(x)]
            aggregations.append({"$match": {"pokemon.species_id": {"$in": all_species}}})

        if "favorite" in flags and flags["favorite"]:
            aggregations.append({"$match": {"pokemon.favorite": True}})

        if "shiny" in flags and flags["shiny"]:
            aggregations.append({"$match": {"pokemon.shiny": True}})

        if "name" in flags and flags["name"] is not None:
            all_species = [
                i for x in flags["name"] for i in self.bot.data.find_all_matches(" ".join(x))
            ]

            aggregations.append({"$match": {"pokemon.species_id": {"$in": all_species}}})

        if "nickname" in flags and flags["nickname"] is not None:
            aggregations.append(
                {
                    "$match": {
                        "pokemon.nickname": {
                            "$regex": "("
                            + ")|(".join(" ".join(x) for x in flags["nickname"])
                            + ")",
                            "$options": "i",
                        }
                    }
                }
            )

        if "embedcolor" in flags and flags["embedcolor"]:
            aggregations.append({"$match": {"pokemon.has_color": True}})

        if "ends" in flags and flags["ends"] is not None:
            aggregations.append({"$match": {"ends": {"$lt": datetime.utcnow() + flags["ends"]}}})

        # Numerical flags

        for flag, expr in constants.FILTER_BY_NUMERICAL.items():
            for text in flags[flag] or []:
                ops = self.parse_numerical_flag(text)

                if ops is None:
                    raise commands.BadArgument(f"Couldn't parse `--{flag} {' '.join(text)}`")

                ops[1] = float(ops[1])

                if flag == "iv":
                    ops[1] = float(ops[1]) * 186 / 100

                if ops[0] == "<":
                    aggregations.append(
                        {"$match": {expr: {"$lt": math.ceil(ops[1])}}},
                    )
                elif ops[0] == "=":
                    aggregations.append(
                        {"$match": {expr: {"$eq": round(ops[1])}}},
                    )
                elif ops[0] == ">":
                    aggregations.append(
                        {"$match": {expr: {"$gt": math.floor(ops[1])}}},
                    )

        for flag, amt in constants.FILTER_BY_DUPLICATES.items():
            if flag in flags and flags[flag] is not None:
                iv = int(flags[flag])

                # Processing combinations
                combinations = [
                    {field: iv for field in combo}
                    for combo in itertools.combinations(constants.IV_FIELDS, amt)
                ]
                aggregations.append({"$match": {"$or": combinations}})

        if order_by is not None:
            s = order_by[-1]
            if order_by[-1] in "+-":
                order_by, asc = order_by[:-1], 1 if s == "+" else -1
            else:
                asc = -1 if order_by in constants.DEFAULT_DESCENDING else 1

            aggregations.append({"$sort": {constants.SORTING_FUNCTIONS[order_by]: asc}})

        if "skip" in flags and flags["skip"] is not None:
            aggregations.append({"$skip": flags["skip"]})

        if "limit" in flags and flags["limit"] is not None:
            aggregations.append({"$limit": flags["limit"]})

        return aggregations
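
To make the duplicates branch concrete: for a hypothetical "--triple 31" flag with amt = 3 and six IV fields, the $or match lists every 3-field combination pinned to 31 (the field names below are illustrative, not necessarily the ones in constants):

import itertools

IV_FIELDS = ["pokemon.iv_hp", "pokemon.iv_atk", "pokemon.iv_defn",
             "pokemon.iv_satk", "pokemon.iv_sdef", "pokemon.iv_spd"]

iv, amt = 31, 3
combinations = [
    {field: iv for field in combo}
    for combo in itertools.combinations(IV_FIELDS, amt)
]
match = {"$match": {"$or": combinations}}
print(len(combinations))  # C(6, 3) = 20 alternatives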
Ejemplo n.º 46
    ##### System Inputs for Q-Learning #####
    N_EPISODES = 100000  # of episodes
    SHOW_EVERY = 10000  # Print out the info at every ... episode
    LEARN_RATE = 0.1
    DISCOUNT = 0.95
    EPS_DECAY = 1  #0.99989
    epsilon = 0.1  # 0.95
    eps_unc = 0.10  # Uncertainty in actions
    Pr_des = 0.95

    # Check possible minimum threshold
    d_maxs = []
    for i in range(len(energy_pa)):
        d_maxs.append(max(energy_pa[i]))
    d_max = max(d_maxs)
    i_max = int(math.floor((ep_len - 1 - d_max) / 2))
    thr_fun = 0
    for i in range(i_max + 1):
        thr_fun = thr_fun + np.math.factorial(ep_len) / (
            np.math.factorial(ep_len - i) *
            np.math.factorial(i)) * eps_unc**i * (1 - eps_unc)**(ep_len - i)
    print('Maximum threshold that can be set: ' + str(thr_fun))
    if thr_fun < Pr_des:
        print('Please choose a desired probability threshold lower than ' +
              str(thr_fun))
    else:
        # Call the Q_Learning Function
        for test_n in range(1):
            opt_pol = Q_Learning(Pr_des, eps_unc, N_EPISODES, SHOW_EVERY,
                                 LEARN_RATE, DISCOUNT, EPS_DECAY, epsilon, i_s,
                                 pa, energy_pa, pa2ts, pa_s, pa_t, act_num,
                                 possible_acts, possible_next_states, pick_up,
                                 delivery, test_n, m, n)
Ejemplo n.º 47
a = 1.2
b = 2.0
print a.is_integer(), b.is_integer()   # False True

import math
print round(a), math.ceil(a), math.floor(a)   # 1.0 2.0 1.0
Ejemplo n.º 48
curs.init_pair(1, 7, -1)
curs.init_pair(2, 0, 3)
curs.init_pair(3, 3, -1)

select_idx = 0
key = 0
key_moveup = 106
key_movedown = 107
key_enter = 10
key_q = 113
key_v = 118
key_d = 100
key_u = 117
key_e = 101

lines_mid = math.floor(LINES/2)
title = "P-R-O-J-E-C-T-S"

def move(dire, num, wrap):
    wrapLen = len(wrap)
    if dire == 'up':
        num = (num + 1)%wrapLen
    elif dire == 'down':
        num = (num - 1)%wrapLen
    return num
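
move wraps the selection index around the list in either direction, so for a three-item list:

items = ['a', 'b', 'c']
print(move('up', 2, items))    # 0, wraps past the end
print(move('down', 0, items))  # 2, wraps before the start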


def select(selected, cords, string):
    if selected:
        win.scr.addstr(cords[0], cords[1], string, curs.color_pair(2))
    else:
        # assumed completion: the source is truncated here; draw without highlight
        win.scr.addstr(cords[0], cords[1], string)
Ejemplo n.º 49
    def forward(self,  # type: ignore
                text: Dict[str, torch.LongTensor],
                spans: torch.IntTensor,
                span_labels: torch.IntTensor = None,
                metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
        # pylint: disable=arguments-differ
        """
        Parameters
        ----------
        text : ``Dict[str, torch.LongTensor]``, required.
            The output of a ``TextField`` representing the text of
            the document.
        spans : ``torch.IntTensor``, required.
            A tensor of shape (batch_size, num_spans, 2), representing the inclusive start and end
            indices of candidate spans for mentions. Comes from a ``ListField[SpanField]`` of
            indices into the text of the document.
        span_labels : ``torch.IntTensor``, optional (default = None).
            A tensor of shape (batch_size, num_spans), representing the cluster ids
            of each span, or -1 for those which do not appear in any clusters.
        metadata : ``List[Dict[str, Any]]``, optional (default = None).
            A metadata dictionary for each instance in the batch. We use the "original_text" and "clusters" keys
            from this dictionary, which respectively have the original text and the annotated gold coreference
            clusters for that instance.
        Returns
        -------
        An output dictionary consisting of:
        top_spans : ``torch.IntTensor``
            A tensor of shape ``(batch_size, num_spans_to_keep, 2)`` representing
            the start and end word indices of the top spans that survived the pruning stage.
        antecedent_indices : ``torch.IntTensor``
            A tensor of shape ``(num_spans_to_keep, max_antecedents)`` representing for each top span
            the index (with respect to top_spans) of the possible antecedents the model considered.
        predicted_antecedents : ``torch.IntTensor``
            A tensor of shape ``(batch_size, num_spans_to_keep)`` representing, for each top span, the
            index (with respect to antecedent_indices) of the most likely antecedent. -1 means there
            was no predicted link.
        loss : ``torch.FloatTensor``, optional
            A scalar loss to be optimised.
        """
        # Shape: (batch_size, document_length, embedding_size)
        if len(text) <= 512:
            text_embeddings = self._lexical_dropout(self._text_field_embedder(text))
        else:
            # sliding window approach
            final_representations = []
            current_text = self._text_field_embedder(text[0])
            final_representations.append(current_text[:412])
            for text_fragment in text[1:]:
                next_text = self._text_field_embedder(text_fragment)
                # average the 100 overlapping token representations.
                averaged = torch.mean(torch.stack([current_text[412:], next_text[:100]]), dim=0)
                final_representations = torch.cat([final_representations, averaged])
                current_text = next_text
            final_representations = torch.cat([final_representations, current_text[-100:]])
            text_embeddings = final_representations  # should be [num_sentence, dim_size]

        document_length = text_embeddings.size(1)
        num_spans = spans.size(1)

        # Shape: (batch_size, document_length)
        text_mask = util.get_text_field_mask(text).float()

        # Shape: (batch_size, num_spans)
        span_mask = (spans[:, :, 0] >= 0).squeeze(-1).float()
        # SpanFields return -1 when they are used as padding. As we do
        # some comparisons based on span widths when we attend over the
        # span representations that we generate from these indices, we
        # need them to be <= 0. This is only relevant in edge cases where
        # the number of spans we consider after the pruning stage is >= the
        # total number of spans, because in this case, it is possible we might
        # consider a masked span.
        # Shape: (batch_size, num_spans, 2)
        spans = F.relu(spans.float()).long()

        # Shape: (batch_size, document_length, encoding_dim)
        contextualized_embeddings = self._context_layer(text_embeddings, text_mask)
        # Shape: (batch_size, num_spans, 2 * encoding_dim + feature_size)
        endpoint_span_embeddings = self._endpoint_span_extractor(contextualized_embeddings, spans)
        # Shape: (batch_size, num_spans, embedding_size)
        attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans)

        # Shape: (batch_size, num_spans, embedding_size + 2 * encoding_dim + feature_size)
        span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1)

        # Prune based on mention scores.
        num_spans_to_keep = int(math.floor(self._spans_per_word * document_length))

        (top_span_embeddings, top_span_mask,
         top_span_indices, top_span_mention_scores) = self._mention_pruner(span_embeddings,
                                                                           span_mask,
                                                                           num_spans_to_keep)
        top_span_mask = top_span_mask.unsqueeze(-1)
        # Shape: (batch_size * num_spans_to_keep)
        # torch.index_select only accepts 1D indices, but here
        # we need to select spans for each element in the batch.
        # This reformats the indices to take into account their
        # index into the batch. We precompute this here to make
        # the multiple calls to util.batched_index_select below more efficient.
        flat_top_span_indices = util.flatten_and_batch_shift_indices(top_span_indices, num_spans)

        # Compute final predictions for which spans to consider as mentions.
        # Shape: (batch_size, num_spans_to_keep, 2)
        top_spans = util.batched_index_select(spans,
                                              top_span_indices,
                                              flat_top_span_indices)

        # Compute indices for antecedent spans to consider.
        max_antecedents = min(self._max_antecedents, num_spans_to_keep)

        # Now that we have our variables in terms of num_spans_to_keep, we need to
        # compare span pairs to decide each span's antecedent. Each span can only
        # have prior spans as antecedents, and we only consider up to max_antecedents
        # prior spans. So the first thing we do is construct a matrix mapping a span's
        #  index to the indices of its allowed antecedents. Note that this is independent
        #  of the batch dimension - it's just a function of the span's position in
        # top_spans. The spans are in document order, so we can just use the relative
        # index of the spans to know which other spans are allowed antecedents.

        # Once we have this matrix, we reformat our variables again to get embeddings
        # for all valid antecedents for each span. This gives us variables with shapes
        #  like (batch_size, num_spans_to_keep, max_antecedents, embedding_size), which
        #  we can use to make coreference decisions between valid span pairs.

        # Shapes:
        # (num_spans_to_keep, max_antecedents),
        # (1, max_antecedents),
        # (1, num_spans_to_keep, max_antecedents)
        valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask = \
            self._generate_valid_antecedents(num_spans_to_keep, max_antecedents, util.get_device_of(text_mask))
        # Select tensors relating to the antecedent spans.
        # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
        candidate_antecedent_embeddings = util.flattened_index_select(top_span_embeddings,
                                                                      valid_antecedent_indices)

        # Shape: (batch_size, num_spans_to_keep, max_antecedents)
        candidate_antecedent_mention_scores = util.flattened_index_select(top_span_mention_scores,
                                                                          valid_antecedent_indices).squeeze(-1)
        # Compute antecedent scores.
        # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
        span_pair_embeddings = self._compute_span_pair_embeddings(top_span_embeddings,
                                                                  candidate_antecedent_embeddings,
                                                                  valid_antecedent_offsets)
        # Shape: (batch_size, num_spans_to_keep, 1 + max_antecedents)
        coreference_scores = self._compute_coreference_scores(span_pair_embeddings,
                                                              top_span_mention_scores,
                                                              candidate_antecedent_mention_scores,
                                                              valid_antecedent_log_mask)

        # We now have, for each span which survived the pruning stage,
        # a predicted antecedent. This implies a clustering if we group
        # mentions which refer to each other in a chain.
        # Shape: (batch_size, num_spans_to_keep)
        _, predicted_antecedents = coreference_scores.max(2)
        # Subtract one here because index 0 is the "no antecedent" class,
        # so this makes the indices line up with actual spans if the prediction
        # is greater than -1.
        predicted_antecedents -= 1

        output_dict = {"top_spans": top_spans,
                       "antecedent_indices": valid_antecedent_indices,
                       "predicted_antecedents": predicted_antecedents}
        if span_labels is not None:
            # Find the gold labels for the spans which we kept.
            pruned_gold_labels = util.batched_index_select(span_labels.unsqueeze(-1),
                                                           top_span_indices,
                                                           flat_top_span_indices)

            antecedent_labels = util.flattened_index_select(pruned_gold_labels,
                                                            valid_antecedent_indices).squeeze(-1)
            antecedent_labels += valid_antecedent_log_mask.long()

            # Compute labels.
            # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)
            gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels,
                                                                          antecedent_labels)
            # Now, compute the loss using the negative marginal log-likelihood.
            # This is equal to the log of the sum of the probabilities of all antecedent predictions
            # that would be consistent with the data, in the sense that we are minimising, for a
            # given span, the negative marginal log likelihood of all antecedents which are in the
            # same gold cluster as the span we are currently considering. Each span i predicts a
            # single antecedent j, but there might be several prior mentions k in the same
            # coreference cluster that would be valid antecedents. Our loss is the sum of the
            # probability assigned to all valid antecedents. This is a valid objective for
            # clustering as we don't mind which antecedent is predicted, so long as they are in
            #  the same coreference cluster.
            coreference_log_probs = util.masked_log_softmax(coreference_scores, top_span_mask)
            correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log()
            negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum()

            self._mention_recall(top_spans, metadata)
            self._conll_coref_scores(top_spans, valid_antecedent_indices, predicted_antecedents, metadata)

            output_dict["loss"] = negative_marginal_log_likelihood

        if metadata is not None:
            output_dict["document"] = [x["original_text"] for x in metadata]
        return output_dict
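
The pruning budget computed above scales linearly with document length; for instance, with a spans-per-word ratio of 0.4 (an assumed, typical coreference setting) a 500-token document keeps 200 candidate spans:

import math

spans_per_word = 0.4
document_length = 500
num_spans_to_keep = int(math.floor(spans_per_word * document_length))
print(num_spans_to_keep)  # 200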
Ejemplo n.º 50
    @classmethod
    def from_ability_score(cls, value):
        if value < 1 or value > 30:
            raise ValueError("Ability scores must be between 1 and 30 inclusive")
        return cls(math.floor((value - 10) / 2))
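
This is the standard d20-style ability modifier, floor((score - 10) / 2); a few spot checks:

import math

for score in (1, 8, 10, 15, 30):
    print(score, math.floor((score - 10) / 2))  # -5, -1, 0, 2, 10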
Ejemplo n.º 51
ang_B/=360

A_time=0
B_time=0

Art=2*pi*r1/abs(s1)
Brt=2*pi*r2/abs(s2)

Af=2*pi*r1*ang_A/abs(s1)
Bf=2*pi*r2*ang_B/abs(s2)
print(Af,Bf)
Alis=list()
Blis=list()

if s1>0 and t>Af:
    Alis.append(math.floor(Af))
    A_time+=Af

if s2>0 and t>Bf:
    Blis.append(math.floor(Bf))
    B_time+=Bf
r=0
while(A_time<t  or B_time<t):
    if not r%2:
        A_time+=Art-Af
        B_time+=Brt-Bf
    else:
        A_time+=Af
        B_time+=Bf

    Alis.append(math.floor(A_time))
    Blis.append(math.floor(B_time))  # assumed continuation; the source is truncated here
    r += 1
Ejemplo n.º 52
import math

def print_total_average(total_average):
    print('Total average: {:0.1f} (exact: {:0.5f})'.format(
        math.floor(total_average * 10) / 10, total_average))
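
The floor truncates the displayed average instead of rounding it; for example:

print_total_average(3.456)
# Total average: 3.4 (exact: 3.45600)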
Ejemplo n.º 53
import math as m

print(m.floor(2.9))

print(m.pi)

print(m.e)

print(m.sqrt(25))

print(m.pow(2, 3))
Ejemplo n.º 54
    def process(self, O, H, L, C):
        self.grid_divider = self.box_size
        self.boxes = []
        last_box_level = 0  # last recorded box level
        direction = 0  # current direction
        start_point_0 = 0.
        start_point = 0.
        box_size = self.box_size
        boxes_to_reverse = self.boxes_to_reverse
        data_length = H.shape[0]
        self.levels = np.zeros(data_length)
        opposite_boxes = 0.
        for i in range(data_length):           
            if direction == 0: # starting point
                if C[i] > O[i]:
                    direction = 1
                    #start_point = (floor(L[i] / self.grid_divider) + 1) * self.grid_divider
                    start_point = (floor(L[i] / self.grid_divider)) * self.grid_divider
                    #start_point = L[i]
                    extremum = H[i]
                else:
                    direction = -1
                    start_point = (floor(H[i] / self.grid_divider)) * self.grid_divider
                    #start_point = H[i]
                    extremum = L[i]
                #self.start_point_graph = start_point # - direction * 0 * box_size# + direction * box_size
                distance_from_start = direction * (extremum - start_point) # positive value
                boxes_from_start = int(floor(distance_from_start / box_size))
                self.log('start {} dir {} boxes {}'.format(start_point, direction, boxes_from_start))
#                 if boxes_from_start < boxes_to_reverse:
#                     start_point = start_point - direction * (boxes_to_reverse - boxes_from_start) * box_size
#                     boxes_from_start = boxes_to_reverse
                #self.start_point_graph = start_point - direction * box_size
                self.start_point_graph = start_point
                last_box_level = start_point + direction * boxes_from_start * box_size
                boxes = direction * boxes_from_start
                self.log(boxes)
                self.boxes.append(boxes)
                self.log('start {} dir {} boxes {}'.format(start_point, direction, boxes_from_start))
            elif direction == 1 or direction == -1:
                new_last_box_level = -1.
                if direction == 1:
                    continue_level = H[i]
                    opposite_level = L[i]
                    sign = 1
                else:
                    continue_level = L[i]
                    opposite_level = H[i]
                    sign = -1
                if sign * (continue_level - last_box_level) >= box_size:
                    distance_from_start = (continue_level - start_point)
                    boxes_from_start = int(floor(distance_from_start / box_size))
                    new_last_box_level = start_point + boxes_from_start * box_size
                    #last_box_level = start_point + direction * boxes_from_start * box_size                    
                opposite_distance = sign * (last_box_level - opposite_level)        
                opposite_boxes = int(floor(opposite_distance / box_size))
                if opposite_boxes >= boxes_to_reverse and new_last_box_level < 0:                    
                    direction = - direction
                    self.boxes.append(direction * (opposite_boxes))
                    #self.boxes.append(direction * (opposite_boxes - 1))
                    #self.boxes.append(-(opposite_boxes - 2))
                    start_point = last_box_level
                    last_box_level = start_point + direction * opposite_boxes * box_size
                    #boxes_from_start = opposite_boxes
                    self.log('{} opposite boxes {} new level {} {}'.format(i, opposite_boxes, last_box_level, direction))
                elif new_last_box_level > 0:
                    last_box_level = new_last_box_level
                    #self.boxes[-1] = boxes_from_start - direction
                    self.boxes[-1] = boxes_from_start
                    self.log('{} new boxes {} new level {} {}'.format(i, boxes_from_start, last_box_level, direction))
            else:
                raise Exception('Wrong direction {}!'.format(direction))
            self.levels[i] = last_box_level
            #print(direction, opposite_boxes, last_box_level, self.boxes[-1])           
Ejemplo n.º 55
def interpolate(x,
                scale=None,
                output_size=None,
                mode='linear',
                align_corners=None):
    '''
    Resize an ND array with interpolation.

    Scaling factors for spatial dimensions are determined by either
    ``scale`` or ``output_size``.

    ``nd = len(scale)`` or ``nd = len(output_size)`` determines the number of
    spatial dimensions, and the last ``nd`` dimensions of the input ``x`` are    
    considered as the spatial dimensions to be resized.


    If ``scale`` is given, the ``output_size`` is calculated by
    ``output_size[i] = floor(scale[i] * x.shape[i - len(scale)])``.

    Example:

    .. code-block:: python

        import numpy as np
        import nnabla as nn
        import nnabla.functions as F

        x_data = np.random.rand(64, 3, 224, 224)
        x = nn.Variable.from_numpy_array(x_data)

        # Resize by scales
        y = F.interpolate(x, scale=(2, 2), mode='linear')
        print(y.shape)  # (64, 3, 448, 448)
        y.forward()
        print(y.d)  # Print output

        # Resize to a size
        y2 = F.interpolate(x, output_size=(320, 257), mode='linear')
        print(y2.shape)  # (64, 3, 320, 257)
        y2.forward()
        print(y2.d)  # Print output

    Args:
        x(~nnabla.Variable): N-D array with an arbitrary number of dimensions.
        scale(tuple of ints): Scale factors along axes. The default is
            ``None``, and if this is omitted, ``output_size`` must be specified.
        output_size(tuple of ints): The output sizes for axes. If this is
            given, the scale factors are determined by the output sizes and the
            input sizes. The default is ``None``, and if this is omitted,
            ``scale`` must be specified.
        mode(str): Interpolation mode chosen from ('linear'|'nearest').
            The default is 'linear'.
        align_corners(bool): If true, the corner pixels of input and output
            arrays are aligned, such that the output corner pixels have the
            same values with the input corner pixels.
            The default is ``None``, and it becomes ``True`` if mode is
            'linear', otherwise ``False``.

    Returns:
        ~nnabla.Variable: N-D array.

    '''
    from .function_bases import interpolate as interpolate_base
    import math
    if scale is None and output_size is None:
        raise ValueError('Either scale or output_size must be given')
    elif output_size is None:
        output_size = [
            int(math.floor(s * d))
            for d, s in zip(x.shape[-len(scale):], scale)
        ]
    if align_corners is None:
        if mode == 'linear':
            align_corners = True
        else:
            align_corners = False
    return interpolate_base(x, output_size, mode, align_corners)
Ejemplo n.º 56
from math import floor, pi, sqrt
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister

def make_circuit(n:int,f) -> QuantumCircuit:
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[0]) # number=57
    prog.cz(input_qubit[4],input_qubit[0]) # number=58
    prog.h(input_qubit[0]) # number=59
    prog.cx(input_qubit[4],input_qubit[0]) # number=63
    prog.z(input_qubit[4]) # number=64
    prog.cx(input_qubit[4],input_qubit[0]) # number=65
    prog.cx(input_qubit[4],input_qubit[0]) # number=56
    prog.h(input_qubit[2]) # number=50
    prog.cz(input_qubit[4],input_qubit[2]) # number=51
    prog.h(input_qubit[2]) # number=52
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[4])  # number=21

    Zf = build_oracle(n, f)

    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0])  # number=1
        prog.h(input_qubit[1])  # number=2
        prog.h(input_qubit[2])  # number=7
        prog.h(input_qubit[3])  # number=8


        prog.h(input_qubit[0])  # number=28
        prog.cx(input_qubit[3],input_qubit[0]) # number=60
        prog.z(input_qubit[3]) # number=61
        prog.cx(input_qubit[3],input_qubit[0]) # number=62
        prog.cz(input_qubit[1],input_qubit[0])  # number=29
        prog.h(input_qubit[0])  # number=30
        prog.h(input_qubit[0])  # number=43
        prog.cz(input_qubit[1],input_qubit[0])  # number=44
        prog.h(input_qubit[0])  # number=45
        prog.cx(input_qubit[1],input_qubit[0])  # number=35
        prog.cx(input_qubit[1],input_qubit[0])  # number=38
        prog.x(input_qubit[0])  # number=39
        prog.cx(input_qubit[1],input_qubit[0])  # number=40
        prog.cx(input_qubit[1],input_qubit[0])  # number=37
        prog.h(input_qubit[0])  # number=46
        prog.cz(input_qubit[1],input_qubit[0])  # number=47
        prog.h(input_qubit[0])  # number=48
        prog.cx(input_qubit[1],input_qubit[0])  # number=27
        prog.x(input_qubit[1])  # number=10
        prog.x(input_qubit[2])  # number=11
        prog.x(input_qubit[3])  # number=12

        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])

        prog.x(input_qubit[0])  # number=13
        prog.cx(input_qubit[0],input_qubit[1])  # number=22
        prog.y(input_qubit[2]) # number=41
        prog.x(input_qubit[1])  # number=23
        prog.cx(input_qubit[0],input_qubit[1])  # number=24
        prog.rx(1.0398671683382215,input_qubit[2]) # number=31
        prog.x(input_qubit[2])  # number=15
        prog.x(input_qubit[3])  # number=16


        prog.h(input_qubit[0])  # number=17
        prog.h(input_qubit[1])  # number=18
        prog.h(input_qubit[2])  # number=19
        prog.h(input_qubit[3])  # number=20


    # circuit end



    return prog
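
The repeat count follows the usual Grover iteration estimate, floor(pi/4 * sqrt(2^n)) for an n-qubit search space; a quick check:

from math import floor, pi, sqrt

for n in (2, 3, 5):
    print(n, floor(sqrt(2 ** n) * pi / 4))  # 1, 2, 4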
Ejemplo n.º 57
def processCurrentImage(table):
    imp = WindowManager.getCurrentImage()
    fileName = imp.getTitle()
    middleSlice = int(math.floor(imp.getNFrames() / 2.0) + (imp.getNFrames() % 2))
    imp.setSlice(middleSlice)
    IJ.run("Duplicate...", " ")
    imp.close()
    imp = WindowManager.getCurrentImage()
    
    dir = fiji.analyze.directionality.Directionality_()
    dir.setImagePlus(imp)
    dir.setMethod(fiji.analyze.directionality.Directionality_.AnalysisMethod.FOURIER_COMPONENTS)
    dir.setBinNumber(90)
    dir.setBinStart(-90)
    dir.setBuildOrientationMapFlag(False)
        
    dir.computeHistograms()
    dir.fitHistograms()
    results = dir.getFitAnalysis()
    direction = math.degrees(results[0][0])
    dispersion = math.degrees(results[0][1])
    amount = results[0][2]
    goodness = results[0][3]
    IJ.run("Clear Results")
    IJ.run("FFT")
    fftImp = WindowManager.getCurrentImage()
    
    IJ.run("Mean...", "radius=2");
    IJ.run("Find Maxima...", "noise=15 output=[Point Selection]")
    IJ.run("Measure")
    fftImp.changes = False
    fftImp.close()
      
    rt = ResultsTable.getResultsTable()
    size = rt.size()
    numberOfFrequencies = size
    if size >= 5:
        numberOfFrequencies = 5
    R = zeros('f', numberOfFrequencies)
    Theta = zeros('f', numberOfFrequencies)
    for i in range(0, numberOfFrequencies):
        R[i] = rt.getValue("R", i)
        Theta[i] = rt.getValue("Theta", i)
    table.incrementCounter()
    table.addValue('image', fileName)
    table.addValue('Direction', direction)
    table.addValue('Dispersion', dispersion)
    table.addValue('Amount', amount)
    table.addValue('Goodness', goodness)
    for i in range(0, numberOfFrequencies):
        table.addValue('R' + str(i), R[i])
        table.addValue('Theta' + str(i), Theta[i])
    
    widths = measureWidth(numberOfWidthMeasurements)  # both names presumably defined elsewhere in the full script
    i = 1
    for width in widths:
        table.addValue("width" + str(i), width)
        i = i + 1
    headings = rt.getHeadings()
    for heading in headings:
      if heading != "Label":
        value = rt.getValue(heading, 0)
        table.addValue(heading, value)
    table.show('Directionality analysis')
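A minimal driver for the function above, assuming it runs inside Fiji's Jython script editor with an image already open:

table = ResultsTable()  # from ij.measure, imported above
processCurrentImage(table)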
import math

def split_to_traintest(in_list, percentage):
    # int() guards against math.floor returning a float under Python 2,
    # which would break the slice indices below
    n_train = int(math.floor(len(in_list) * percentage / 100.0))
    return in_list[:n_train], in_list[n_train:]
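For example, an 80/20 split of ten items:

train, test = split_to_traintest(list(range(10)), 80)
print(len(train), len(test))  # 8 2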
Ejemplo n.º 59
0
    def _query_histograms(self):
        measurements = []
        for column_name_lower in self.scan_columns:
            scan_column: ScanColumn = self.scan_columns[column_name_lower]
            column_name = scan_column.column_name

            if scan_column.is_metric_enabled(Metric.HISTOGRAM) and scan_column.numeric_expr:

                buckets: int = scan_column.get_histogram_buckets()

                min_value = scan_column.get_metric_value(Metric.MIN)
                max_value = scan_column.get_metric_value(Metric.MAX)

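                # note: a min or max of exactly 0 is falsy here and skips the histogram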
                if scan_column.has_numeric_values and min_value and max_value and min_value < max_value:
                    # Build the histogram query
                    min_value = floor(min_value * 1000) / 1000
                    max_value = ceil(max_value * 1000) / 1000
                    bucket_width = (max_value - min_value) / buckets

                    boundary = min_value
                    boundaries = [min_value]
                    for i in range(0, buckets):
                        boundary += bucket_width
                        boundaries.append(round(boundary, 3))

                    group_by_cte = scan_column.get_group_by_cte()
                    numeric_value_expr = scan_column.get_group_by_cte_numeric_value_expression()

                    field_clauses = []
                    for i in range(0, buckets):
                        lower_bound = '' if i == 0 else f'{boundaries[i]} <= {numeric_value_expr}'
                        upper_bound = '' if i == buckets - 1 else f'{numeric_value_expr} < {boundaries[i + 1]}'
                        optional_and = '' if lower_bound == '' or upper_bound == '' else ' and '
                        field_clauses.append(
                            f'SUM(CASE WHEN {lower_bound}{optional_and}{upper_bound} THEN frequency END)')

                    fields = ',\n  '.join(field_clauses)

                    sql = (f'{group_by_cte} \n'
                           f'SELECT \n'
                           f'  {fields} \n'
                           f'FROM group_by_value')

                    if self.filter_sql:
                        sql += f' \nWHERE {self.scan.filter_sql}'

                    row = self.warehouse.sql_fetchone(sql)

                    # Process the histogram query
                    frequencies = []
                    for i in range(0, buckets):
                        frequency = row[i]
                        frequencies.append(0 if not frequency else int(frequency))
                    histogram = {
                        'boundaries': boundaries,
                        'frequencies': frequencies
                    }

                    self._log_and_append_query_measurement(
                        measurements, Measurement(Metric.HISTOGRAM, column_name, histogram))
        self._flush_measurements(measurements)
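The boundary arithmetic above is easy to check in isolation. A minimal sketch of just the bucketing step (the function name is illustrative, not part of the scanner):

from math import ceil, floor

def histogram_boundaries(min_value, max_value, buckets):
    # round outward to 3 decimals, then cut the range into equal buckets
    min_value = floor(min_value * 1000) / 1000
    max_value = ceil(max_value * 1000) / 1000
    bucket_width = (max_value - min_value) / buckets
    boundary = min_value
    boundaries = [min_value]
    for _ in range(buckets):
        boundary += bucket_width
        boundaries.append(round(boundary, 3))
    return boundaries

print(histogram_boundaries(1.0004, 1.9996, 4))
# [1.0, 1.25, 1.5, 1.75, 2.0]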
#author: moyuweiqing
#Sentiment analysis with snownlp & matplotlib

import os
from snownlp import SnowNLP
import matplotlib.pyplot as plt
import numpy as np
import math

path = os.path.abspath('..')
text = open(path + '\dependence\小王子.txt', encoding='utf-8')  # utf-8 assumed; the original relied on the platform default
text = text.read()
s1 = text.replace('\n', '').replace(' ', '').replace('.', '。')  # strip line breaks and spaces, normalize periods

#score each sentence with SnowNLP
sn1 = SnowNLP(s1)
sentimentslist = []
for i in sn1.sentences:
    j = SnowNLP(i)
    sentimentslist.append(j.sentiments)

#visualize with matplotlib
dic = {}  # counts per 0.02-wide bin; built here but the plot below re-bins the raw list itself
for i in np.arange(0, 1, 0.02):
    index = round(i, 2)
    dic[index] = 0
for i in sentimentslist:
    temp = round(math.floor(i / 0.02) * 0.02, 2)
    temp = min(temp, 0.98)  # guard: a score of exactly 1.0 has no bin key of its own
    dic[temp] = dic[temp] + 1
plt.hist(sentimentslist, bins=np.arange(0, 1.02, 0.02))  # top edge 1.0 so scores near 1 are counted
plt.savefig(path + '\Results\sentimental_analysis(小王子).png')
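The bucketing line maps each score to the lower edge of its 0.02-wide bin, e.g.:

import math
print(round(math.floor(0.537 / 0.02) * 0.02, 2))  # 0.52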