Пример #1
1
    def __call__(self):
        """Compute and return the tick locations for the axis."""
        base = self._transform.base

        # Transform the current view limits into log space and order them.
        view_lo, view_hi = self.axis.get_view_interval()
        view_lo, view_hi = self._transform.transform_point((view_lo, view_hi))
        if view_hi < view_lo:
            view_lo, view_hi = view_hi, view_lo

        # Number of whole decades spanned by the transformed interval.
        numdec = math.floor(view_hi) - math.ceil(view_lo)

        # Choose the intra-decade subdivisions.
        if self._subs is not None:
            subs = np.asarray(self._subs)
        elif numdec > 10:
            subs = np.array([1.0])
        elif numdec > 6:
            subs = np.arange(2.0, base, 2.0)
        else:
            subs = np.arange(2.0, base)

        # Grow the decade stride until the tick count fits within numticks.
        stride = 1
        while numdec / stride + 1 > self.numticks:
            stride += 1

        decades = np.arange(math.floor(view_lo),
                            math.ceil(view_hi) + stride, stride)
        if len(subs) > 1 or subs[0] != 1.0:
            ticklocs = []
            for decade in decades:
                ticklocs.extend(subs * (np.sign(decade) * base ** np.abs(decade)))
            return np.array(ticklocs)
        return np.array(np.sign(decades) * base ** np.abs(decades))
Пример #2
0
 def from_extents(self, ulx, uly, lrx, lry):
   '''
   Build a Bounds from an extent tuple (ulx, uly, lrx, lry).
   For example, cairo extents, floats, inked, converted to DCS.
   
   !!! ulx may be greater than lrx, etc. due to transformations
   
   !!! Extents may be either path (ideal) or stroke (inked).
   The ideal extent of a line can have zero width or height.
   
   !!! User may zoom out enough that bounds approach zero,
   even equal zero?
   
   !!! Parameters are float i.e. fractional.
   Bounds are snapped to the outside pixel boundary.

   Returns a new Bounds; see the note below — the receiver is not mutated.
   '''
   # Snap to integer boundaries and order on the number line.
   # !!! Note int(floor()) not just int() — int() truncates toward zero,
   # which would snap negative coordinates the wrong way.
   minxi = int(math.floor(min(ulx, lrx)))
   minyi = int(math.floor(min(uly, lry)))
   maxxi = int(math.ceil(max(ulx, lrx)))
   maxyi = int(math.ceil(max(uly, lry)))
   width = maxxi - minxi
   height = maxyi - minyi
   # width or height or both can be zero, for example setting transform on empty model
   
   # snap float rect to outside integral pixel
   ## self = gdk.Rectangle(minxi, minyi, width, height)
   # NOTE(review): rebinding 'self' only creates a NEW Bounds local to this
   # method; the instance it was called on is untouched.  Callers must use
   # the return value — confirm that every call site does.
   self = Bounds(minxi, minyi, width, height)
   # not assert x,y positive
   # assert self.width >= 0  # since abs used
   if self.is_null():
     print "!!!!!!!!!!!! Null bounds", self
   return self
Пример #3
0
def get_slot_utilization(cascade, NUMFIELDS=10000):
	"""
	Render key-slot utilization as a string of digits for visualization.

	<p>Each number represents a block of "groupSize" key slots.
	Numbers 0 through 9 indicate how many slots are used in that block; with 0 all slots are empty and with
	9 all are utilized.
	Note that these <q>blocks</q> are only used here for visualizing allocation. They don't align with key
	partitions or anything.</p>
	:param cascade: cascade whose slot usage is read via reverse_slot_mapping
	:param NUMFIELDS: number of visualization blocks to emit
	:return: JSON string with groupSize, block count and the digit string
	"""
	reverse = reverse_slot_mapping(cascade)
	s = ""
	MAXVAL = 9
	# Slot range is inclusive at both ends, hence the +1.
	# NOTE(review): 'LAST_OBJCT_KEY_SLOT' spelling matches the configuration module.
	total = configuration.LAST_OBJCT_KEY_SLOT - configuration.FIRST_OBJECT_KEY_SLOT + 1
	groupSize = math.floor(total / NUMFIELDS)
	remainder = total - groupSize * NUMFIELDS
	currentPos = 0
	foundInGroup = 0
	# BUG FIX: range() excludes its end, but the size computation above counts
	# LAST_OBJCT_KEY_SLOT, so iterate through the last slot inclusively.
	for slot in range(configuration.FIRST_OBJECT_KEY_SLOT, configuration.LAST_OBJCT_KEY_SLOT + 1):
		if currentPos == groupSize:
			s += str(math.ceil(MAXVAL / groupSize * foundInGroup))
			currentPos = 0
			foundInGroup = 0
		if slot in reverse:
			foundInGroup += 1
		currentPos += 1
	# BUG FIX: flush the final (possibly partial) group; previously the last
	# group was silently dropped whenever remainder == 0.
	if currentPos:
		s += str(math.ceil(MAXVAL / (remainder if remainder else groupSize) * foundInGroup))
	return json.dumps({"groupSize": groupSize, "blocks": NUMFIELDS, "alloc": s})
Пример #4
0
def updateAxes():
    """Poll the joystick and return the four analog axis values.

    Returns a dict with keys 'X-Axis1', 'Y-Axis1', 'X-Axis2', 'Y-Axis2'.
    Y axes are inverted so that up is positive; a +/-0.1 dead zone is
    applied, and values are reduced to 4 decimal places (via ceil, as in
    the original implementation).
    """
    pygame.event.pump()

    def _shape(raw, invert=False):
        # Apply inversion, the dead zone, then the 4-decimal ceil rounding.
        value = -raw if invert else raw
        if -0.1 < value < 0.1:
            value = 0
        return math.ceil(value * 10000) / 10000

    # BUG FIX: the original dict literal listed 'Y-Axis2' twice and omitted
    # 'Y-Axis1'; the keys below are the four that callers actually read.
    return {
        'X-Axis1': _shape(my_joystick.get_axis(0)),
        'Y-Axis1': _shape(my_joystick.get_axis(1), invert=True),
        'X-Axis2': _shape(my_joystick.get_axis(2)),
        'Y-Axis2': _shape(my_joystick.get_axis(3), invert=True),
    }
Пример #5
0
def is_converged(hartree_parameters, structure, return_values=False):
    """Read <name>.conv_res and report whether the convergence study converged.

    :param hartree_parameters: if True, convert ecut/ecuteps from eV to Ha,
        rounded up to the next multiple of 4
    :param structure: structure object; s_name(structure) names the file
    :param return_values: if True and converged, return a dict of the
        non-zero, finite converged values instead of a bool
    :return: bool, or dict of values when return_values is True and converged
    """
    filename = s_name(structure) + ".conv_res"
    to_return = {}
    try:
        # BUG FIX (resource leak): use a context manager so the file is
        # closed even when literal_eval raises ValueError.
        with open(filename, mode='r') as f:
            conv_res = ast.literal_eval(f.read())
        converged = True in conv_res['control'].values()
    except (IOError, OSError, ValueError):
        if return_values:
            print('Inputfile ', filename, ' not found, the convergence calculation did not finish properly' \
                                          ' or was not parsed ...')
        converged = False
        return converged
    if return_values and converged:
        if hartree_parameters:
            # Convert to Hartree and round up to the next multiple of 4.
            try:
                conv_res['values']['ecut'] = 4 * math.ceil(conv_res['values']['ecut'] * eV_to_Ha / 4)
            except (KeyError, ArithmeticError, FloatingPointError, SyntaxError):
                pass
            try:
                conv_res['values']['ecuteps'] = 4 * math.ceil(conv_res['values']['ecuteps'] * eV_to_Ha / 4)
            except (KeyError, ArithmeticError, FloatingPointError, SyntaxError):
                pass
        # Only report values that are meaningful (non-zero and finite).
        for k in conv_res['values'].keys():
            if conv_res['values'][k] != 0 and conv_res['values'][k] != np.inf:
                to_return.update({k: conv_res['values'][k]})
        return to_return
    else:
        return converged
Пример #6
0
 def sharkeventkinshnavaw(self, a_ship, a_screen):
     """Resolve the king-shark encounter after the naval-warfare choice.

     With probability 9/10 the shark spots the crew: the warning text is
     rendered line by line, random damage is applied to ship and crew, and
     the follow-up menu is drawn.  Otherwise an escape message is shown.
     """
     randnum = random.randint(1, 10)
     if randnum < 10:
         self.mess = "What were you thinking? The king shark sees all in the seas, nothing escapes it's sight"
         self.mess += "What would you order your crew to do? The shark is still attacking."
         # BUG FIX: slice boundaries are now contiguous; the original skipped
         # characters 68 and 143, silently dropping them from the display.
         messobj = StrObj.render(self.mess[0:33], 1, (0, 0, 0))
         a_screen.blit(messobj, (self.xpos, 5))
         messobj = StrObj.render(self.mess[33:68], 1, (0, 0, 0))
         a_screen.blit(messobj, (self.xpos, 25))
         messobj = StrObj.render(self.mess[68:87], 1, (0, 0, 0))
         a_screen.blit(messobj, (self.xpos, 45))
         messobj = StrObj.render(self.mess[87:113], 1, (0, 0, 0))
         a_screen.blit(messobj, (self.xpos, 65))
         messobj = StrObj.render(self.mess[113:143], 1, (0, 0, 0))
         a_screen.blit(messobj, (self.xpos, 85))
         messobj = StrObj.render(self.mess[143:], 1, (0, 0, 0))
         a_screen.blit(messobj, (self.xpos, 105))
         # Damage is split roughly 55/45 between hull and crew.
         randnum = random.randint(0, 9)
         a_ship.ShipHp -= int(math.ceil(randnum * .55))
         a_ship.CrewHp -= int(math.ceil(randnum * .45))
         self.MenuOp.initshkinafternavawf(170)
         self.MenuOp.drawmen(a_screen, 0)
     else:
         self.mess = "Impossible! The king shark did not see us! The fates must be asleep."
         messobj = StrObj.render(self.mess[0:35], 1, (0, 0, 0))
         a_screen.blit(messobj, (self.xpos, 5))
         messobj = StrObj.render(self.mess[35:], 1, (0, 0, 0))
         a_screen.blit(messobj, (self.xpos, 25))
    def list_to_roll(self, data, time_resolution=0.01):
        """Convert an event list into a binary event-roll matrix.

        Each row of the roll is one time segment of length time_resolution;
        each column is one class from self.class_list.  A cell is 1 while
        the corresponding event is active.

        Parameters
        ----------
        data : list
            Event list, list of event dicts

        time_resolution : float > 0
            Time resolution used when converting event into event roll.

        Returns
        -------
        event_roll : numpy.ndarray [shape=(math.ceil(data_length * 1 / time_resolution) + 1, amount of classes)]
            Event roll
        """

        # Size the roll to cover the latest event offset plus one segment.
        segments = math.ceil(self.max_event_offset(data) * 1 / time_resolution) + 1
        event_roll = numpy.zeros((segments, len(self.class_list)))

        # Mark every active segment for each event.
        for event in data:
            class_idx = self.class_list.index(event['event_label'].rstrip())
            start = math.floor(event['event_onset'] * 1 / time_resolution)
            stop = math.ceil(event['event_offset'] * 1 / time_resolution) + 1
            event_roll[start:stop, class_idx] = 1

        return event_roll
Пример #8
0
def _download(url):
    """
    Do a wget: Download the file specified in url to the cwd.
    Return the filename.

    A simple progress indicator is written to stdout while downloading.
    """
    filename = os.path.split(url)[1]
    req = requests.get(url, stream=True, headers={'User-Agent': 'PyBOMBS'})
    # ROBUSTNESS: the server may omit Content-Length (e.g. chunked transfer);
    # fall back to 0 and skip the percentage instead of raising KeyError /
    # ZeroDivisionError.
    filesize = float(req.headers.get('content-length', 0))
    filesize_dl = 0
    with open(filename, "wb") as f:
        for buff in req.iter_content(chunk_size=8192):
            if buff:
                f.write(buff)
                filesize_dl += len(buff)
            # TODO wrap this into an output processor or at least
            # standardize the progress bars we use
            percent = int(filesize_dl * 100. / filesize) if filesize else 0
            status = r"%05d kB / %05d kB (%03d%%)" % (
                    int(math.ceil(filesize_dl / 1000.)),
                    int(math.ceil(filesize / 1000.)),
                    percent,
            )
            # Backspace over the status line so the next update overwrites it.
            status += chr(8) * (len(status) + 1)
            sys.stdout.write(status)
    sys.stdout.write("\n")
    return filename
Пример #9
0
def analyze_content(size):
  """Analyze content for basic size information.

  Gives basic information about the total size of
  content in MegaBytes and the suitable storage medium to copy
  content.

  Args:
    size: Size of intended storage medium in MB; if falsy, DVD/flash-drive
      suggestions are printed instead.
  """
  with open("./config.yaml") as data_file:
    # SECURITY: safe_load parses plain YAML only; bare yaml.load can execute
    # arbitrary Python via tags and is deprecated without a Loader argument.
    conf_data = yaml.safe_load(data_file)

  src_dir = conf_data["source"]["main_path"]
  total_size = get_size(src_dir) / 1000000.00

  if not size:
    # Capacities: single-layer DVD 4.7GB, dual-layer 8.5GB, flash drive 16GB.
    single_layered_disc = int(math.ceil(total_size / 4700))
    dual_layered_disc = int(math.ceil(total_size / 8500))
    flash = int(math.ceil(total_size / 16000))
    click.echo("The total size of content is {0}MB".format(total_size))
    click.echo("You need {0} single-layered DVD disc(s) or {1} dual-layered"
               " DVD disc(s) to copy content".format(single_layered_disc,
                                                     dual_layered_disc))
    click.echo(
        " OR You need {0} (16GB) flash drive(s) to copy content".format(flash))
  else:
    device_number = int(math.ceil(total_size / int(size)))
    click.echo("The total size of content is {0}MB".format(total_size))
    click.echo(
        "You need {0} storage device of this size to copy content".format(device_number))
Пример #10
0
    def SetValue(self, value):
        """ Sets the FloatSpin value, snapping to ticks when configured. """

        # Ignore the request when the text control does not exist yet or the
        # value lies outside the allowed range.
        if not self._textctrl or not self.InRange(value):
            return

        if self._snapticks and self._increment != 0.0:

            finite, snap_value = self.IsFinite(value)

            if not finite: # FIXME What To Do About A Failure?

                # Snap to the nearest tick: pick floor or ceil of the tick
                # index, whichever is closer to snap_value.
                if (snap_value - floor(snap_value) < ceil(snap_value) - snap_value):
                    value = self._defaultvalue + floor(snap_value)*self._increment
                else:
                    value = self._defaultvalue + ceil(snap_value)*self._increment

        # Render with the configured digit count, then strip the padding the
        # %100.<digits> width specifier introduces.
        strs = ("%100." + str(self._digits) + self._textformat[1])%value
        strs = strs.strip()
        strs = self.ReplaceDoubleZero(strs)

        # Only touch the text control when something actually changed, so its
        # modified/edited state is not reset needlessly.
        if value != self._value or strs != self._textctrl.GetValue():

            self._textctrl.SetValue(strs)
            self._textctrl.DiscardEdits()
            self._value = value
Пример #11
0
    def handle(self, *args, **options):
        """
        Command entry point: split items into train/test sets and dump the
        train set into numbered JSON parts; test-set ids go to a text file.
        """

        excluded_hosts = [
            'allmychanges.com',
            'stackoverflow.com',
        ]
        # Exclude items whose link matches any of the hosts above.
        query = Q()
        for host in excluded_hosts:
            query |= Q(link__contains=host)

        items = Item.objects.exclude(query).order_by('?')
        items_cnt = items.count()

        train_size = math.ceil(items_cnt * (options['percent'] / 100))
        # test_size = items_cnt - train_size
        train_part_size = math.ceil(train_size / options['cnt_parts'])

        train_set = items[:train_size]
        test_set = items[train_size:]

        # Emit the train set in cnt_parts JSON chunks.
        for part in range(options['cnt_parts']):
            name = 'data_{0}_{1}.json'.format(train_part_size, part)
            chunk = train_set[part * train_part_size: (part + 1) * train_part_size]
            create_dataset(chunk, name)

        with open(os.path.join(settings.DATASET_FOLDER, 'test_set_ids.txt'), 'w') as fio:
            fio.writelines(['%s\n' % x for x in test_set.values_list('id', flat=True)])
Пример #12
0
    def __init__(self, n, n_iter=3, train_size=.5, test_size=None,
                 random_state=None, n_bootstraps=None):
        """Configure a bootstrap splitter over n samples.

        train_size/test_size may be given either as a fraction of n (a real
        number in [0, 1], rounded up) or as an absolute integer count;
        test_size defaults to the complement of train_size.
        """
        self.n = n
        if n_bootstraps is not None:  # pragma: no cover
            warnings.warn("n_bootstraps was renamed to n_iter and will "
                          "be removed in 0.16.", DeprecationWarning)
            n_iter = n_bootstraps
        self.n_iter = n_iter

        def _resolve(size, name):
            # Fractions get scaled by n and rounded up; integers pass through.
            if isinstance(size, numbers.Real) and 0.0 <= size <= 1.0:
                return int(ceil(size * n))
            if isinstance(size, numbers.Integral):
                return size
            raise ValueError("Invalid value for %s: %r" % (name, size))

        def _check(size, name):
            # Neither subset may exceed the population size.
            if size > n:
                raise ValueError("%s=%d should not be larger than n=%d" %
                                 (name, size, n))

        self.train_size = _resolve(train_size, "train_size")
        _check(self.train_size, "train_size")

        if test_size is None:
            self.test_size = self.n - self.train_size
        else:
            self.test_size = _resolve(test_size, "test_size")
        _check(self.test_size, "test_size")

        self.random_state = random_state
Пример #13
0
 def attack(self, player, enemy):
     """Resolve one round of combat between player and enemy.

     Accuracy is derived from attack/defense/proficiency (advantage clamped
     to +/-10), damage from attack minus 65% of defense.  Returns a
     (message, curses color pair) tuple describing the round's outcome.
     """
     player_accuracy = (ceil(player.attack_current / 2.0) + player.proficiency_current) - (ceil(enemy.defense_current / 2.0) + enemy.proficiency_current)
     if player_accuracy < -10:
         player_accuracy = -10
     elif player_accuracy > 10:
         player_accuracy = 10
     player_accuracy = int(player_accuracy * 5) + 85 + randint(0, 5)
     enemy_accuracy  = (ceil(enemy.attack_current / 2.0) + enemy.proficiency_current) - (ceil(player.defense_current / 2.0) + player.proficiency_current)
     if enemy_accuracy < -10:
         enemy_accuracy = -10
     elif enemy_accuracy > 10:
         enemy_accuracy = 10
     enemy_accuracy = int(enemy_accuracy * 5) + 85 + randint(0, 5)
     player_damage = 0
     enemy_damage = 0
     state = "miss"
     if randint(0, 100) <= player_accuracy:
         player_damage = player.attack_current - int(enemy.defense_current * 0.65)
         if player_damage < 0:
             player_damage = 0
         state = enemy.take_damage(player_damage)
     if state == "alive" or state == "miss":
         # Enemy counter-attacks whenever it survived (or the player missed).
         if randint(0, 100) <= enemy_accuracy:
             enemy_damage = enemy.attack_current - int(player.defense_current * 0.65)
             if enemy_damage < 0:
                 enemy_damage = 0
             player.take_damage(enemy_damage)
         # BUG FIX: the "miss" message was unreachable before — a miss fell
         # into this branch and returned the "You attack" message instead.
         if state == "miss":
             return ("You miss and take {0}".format(enemy_damage), curses.color_pair(2))
         return ("You attack, dealing {0} damage and taking {1} damage".format(player_damage, enemy_damage), curses.color_pair(2))
     elif state == "dead":
         return ("You attack, dealing {0} damage".format(player_damage), curses.color_pair(2))
     else:
         raise ValueError("Unit is not alive or dead")
def NNI(dir):
    """Resample the numbered .png frames in dir to exactly 32 frames.

    The directory must contain files named '<int>.png'.  With fewer than 32
    frames, existing frames are spread over 1..32 and gaps are filled by
    duplicating the next available frame; with more than 32, frames are
    subsampled and renumbered 1..32.  Exactly 32 leaves the directory as is.
    """
    import shutil  # local import: used for the portable gap-filling copy below

    All = os.listdir(dir)
    NumL = [int(I[0:-4]) for I in All]
    n = len(All)
    if n < 32:
        # Spread the n frame numbers over the range 1..32.
        Ext = [int(math.ceil(x * 32. / n)) for x in NumL]
        # Two-phase rename via '__' temp names so targets never collide.
        for i in range(n):
            os.rename(os.path.join(dir, All[i]), os.path.join(dir, '__' + str(Ext[i]) + '.png'))
        for i in range(n):
            os.rename(os.path.join(dir, '__' + str(Ext[i]) + '.png'), os.path.join(dir, str(Ext[i]) + '.png'))
        # Fill each missing index by copying the next existing frame.
        Dif = list(set(range(1, 33)).difference(set(Ext)))
        Dif.sort()
        Ext.sort()
        for i in Dif:
            item = [x > i for x in Ext].index(True)
            obj = Ext[item]
            # BUG FIX: copy with shutil instead of os.system('cp ...'), which
            # broke on paths containing spaces or shell metacharacters and is
            # not portable off POSIX shells.
            shutil.copyfile(os.path.join(dir, str(obj) + '.png'), os.path.join(dir, str(i) + '.png'))
    elif n > 32:
        # Pick 32 representative frame numbers and delete the rest.
        Ext = [int(math.ceil(x * n / 32.)) for x in range(1, 33)]
        m = len(Ext)
        Dif = list(set(NumL).difference(set(Ext)))
        Dif.sort()
        Ext.sort()
        for i in Dif:
            os.remove(os.path.join(dir, str(i) + '.png'))
        # Two-phase rename of the survivors down to 1..32.
        for i in range(m):
            os.rename(os.path.join(dir, str(Ext[i]) + '.png'), os.path.join(dir, '__' + str(i + 1) + '.png'))
        for i in range(m):
            os.rename(os.path.join(dir, '__' + str(i + 1) + '.png'), os.path.join(dir, str(i + 1) + '.png'))
Пример #15
0
def add_header_and_rotate_timeline(target, height, operations_height):
    """Stamp the header PDF onto every page of target and rotate timeline pages.

    The timeline pages (those after 4 fixed pages plus the operation-row
    pages) are rotated 90 degrees counter-clockwise; the result is written
    to media/report.pdf.

    :param target: path of the PDF to process
    :param height: timeline height in pixels (paged by PIXELS_X_PAGE)
    :param operations_height: number of operation rows (paged by ROWS_X_PAGE)
    """
    header_root = os.path.join(settings.BASE_DIR, 'base', 'static', 'doc', 'header.pdf')
    result_root = os.path.join(settings.BASE_DIR, 'media', 'report.pdf')

    total_pages = int(math.ceil(height / PIXELS_X_PAGE))
    total_pages_x_rows = int(math.ceil(operations_height / ROWS_X_PAGE))

    # BUG FIX (resource leak): the two source PDFs were opened and never
    # closed; context managers keep them open only while reading/writing.
    with open(target, 'rb') as target_file, open(header_root, 'rb') as header_file:
        pdf_target = PyPDF2.PdfFileReader(target_file)
        pdf_header = PyPDF2.PdfFileReader(header_file)
        pdf_writer = PyPDF2.PdfFileWriter()

        for page_num in range(pdf_target.getNumPages()):
            pdf_target.getPage(page_num).mergePage(pdf_header.getPage(0))

        # NOTE(review): presumably keeps the rotation window from running
        # past the last page when the counts line up exactly — confirm.
        if pdf_target.getNumPages() - 1 == 4 + total_pages_x_rows + total_pages:
            total_pages -= 1

        for page_num in range(pdf_target.getNumPages()):
            page = pdf_target.getPage(page_num)
            if (page_num >= (4 + total_pages_x_rows)) \
                    and (page_num <= (4 + total_pages_x_rows + total_pages)):
                page.rotateClockwise(-90)
            pdf_writer.addPage(page)

        with open(result_root, 'wb') as result:
            pdf_writer.write(result)
Пример #16
0
 def setAppearance(self):
     """
     A setter for the appearance of the tower.

     Builds the tower's DivNode with an explosion-radius circle, five
     randomly placed animated snowball circles, and the tower rectangle.
     """
     # Container div centred on the tower position.
     self.towerDiv = avg.DivNode(size=util.towerDivSize, pos=(self.pos.x - util.towerDivSize[0]//2, self.pos.y-util.towerDivSize[1]//2))
     
     #sets the explosion radius
     self.towerCircle = avg.CircleNode(fillopacity=0.3, strokewidth=0, fillcolor=self.team.color, r=self.towerDiv.size.x//2, pos=(self.towerDiv.size.x//2,self.towerDiv.size.y//2), parent=self.towerDiv)
     
     
     #sets the fancy snow balls
     # Each snowball centre is clamped to [radius, size - radius] so the
     # circle stays fully inside the div.
     for i in xrange(5):
         radius = self.towerDiv.size[0]//10
         xPos = random.randint(0 + math.floor(radius), math.ceil(self.towerDiv.size.x - radius))
         yPos = random.randint(0 + math.floor(radius), math.ceil(self.towerDiv.size.y - radius))
         
         snowball = avg.CircleNode(fillopacity=0.5, strokewidth=0, filltexhref=os.path.join(getMediaDir(__file__, "resources"), "snowflakes.png"), r=radius, pos=(xPos,yPos), parent=self.towerDiv)
         
         self.snowballAnim(xPos,yPos,snowball)
         
     
     self.tower = avg.RectNode(fillopacity=1, strokewidth=0, size=util.towerSize, pos=(self.pos.x  - util.towerSize[0] // 2, self.pos.y - util.towerSize[1] // 2))
     
     
     # NOTE(review): both branches assign the same texture ("iceball.png");
     # Team2 was presumably meant to get a different image — confirm intent.
     if self.team.name == "Team2":
         self.tower.filltexhref = os.path.join(getMediaDir(__file__, "resources"), "iceball.png")
     else:
         self.tower.filltexhref = os.path.join(getMediaDir(__file__, "resources"), "iceball.png")
Пример #17
0
def _convert_time(byte_list):
    """Decode the trailing six bytes of byte_list into an ISO-8601 timestamp.

    The last six entries are (seconds, minutes, hours, day, month, year);
    seconds and day are divided by 4 and rounded up (the device appears to
    store them in quarter units — unverified), and the year is an offset
    from 1985.  Returns a '+00:00'-suffixed timestamp string.
    """
    seconds, minutes, hours, day, month, year = byte_list[-6:]
    return '%d-%02d-%02dT%02d:%02d:%02d+00:00' % (
        year + 1985,
        month,
        math.ceil(day / 4),
        hours,
        minutes,
        math.ceil(seconds / 4),
    )
Пример #18
0
	def format(self, value, form, length=32):
		""" Format a number based on a format character and length.

		:param value: the integer value to render
		:param form: format character ('x'/'a' hex, 'o' octal, 'b'/'t'
		             binary); anything else uses gdb's current output radix
		:param length: bit width used to compute zero padding (default 32)
		:return: the formatted string
		"""
		# Get the current gdb radix setting.
		# BUG FIX: use a raw string for the regex — "\d" in a plain string is
		# an invalid escape sequence (DeprecationWarning, error in future
		# Python versions).
		radix = int(re.search(r"\d+", gdb.execute("show output-radix", True, True)).group(0))

		# Override it if asked to.
		if form in ('x', 'a'):
			radix = 16
		elif form == 'o':
			radix = 8
		elif form in ('b', 't'):
			radix = 2

		# Format the output, zero-padding to the digit count the bit width needs.
		if radix == 16:
			# For addresses, probably best in hex too
			l = int(math.ceil(length / 4.0))
			return "0x" + "{:X}".format(value).zfill(l)
		if radix == 8:
			l = int(math.ceil(length / 3.0))
			return "0" + "{:o}".format(value).zfill(l)
		if radix == 2:
			return "0b" + "{:b}".format(value).zfill(length)
		# Default: Just return in decimal
		return str(value)
Пример #19
0
def new_cm_as_tankmenResponseS(self, data):
    """Hook wrapper: decorate each tankman's rank/role text with XP estimates.

    For every tankman in the response, estimates the battles needed for the
    next level and next skill from the dossier's average XP (boosted for
    premium vehicles), renders the configured prefix templates, prepends
    them to the rank/role strings, then calls the original handler.
    """
    for tankmenData in data['tankmen']:
        tankman = g_itemsCache.items.getTankman(tankmenData['tankmanID'])
        tankmanDossier = g_itemsCache.items.getTankmanDossier(tankman.invID)
        avgXp = float(tankmanDossier.getAvgXP())
        if tankman.isInTank:
            vehicle = g_itemsCache.items.getVehicle(tankman.vehicleInvID)
            if vehicle is not None and vehicle.isPremium:
                # Premium vehicles earn bonus XP; scale the average accordingly.
                avgXp = (avgXp + int(avgXp*(9 - max(min(vehicle.level, 9), 2))/10.0))*1.5
        # 'X' is shown when no average XP is available (avoids dividing by 0).
        nextLevelBattleCount = wotxp.numWithPostfix(math.ceil(tankman.getNextLevelXpCost()/avgXp)) if avgXp > 0 else 'X'
        nextSkillBattleCount = wotxp.numWithPostfix(math.ceil(tankman.getNextSkillXpCost()/avgXp)) if avgXp > 0 else 'X'
        values = {}
        values['freeXp'] = wotxp.numWithPostfix(tankman.descriptor.freeXP)
        values['nextLevelBattleCount'] = nextLevelBattleCount
        values['nextSkillBattleCount'] = nextSkillBattleCount
        values['nextLevelXpCost'] = wotxp.numWithPostfix(tankman.getNextLevelXpCost())
        values['nextSkillXpCost'] = wotxp.numWithPostfix(tankman.getNextSkillXpCost())
        rankPrefix = ''
        rolePrefix = ''
        # Pick template set: one when a new skill is ready, another otherwise.
        if tankman.newSkillCount[0] > 0:
            rankPrefix = wotxp.config.get("tankmanNewSkillRankPrefix", "[+{{freeXp}}]")
            rolePrefix = wotxp.config.get("tankmanNewSkillRolePrefix", "")
        else:
            rankPrefix = wotxp.config.get("tankmanRankPrefix", "[{{nextLevelBattleCount}}|{{nextLevelXpCost}}]")
            rolePrefix = wotxp.config.get("tankmanRolePrefix", "[{{nextSkillBattleCount}}|{{nextSkillXpCost}}]")
        # Substitute {{placeholder}} tokens in the templates.
        for key in values.keys():
            rankPrefix = rankPrefix.replace('{{%s}}' % key, values[key])
            rolePrefix = rolePrefix.replace('{{%s}}' % key, values[key])
        tankmenData['rank'] = rankPrefix + tankmenData['rank']
        tankmenData['role'] = rolePrefix + tankmenData['role']
    old_cm_as_tankmenResponseS(self, data)
Пример #20
0
def getDurationString(delta, showDays=True, showHours=True, showMinutes=True, showSeconds=False):
    """Render a timedelta as text like '2 days 4 hours 30 minutes'.

    Each unit is included according to its show* flag; minutes are printed
    even when zero (if enabled), the other units are skipped at zero.
    NOTE: hours use ceil of the remaining seconds, so a partial hour rounds
    up even when minutes are also shown (preserved from the original).
    """
    parts = []

    def emit(count, singular, plural, showZero=False):
        # Singular only for exactly 1; zero is skipped unless showZero.
        if count == 1:
            parts.append("1 %s" % singular)
        elif count != 0 or showZero:
            parts.append("%d %s" % (count, plural))

    if showDays:
        emit(delta.days, "day", "days")
    if showHours:
        emit(int(math.ceil(delta.seconds / 3600)), "hour", "hours")
    if showMinutes:
        emit(int(math.ceil((delta.seconds % 3600) / 60)), "minute", "minutes", showZero=True)
    if showSeconds:
        emit(delta.seconds % 60, "second", "seconds")

    return " ".join(parts)
Пример #21
0
 def __init__(self, input_fasta, database,
              num_parts     = None,   # How many fasta pieces should we make
              part_size     = None,   # What size in MB should a fasta piece be
              seqs_per_part = None,   # How many sequences in one fasta piece
              slurm_params  = None,   # Additional parameters for possible SLURM jobs
              parts_dir     = None,   # If you want a special directory for the fasta pieces
              **kwargs):
     # Determine number of parts #
     self.num_parts = None
     # Three possible options.  NOTE(review): when several are given, the
     # later option silently wins (seqs_per_part > part_size > num_parts).
     if num_parts:
         self.num_parts = num_parts
     if part_size:
         # Convert the human-readable size (e.g. "10MB") to bytes and derive
         # the part count from the input file's byte size.
         self.bytes_target = humanfriendly.parse_size(part_size)
         self.num_parts = int(math.ceil(input_fasta.count_bytes / self.bytes_target))
     if seqs_per_part:
         self.num_parts = int(math.ceil(input_fasta.count / seqs_per_part))
     # Default case: one part per available thread, capped at 32 #
     if self.num_parts is None:
         self.num_parts = kwargs.get('num_threads', min(multiprocessing.cpu_count(), 32))
     # In case the user has some special slurm params #
     self.slurm_params = slurm_params
     # In case the user wants a special parts directory #
     self.parts_dir = parts_dir
     # Super #
     SeqSearch.__init__(self, input_fasta, database, **kwargs)
Пример #22
0
    def plot_cost(self):
        """Plot training and test error curves for the configured cost function.

        Training errors are plotted per batch; test errors (sampled every
        testing_freq batches) are tiled and padded so both curves share the
        same x axis.  X tick labels show epoch numbers, thinned to roughly
        20 labels.  Raises ShowNetError if show_cost is unknown.
        """
        if self.show_cost not in self.train_outputs[0][0]:
            raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
        train_errors = [o[0][self.show_cost][self.cost_idx] for o in self.train_outputs]
        test_errors = [o[0][self.show_cost][self.cost_idx] for o in self.test_outputs]

        numbatches = len(self.train_batch_range)
        # Repeat each test point testing_freq times, then pad with the last
        # value (or truncate) so the series matches train_errors in length.
        test_errors = numpy.row_stack(test_errors)
        test_errors = numpy.tile(test_errors, (1, self.testing_freq))
        test_errors = list(test_errors.flatten())
        test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
        test_errors = test_errors[:len(train_errors)]

        numepochs = len(train_errors) / float(numbatches)
        pl.figure(1)
        x = range(0, len(train_errors))
        pl.plot(x, train_errors, 'k-', label='Training set')
        pl.plot(x, test_errors, 'r-', label='Test set')
        pl.legend()
        # One tick per epoch boundary; label only every epoch_label_gran-th one.
        ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
        epoch_label_gran = int(ceil(numepochs / 20.)) # aim for about 20 labels
        epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) # but round up to the next multiple of 10
        ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))

        pl.xticks(ticklocs, ticklabels)
        pl.xlabel('Epoch')
#        pl.ylabel(self.show_cost)
        pl.title(self.show_cost)
Пример #23
0
 def readEgg(self,filename):
     """ reads the egg file and stores the info

     For every egg group an Image record is created; its position/size come
     from the third vertex of each polygon, and its 'extend' from point
     primitives (components rounded up with math.ceil).
     """
     egg = EggData()
     egg.read(filename)
     for char in getAllEggGroup(egg):
         name = str(char.getName())
         
         image = Image()
         image.name = name
         self.images[name] = image 
         
         # Group origin; vertex positions below are taken relative to it.
         pos = char.getComponentVec3(0)
         
         for p in getAllType(char,EggPolygon):     
                  
             if p.getNumVertices() > 2:
                 v = p.getVertex(2)
                 vpos = v.getPos3()-pos
                 image.x = pos.getX()
                 image.y = pos.getZ()
                 image.w = vpos.getX()
                 image.h = vpos.getZ()
                 
             # NOTE(review): this point loop is nested inside the polygon
             # loop, so it re-runs once per polygon — possibly an indentation
             # slip; confirm whether it belongs at the group level.
             for l in getAllType(char,EggPoint):
                 extend = l.getVertex(0).getPos3()-pos
                 image.extend = math.ceil(extend.getX()),math.ceil(extend.getZ())
Пример #24
0
    def gaussian_smear(self, r):
        """
        Applies an isotropic Gaussian smear of width (standard deviation) r to the potential field. This is necessary to
        avoid finding paths through narrow minima or nodes that may exist in the field (although any potential or
        charge distribution generated from GGA should be relatively smooth anyway). The smearing obeys periodic
        boundary conditions at the edges of the cell.

        :param r: Smearing width in cartesian coordinates, in the same units as the structure lattice vectors
        """
        # Since scaling factor in fractional coords is not isotropic, have to have different radii in 3 directions
        a_lat = self.__s.lattice.a
        b_lat = self.__s.lattice.b
        c_lat = self.__s.lattice.c

        # Conversion factors for discretization of v: the smearing radius in
        # grid points along each axis, rounded up so the kernel never underspans r.
        v_dim = self.__v.shape
        r_frac = (r / a_lat, r / b_lat, r / c_lat)
        r_disc = (int(math.ceil(r_frac[0] * v_dim[0])), int(math.ceil(r_frac[1] * v_dim[1])),
                  int(math.ceil(r_frac[2] * v_dim[2])))

        # Apply smearing
        # Build the kernel: cartesian distances (in units of r) from the kernel
        # centre over a +/-2*r_disc window along each axis.
        gauss_dist = np.zeros((r_disc[0] * 4 + 1, r_disc[1] * 4 + 1, r_disc[2] * 4 + 1))
        for g_a in np.arange(-2.0 * r_disc[0], 2.0 * r_disc[0] + 1, 1.0):
            for g_b in np.arange(-2.0 * r_disc[1], 2.0 * r_disc[1] + 1, 1.0):
                for g_c in np.arange(-2.0 * r_disc[2], 2.0 * r_disc[2] + 1, 1.0):
                    g = np.array([g_a / v_dim[0], g_b / v_dim[1], g_c / v_dim[2]]).T
                    gauss_dist[int(g_a + r_disc[0])][int(g_b + r_disc[1])][int(g_c + r_disc[2])] = la.norm(np.dot(self.__s.lattice.matrix, g))/r
        gauss = scipy.stats.norm.pdf(gauss_dist)
        # Normalize so the kernel sums to 1 (preserves the field's total).
        gauss = gauss/np.sum(gauss, dtype=float)
        # 'wrap' padding implements the periodic boundary conditions.
        padded_v = np.pad(self.__v, ((r_disc[0], r_disc[0]), (r_disc[1], r_disc[1]), (r_disc[2], r_disc[2])), mode='wrap')
        smeared_v = scipy.signal.convolve(padded_v, gauss, mode='valid')
        self.__v = smeared_v
Пример #25
0
    def from_jd(cls, jd):
        """
            Convert a Julian day number to a year/month/day tuple
            of this calendar (matching jQuery calendars algorithm)

            @param jd: the Julian day number
        """

        # Normalize to the half-day boundary used by this calendar.
        jd = math.floor(jd) + 0.5

        # Days elapsed since the calendar epoch (year 475, month 1, day 1).
        depoch = jd - cls.to_jd(475, 1, 1)

        # Split into 1,029,983-day grand cycles plus the remainder within one.
        cycle = math.floor(depoch / 1029983)
        cyear = math.fmod(depoch, 1029983)

        # Year within the 2820-year cycle.
        if cyear == 1029982:
            ycycle = 2820
        else:
            aux1 = math.floor(cyear / 366)
            aux2 = math.fmod(cyear, 366)
            ycycle = math.floor(((2134 * aux1) + (2816 * aux2) + 2815) / 1028522) + aux1 + 1

        year = ycycle + (2820 * cycle) + 474
        if year <= 0:
            # Skip year zero.
            year -= 1

        # Day of year, then month: the first 6 months have 31 days, the rest 30.
        yday = jd - cls.to_jd(year, 1, 1) + 1
        month = math.ceil(yday / 31) if yday <= 186 else math.ceil((yday - 6) / 30)

        day = jd - cls.to_jd(year, month, 1) + 1

        return (int(year), int(month), int(day))
Пример #26
0
    def _max_min(self):
        """
        Calculate minimum and maximum for the axis adding some padding.
        There are always a maximum of ten units for the length of the axis.

        Returns (mini, maxi), the padded axis bounds, and stores the chosen
        tick spacing in self.unit.
        """
        value = length = self._max - self._min

        # BUG FIX: 'sign = value/value' was always 1 (and raised
        # ZeroDivisionError for an empty range); the intent is to remember
        # the sign so it can be re-applied after the abs() work below.
        sign = -1 if value < 0 else 1
        zoom = less_than_one(value) or 1
        value = value * zoom
        ab = abs(value)
        value = math.ceil(ab * 1.1) * sign

        # calculate tick
        l = math.log10(abs(value))
        exp = int(l)
        mant = l - exp
        unit = math.ceil(math.ceil(10**mant) * 10**(exp-1))
        # recalculate max
        value = math.ceil(value / unit) * unit
        unit = unit / zoom

        if value / unit > 9:
            # no more than 10 ticks
            unit *= 2
        self.unit = unit
        scale = value / length
        mini = math.floor(self._min * scale) / zoom
        maxi = math.ceil(self._max * scale) / zoom
        return mini, maxi
Пример #27
0
 def __setAccountsAttrs(self, isPremiumAccount, premiumExpiryTime = 0):
     """Update the lobby header's premium-account button and labels.

     NOTE(review): this looks like decompiler output.  The statements after
     'raise AssertionError' are unreachable as written, and in the
     non-premium branch premiumBtnLbl / buyPremiumLabel / canUpdatePremium /
     hasPersonalDiscount are never bound, so the final
     as_setPremiumParamsS call would raise NameError.  The original source
     was presumably 'assert premiumExpiryTime > 0' followed by that block —
     confirm against the game sources before changing behavior.
     """
     disableTTHeader = ''
     disableTTBody = ''
     isNavigationEnabled = True
     if self.prbDispatcher:
         isNavigationEnabled = not self.prbDispatcher.getFunctionalState().isNavigationDisabled()
     if isPremiumAccount:
         if not premiumExpiryTime > 0:
             raise AssertionError
             # Unreachable (see docstring): premium time-left label handling.
             deltaInSeconds = float(time_utils.getTimeDeltaFromNow(time_utils.makeLocalServerTime(premiumExpiryTime)))
             if deltaInSeconds > time_utils.ONE_DAY:
                 timeLeft = math.ceil(deltaInSeconds / time_utils.ONE_DAY)
                 timeMetric = i18n.makeString('#menu:header/account/premium/days')
             else:
                 timeLeft = math.ceil(deltaInSeconds / time_utils.ONE_HOUR)
                 timeMetric = i18n.makeString('#menu:header/account/premium/hours')
             buyPremiumLabel = i18n.makeString('#menu:headerButtons/doLabel/premium')
             premiumBtnLbl = makeHtmlString('html_templates:lobby/header', 'premium-account-label', {'timeMetric': timeMetric,
              'timeLeft': timeLeft})
             canUpdatePremium = deltaInSeconds < time_utils.ONE_YEAR
         else:
             canUpdatePremium = True
             premiumBtnLbl = makeHtmlString('html_templates:lobby/header', 'base-account-label')
             buyPremiumLabel = i18n.makeString('#menu:common/premiumBuy')
         if not canUpdatePremium:
             disableTTHeader = i18n.makeString(TOOLTIPS.LOBBY_HEADER_BUYPREMIUMACCOUNT_DISABLED_HEADER)
             disableTTBody = i18n.makeString(TOOLTIPS.LOBBY_HEADER_BUYPREMIUMACCOUNT_DISABLED_BODY, number=time_utils.ONE_YEAR / time_utils.ONE_DAY)
         self.as_doDisableHeaderButtonS(self.BUTTONS.PREM, canUpdatePremium and isNavigationEnabled)
         hasPersonalDiscount = len(g_itemsCache.items.shop.personalPremiumPacketsDiscounts) > 0
         tooltip = canUpdatePremium or {'header': disableTTHeader,
          'body': disableTTBody}
     else:
         tooltip = TOOLTIPS.HEADER_PREMIUM_EXTEND if isPremiumAccount else TOOLTIPS.HEADER_PREMIUM_BUY
     self.as_setPremiumParamsS(isPremiumAccount, premiumBtnLbl, buyPremiumLabel, canUpdatePremium, disableTTHeader, disableTTBody, hasPersonalDiscount, tooltip, TOOLTIP_TYPES.COMPLEX)
Пример #28
0
def generate_shifts_2d(width, height, n_samples, with_hot=False):
    """Draw ``n_samples`` random integer (x, y) shifts on the GPU.

    :param width: exclusive upper bound for the x shifts
    :param height: exclusive upper bound for the y shifts
    :param n_samples: number of shift pairs to draw
    :param with_hot: if True, additionally build a one-hot
        (width*height, n_samples) encoding of the shifts; otherwise the
        shifts are stacked into a (2, n_samples) array (y row first)
    :return: (x_shifts, y_shifts, shifts_hot) when ``with_hot`` is True,
        otherwise (x_shifts, y_shifts, shifts)
    """
    # Subtract a small epsilon so uniform samples scaled by the bound never
    # land exactly on `width`/`height` after truncation to uint32.
    x_shifts = gpu_rng.gen_uniform((n_samples,), np.float32) * (width - 0.01)
    x_shifts = x_shifts.astype(np.uint32)

    y_shifts = gpu_rng.gen_uniform((n_samples,), np.float32) * (height - 0.01)
    y_shifts = y_shifts.astype(np.uint32)

    # Launch configuration is identical for both kernels, so compute it once
    # (previously duplicated in each branch).
    threads_per_block = 32
    n_blocks = int(math.ceil(n_samples / threads_per_block))

    if with_hot:
        shifts_hot = gp.empty((width * height, n_samples), np.float32)
        # Strides are in bytes; /4 converts them to float32 element strides.
        gpu_shift_to_hot_2d(x_shifts, y_shifts, shifts_hot,
                            np.uint32(shifts_hot.strides[0]/4),
                            np.uint32(shifts_hot.strides[1]/4),
                            np.uint32(width), np.uint32(height), np.uint32(n_samples),
                            block=(threads_per_block, 1, 1), grid=(n_blocks, 1))
        return x_shifts, y_shifts, shifts_hot
    else:
        shifts = gp.empty((2, n_samples), np.float32)
        gpu_vstack(y_shifts, x_shifts, shifts,
                   np.uint32(shifts.strides[0]/4), np.uint32(shifts.strides[1]/4),
                   np.uint32(n_samples),
                   block=(threads_per_block, 1, 1), grid=(n_blocks, 1))
        return x_shifts, y_shifts, shifts
Пример #29
0
def getCtInfo(id):
    '''Parse an OpenVZ-style container config file and return its basic info.

    Reads <VPS_CONF_DIR>/<id>.conf, which consists of KEY="value" lines
    (with optional # comments), and extracts hostname, description,
    RAM/swap sizes and IP addresses.

    :param id: container id (string form, used to build the file name)
    :return: dict with keys description, ip, hostname, ram, vmStatus, swap
    '''
    infos = {'description':'','ip':[],'hostname':'','ram':0}
    infos['vmStatus'] = VE_STOPPED
    #infos['ips'] = getCtIp(id)

    # Default so swap is still computable when no PRIVVMPAGES line exists
    # (previously this raised NameError at the swap calculation below).
    privvmpages = 0
    # `with` guarantees the config file is closed (it was previously leaked).
    with open(VPS_CONF_DIR+'/'+id+'.conf') as f:
        for line in f:
            # Strip trailing comments and surrounding whitespace.
            line = re.sub('#.*','',line).strip()
            if line != '':
                m = re.search('([A-Z_]*)="(.*)"', line)
                if m is None:
                    # Skip lines not in KEY="value" form instead of crashing.
                    continue
                key = m.group(1)
                value = m.group(2).replace('\\"','"')
                if   key == 'HOSTNAME':
                    infos['hostname'] = value
                elif key == 'DESCRIPTION':
                    infos['description'] = unescape(value)
                elif key == 'PRIVVMPAGES':
                    # Values may be "barrier:limit" -- use the barrier.
                    # pages -> MiB assuming 4 KiB pages (256 pages per MiB).
                    privvmpages = int(ceil(int(value.split(':')[0])/256.0))
                elif key == 'LOCKEDPAGES':
                    infos['ram'] = int(ceil(int(value.split(':')[0])/256.0))
                elif key == 'IP_ADDRESS':
                    infos['ip'] = re.sub('\s+',' ',value).strip().split(' ')

    # Swap is the part of privvmpages not backed by locked (RAM) pages.
    infos['swap'] = privvmpages - infos['ram']

    return infos
Пример #30
0
    def testFloor(self):
        """Exercise math.floor: argument checking, return type (float on
        Python 2), rounding toward negative infinity, very large floats,
        IEEE specials, and the __float__ conversion protocol."""
        # floor() requires exactly one argument.
        self.assertRaises(TypeError, math.floor)
        # These types will be int in py3k.
        self.assertEquals(float, type(math.floor(1)))
        self.assertEquals(float, type(math.floor(1L)))
        self.assertEquals(float, type(math.floor(1.0)))
        # floor rounds toward -infinity, unlike int() truncation.
        self.ftest('floor(0.5)', math.floor(0.5), 0)
        self.ftest('floor(1.0)', math.floor(1.0), 1)
        self.ftest('floor(1.5)', math.floor(1.5), 1)
        self.ftest('floor(-0.5)', math.floor(-0.5), -1)
        self.ftest('floor(-1.0)', math.floor(-1.0), -1)
        self.ftest('floor(-1.5)', math.floor(-1.5), -2)
        # pow() relies on floor() to check for integers
        # This fails on some platforms - so check it here
        self.ftest('floor(1.23e167)', math.floor(1.23e167), 1.23e167)
        self.ftest('floor(-1.23e167)', math.floor(-1.23e167), -1.23e167)
        # NOTE(review): these two assertions call math.ceil inside testFloor
        # -- possibly copied from a testCeil; confirm they are intentional.
        self.assertEquals(math.ceil(INF), INF)
        self.assertEquals(math.ceil(NINF), NINF)
        self.assert_(math.isnan(math.floor(NAN)))

        # Objects convertible via __float__ are floored on the converted value.
        class TestFloor(object):
            def __float__(self):
                return 42.3
        class TestNoFloor(object):
            pass
        self.ftest('floor(TestFloor())', math.floor(TestFloor()), 42)
        self.assertRaises(TypeError, math.floor, TestNoFloor())

        # A __floor__ attribute attached to the *instance* is not used by
        # math.floor here, so TypeError is still raised; extra arguments
        # are rejected as well.
        t = TestNoFloor()
        t.__floor__ = lambda *args: args
        self.assertRaises(TypeError, math.floor, t)
        self.assertRaises(TypeError, math.floor, t, 0)
Пример #31
0
def get_course_info(request, course_id=0):
    """Returns JSON data about a course

    :param request: HTTP Request
    :type request: Request
    :param course_id: Unizin Course ID, defaults to 0
    :type course_id: int, optional
    :return: JSON to be used
    :rtype: str
    """
    course_id = canvas_id_to_incremented_id(course_id)
    today = timezone.now()

    try:
        course = Course.objects.get(id=course_id)
    except ObjectDoesNotExist:
        # Unknown course: empty JSON object rather than an error page.
        return HttpResponse("{}")

    course_resource_list = []
    try:
        resource_list = Resource.objects.get_course_resource_type(course_id)
        if resource_list is not None:
            logger.info(
                f"Course {course_id} resources data type are: {resource_list}")
            resource_defaults = settings.RESOURCE_VALUES
            # Map raw resource types to their generic, capitalized display names.
            for item in resource_list:
                result = utils.look_up_key_for_value(resource_defaults, item)
                if result is not None:
                    course_resource_list.append(result.capitalize())
            logger.info(
                f"Mapped generic resource types in a course {course_id}: {course_resource_list}"
            )
    except Exception as e:
        # Fix: previously `except (ObjectDoesNotExist, Exception)` --
        # Exception already covers ObjectDoesNotExist, so the tuple was
        # redundant. Resource info is best-effort; log and continue.
        logger.info(
            f"getting the course {course_id} resources types has errors due to:{e}"
        )

    course_resource_list.sort()

    resp = model_to_dict(course)

    course_start, course_end = course.get_course_date_range()

    # Week numbers count from the course start; partial weeks round up.
    current_week_number = math.ceil((today - course_start).days / 7)
    total_weeks = math.ceil((course_end - course_start).days / 7)

    if course.term is not None:
        resp['term'] = model_to_dict(course.term)
    else:
        resp['term'] = None

    # Have a fixed maximum number of weeks
    if total_weeks > settings.MAX_DEFAULT_WEEKS:
        logger.debug(
            f'{total_weeks} is greater than {settings.MAX_DEFAULT_WEEKS} setting total weeks to default.'
        )
        total_weeks = settings.MAX_DEFAULT_WEEKS

    resp['current_week_number'] = current_week_number
    resp['total_weeks'] = total_weeks
    resp['course_view_options'] = CourseViewOption.objects.get(
        course=course).json(include_id=False)
    resp['resource_types'] = course_resource_list

    return HttpResponse(json.dumps(resp, default=str))
Пример #32
0
async def get_price_calendar(request):
    """Return the per-day price calendar for one hotel.

    Request JSON body: hotel_id (required), start_time / end_time
    ("%Y-%m-%d", optional; when end_time is absent the range is today plus
    the configured number of calendar days).

    :return: rest_result with a list of day entries
        {price, type, checkin, checkout, special[, bug_price_type]}
    """
    logger = logging.getLogger(__name__)
    body = request.json
    hotel_id = body.get("hotel_id", "")
    start_time = body.get("start_time")
    end_time = body.get("end_time")
    # Random request id so related log lines can be correlated.
    uid = ''.join(choices(alpha_set, k=24))
    logger.info(
        f"uid: {uid}.get {hotel_id} calendar with start_time: {start_time}, end_time: {end_time}"
    )
    if not hotel_id:
        logger.warning("hotel_id cannot be None")
        return rest_result(request, {
            "status": 400,
            "errmsg": "invalid hotel_id"
        })

    db = databases("scripture")
    hotel = await db["statics.hotels.prices"].find_one({"hotel_id": hotel_id})

    if not hotel:
        logger.info(f"hotel_id:{hotel_id} corresponding no hotel ")
        return rest_result(request, {"status": 200, "data": []})
    # type == 1: a price is available that day; type == 0: no quote that day;
    # type == -1: the price for that day is invalid
    if not end_time:
        base_day = datetime.now()
        days = await get_calendar_days()
    else:
        base_day = datetime.strptime(start_time, "%Y-%m-%d")
        days = (datetime.strptime(end_time, "%Y-%m-%d") - base_day).days
    # Every calendar date we must report, as "YYYY-mm-dd" strings.
    date = [(base_day + timedelta(days=i)).strftime("%Y-%m-%d")
            for i in range(int(days))]
    prices = {}
    for day in hotel["prices"]:
        if day["checkin"] not in date:
            continue
        # Prefer the tax-free price when present.
        price = day.get("without_tax_price") or day["price"]
        if isinstance(price, str):
            # A string price is treated as "sold out".
            flag = 0
            price = "已抢光"
            special = False
        elif isinstance(price, int) or isinstance(price, float):
            flag = 1
            special = day.get("special", False)
            price = math.ceil(price)
        else:
            flag = -1
            special = False
        prices[day["checkin"]] = {
            "price": price,
            "type": flag,
            "checkin": day["checkin"],
            "checkout": day["checkout"],
            "special": special,
        }
        if "bug_price_type" in day:
            prices[day["checkin"]]["bug_price_type"] = day["bug_price_type"]

    # Fill the remaining dates with "no valid price" placeholders.
    # NOTE(review): these entries carry no "checkout" key, unlike the ones
    # built above -- confirm consumers tolerate the missing field.
    for checkin in date:
        if checkin not in prices:
            prices[checkin] = {
                "price": "",
                "type": -1,
                "checkin": checkin,
                "special": False,
            }
    prices = list(prices.values())
    if hotel.get("selecting"):
        logger.info(f"{uid}_hotel_id:{hotel_id} Refreshing")
        return rest_result(request, {
            "status": 200,
            "data": prices,
            "msg": "Refreshing..."
        })
    else:
        logger.info(f"{uid}_hotel_id:{hotel_id} get price succeed")
        return rest_result(request, {"status": 200, "data": prices})
def get_pages(url):
    """Fetch *url* and return the number of result pages (10 items/page)."""
    response = requests.get(url=url, headers=HEADERS)
    tree = etree.HTML(response.text)
    # Total result count lives in the "lightblue total" span.
    total = float(tree.xpath("//span[@class='lightblue total']/text()")[0])
    return math.ceil(total / 10)
Пример #34
0
import math


def calculateMonthsUntilPaidOff(bal, apr, monthly):
    """Return the (fractional) number of months needed to pay off a balance.

    Standard credit-card payoff formula with daily compounding
    (365 days per year, 30-day months):

        n = -(1/30) * ln(1 + b/p * (1 - (1 + i)^30)) / ln(1 + i),  i = apr/365

    :param bal: current balance owed
    :param apr: annual percentage rate as a fraction (e.g. 0.12 for 12%)
    :param monthly: fixed monthly payment
    :return: months until paid off (float; callers typically math.ceil it)
    """
    # Bug fix: the correct result below used to be overwritten by a stray
    # `time = bal * math.log(apr + monthly)` line; that dead store is removed.
    time = -1 / 30 * math.log(1 + bal / monthly *
                              (1 -
                               (1 + apr / 365)**30)) / math.log(1 + apr / 365)
    return time


# --- Interactive driver (Python 2: uses raw_input) ---
print("*" * 20 + "Pay off in months" + "*" * 20)

# NOTE(review): inputs are read as ints; the APR prompt suggests a whole
# percentage (e.g. 12) while the formula divides apr by 365 as if it were a
# fraction (e.g. 0.12) -- confirm the expected units.
balance = int(raw_input("What is your balance "))
apr1 = int(raw_input("What is the APR in months as a percentage"))
month = int(raw_input("What is the monthly payment you can make: "))

# Round the fractional month count up to whole months.
func = math.ceil(calculateMonthsUntilPaidOff(balance, apr1, month))

print("It will take you " + str(func) + "months to pay")
Пример #35
0
    def random_rotate_img_bbox(self,
                               img,
                               text_polys,
                               degrees: numbers.Number or list or tuple
                               or np.ndarray,
                               same_size=False):
        """
        Pick a random angle from the given range and rotate both the image
        and its text boxes by it.
        :param img: the image (H x W [x C] ndarray)
        :param text_polys: text boxes; each box is indexed as 4 points x (x, y)
        :param degrees: rotation range -- a single non-negative number
            (interpreted as (-degrees, +degrees)) or a 2-element
            list/tuple/ndarray (min, max)
        :param same_size: whether to keep the output the same size as the input
        :return: the rotated image and the rotated text polygons (float32)
        """
        if isinstance(degrees, numbers.Number):
            if degrees < 0:
                raise ValueError(
                    "If degrees is a single number, it must be positive.")
            degrees = (-degrees, degrees)
        elif isinstance(degrees, list) or isinstance(
                degrees, tuple) or isinstance(degrees, np.ndarray):
            if len(degrees) != 2:
                raise ValueError(
                    "If degrees is a sequence, it must be of len 2.")
            degrees = degrees
        else:
            raise Exception(
                'degrees must in Number or list or tuple or np.ndarray')
        # ---------------------- rotate the image ----------------------
        w = img.shape[1]
        h = img.shape[0]
        angle = np.random.uniform(degrees[0], degrees[1])

        if same_size:
            nw = w
            nh = h
        else:
            # degrees to radians
            rangle = np.deg2rad(angle)
            # size of the axis-aligned bounding box of the rotated image
            nw = (abs(np.sin(rangle) * h) + abs(np.cos(rangle) * w))
            nh = (abs(np.cos(rangle) * h) + abs(np.sin(rangle) * w))
        # build the rotation matrix about the new image centre
        rot_mat = cv2.getRotationMatrix2D((nw * 0.5, nh * 0.5), angle, 1)
        # offset from the original image centre to the new image centre
        rot_move = np.dot(rot_mat, np.array([(nw - w) * 0.5, (nh - h) * 0.5,
                                             0]))
        # fold that translation into the affine matrix
        rot_mat[0, 2] += rot_move[0]
        rot_mat[1, 2] += rot_move[1]
        # apply the affine warp
        rot_img = cv2.warpAffine(img,
                                 rot_mat,
                                 (int(math.ceil(nw)), int(math.ceil(nh))),
                                 flags=cv2.INTER_LANCZOS4)

        # ---------------------- correct the bbox coordinates ----------------------
        # rot_mat is the final affine matrix; map the four corner points of
        # each original bbox into the rotated frame
        rot_text_polys = list()
        for bbox in text_polys:
            point1 = np.dot(rot_mat, np.array([bbox[0, 0], bbox[0, 1], 1]))
            point2 = np.dot(rot_mat, np.array([bbox[1, 0], bbox[1, 1], 1]))
            point3 = np.dot(rot_mat, np.array([bbox[2, 0], bbox[2, 1], 1]))
            point4 = np.dot(rot_mat, np.array([bbox[3, 0], bbox[3, 1], 1]))
            rot_text_polys.append([point1, point2, point3, point4])
        return rot_img, np.array(rot_text_polys, dtype=np.float32)
Пример #36
0
def shodan_search_worker(fk,
                         query,
                         search_type,
                         category,
                         country=None,
                         coordinates=None,
                         all_results=False):
    """Page through a Shodan search and persist every match as a Device.

    :param fk: id of the Search row the created Devices are linked to
    :param query: base Shodan query string
    :param search_type: saved on each Device (as both `type` and `query`)
    :param category: saved on each Device
    :param country: optional country filter ("country:<cc> <query>")
    :param coordinates: optional "lat,lon" centre for a "geo:<c>,20" filter
    :param all_results: fetch every page when True, only the first otherwise
    """
    results = True
    page = 1
    SHODAN_API_KEY = keys['keys']['shodan']
    pages = 0
    screenshot = ""
    print(query)

    while results:
        # `pages` is derived from the first response; stop once reached.
        if pages == page:
            results = False
            break

        # Shodan sometimes fails with no reason, sleeping when it happens and it prevents rate limitation
        search = Search.objects.get(id=fk)
        api = Shodan(SHODAN_API_KEY)
        fail = 0

        try:
            time.sleep(5)
            if coordinates:
                results = api.search("geo:" + coordinates + ",20 " + query,
                                     page)
            if country:
                results = api.search("country:" + country + " " + query, page)
        except:
            fail = 1
            print('fail1, sleeping...')

        # Up to three blind retries after a failed first attempt.
        # NOTE(review): `fail` is never reset, so every retry block runs even
        # if an earlier retry already succeeded -- confirm this is intended.
        if fail == 1:
            try:
                time.sleep(10)
                if coordinates:
                    results = api.search("geo:" + coordinates + ",20 " + query,
                                         page)
                if country:
                    results = api.search("country:" + country + " " + query,
                                         page)
            except Exception as e:
                print(e)

        if fail == 1:
            try:
                time.sleep(10)
                if coordinates:
                    results = api.search("geo:" + coordinates + ",20 " + query,
                                         page)
                if country:
                    results = api.search("country:" + country + " " + query,
                                         page)
            except Exception as e:
                print(e)

        if fail == 1:
            try:
                time.sleep(10)
                if coordinates:
                    results = api.search("geo:" + coordinates + ",20 " + query,
                                         page)
                if country:
                    results = api.search("country:" + country + " " + query,
                                         page)
            except Exception as e:
                results = False
                print(e)

        # Give up on this search when no total can be read from the response.
        try:
            total = results['total']

            if total == 0:
                print("no results")
                break
        except Exception as e:
            print(e)
            break

        # Shodan returns up to 100 matches per page.
        pages = math.ceil(total / 100) + 1
        print(pages)

        for counter, result in enumerate(results['matches']):
            lat = str(result['location']['latitude'])
            lon = str(result['location']['longitude'])
            city = ""
            indicator = []
            # print(counter)
            # time.sleep(20)

            try:
                product = result['product']
            except:
                product = ""

            if 'vulns' in result:
                vulns = [*result['vulns']]
            else:
                vulns = ""

            if result['location']['city'] != None:
                city = result['location']['city']

            hostnames = ""
            try:
                if 'hostnames' in result:
                    hostnames = result['hostnames'][0]
            except:
                pass

            # 'SAILOR' pages embed a GNSS position span -- prefer those
            # coordinates over Shodan's geolocation.
            try:
                if 'SAILOR' in result['http']['title']:
                    html = result['http']['html']
                    soup = BeautifulSoup(html)
                    for gps in soup.find_all("span", {"id": "gnss_position"}):
                        coordinates = gps.contents[0]
                        space = coordinates.split(' ')
                        if "W" in space:
                            lon = "-" + space[2][:-1]
                        else:
                            lon = space[2][:-1]
                        lat = space[0][:-1]
            except Exception as e:
                pass

            # Save the screenshot (if any) to disk and collect its labels.
            if 'opts' in result:
                try:
                    screenshot = result['opts']['screenshot']['data']

                    with open(
                            "app_kamerka/static/images/screens/" +
                            result['ip_str'] + ".jpg", "wb") as fh:
                        fh.write(base64.b64decode(screenshot))
                        fh.close()
                        for i in result['opts']['screenshot']['labels']:
                            indicator.append(i)
                except Exception as e:
                    pass

            if query == "Niagara Web Server":
                try:
                    soup = BeautifulSoup(result['http']['html'],
                                         features="html.parser")
                    nws = soup.find("div", {"class": "top"})
                    indicator.append(nws.contents[0])
                except:
                    pass

            # get indicator from niagara fox
            if result['port'] == 1911 or result['port'] == 4911:
                try:
                    fox_data_splitted = result['data'].split("\n")
                    for i in fox_data_splitted:
                        if "station.name" in i:
                            splitted = i.split(":")
                            indicator.append(splitted[1])
                except:
                    pass

            # get indicator from tank
            if result['port'] == 10001:
                try:
                    tank_info = result['data'].split("\r\n\r\n")
                    indicator.append(tank_info[1])
                except:
                    pass

            if result['port'] == 2000:
                try:
                    ta_data = result['data'].split("\\n")
                    indicator.append(ta_data[1][:-3])
                except Exception as e:
                    pass

            # port 502: pull the "-- Project" name from the banner, if present.
            if result['port'] == 502:
                try:
                    sch_el = result['data'].split('\n')
                    if sch_el[4].startswith("-- Project"):
                        indicator.append(sch_el[4].split(": ")[1])
                except:
                    pass

            # A NMEA GPGGA sentence in the banner gives precise lat/lon.
            if "GPGGA" in result['data']:
                try:
                    splitted_data = result['data'].split('\n')
                    for i in splitted_data:
                        if "GPGGA" in i:
                            msg = pynmea2.parse(i)
                            lat = msg.latitude
                            lon = msg.longitude
                            break
                except Exception as e:
                    print(e)

            # port 102: Plant / PLC / Module name lines from the banner.
            if result['port'] == 102:
                try:
                    s7_data = result['data'].split("\n")
                    for i in s7_data:
                        if i.startswith("Plant"):
                            indicator.append(i.split(":")[1])
                        if i.startswith("PLC"):
                            indicator.append(i.split(":")[1])
                        if i.startswith("Module name"):
                            indicator.append(i.split(":")[1])
                except:
                    pass
            # get indicator from bacnet
            if result['port'] == 47808:
                try:
                    bacnet_data_splitted = result['data'].split("\n")
                    for i in bacnet_data_splitted:
                        if "Description" in i:
                            splitted1 = i.split(":")
                            indicator.append(splitted1[1])
                        if "Object Name" in i:
                            splitted2 = i.split(":")
                            indicator.append(splitted2[1])

                        if "Location" in i:
                            splitted3 = i.split(":")
                            indicator.append(splitted3[1])
                except:
                    pass

            # Persist the match with everything collected above.
            device = Device(search=search,
                            ip=result['ip_str'],
                            product=product,
                            org=result['org'],
                            data=result['data'],
                            port=str(result['port']),
                            type=search_type,
                            city=city,
                            lat=lat,
                            lon=lon,
                            country_code=result['location']['country_code'],
                            query=search_type,
                            category=category,
                            vulns=vulns,
                            indicator=indicator,
                            hostnames=hostnames,
                            screenshot=screenshot)
            device.save()

        page = page + 1
        if not all_results:
            results = False
Пример #37
0
 def pages(self):
     """Total page count: ceil(total / page_size), or 1 when unpaginated."""
     if not self.page_size:
         return 1
     return int(ceil(self.total / float(self.page_size)))
Пример #38
0
def unique_string(length=UUID_LENGTH):
    '''Generate a unique string'''
    # Repeat a fresh UUID enough times to cover the requested length.
    repeats = int(math.ceil(length / float(UUID_LENGTH)))
    base = str(uuid4()) * repeats
    if length:
        return base[:length]
    return base
Пример #39
0
# Contest-style solver: t test cases; each gives n, k and a binary string s.
t=int(input())
import math
for i in range(t):
    n,k=list(map(int,input().split()))
    s=input()
    oneindex=[]  # positions of '1' characters in s
    count=0
    for j in range(n):
        if s[j]=='1':
            oneindex.append(j)
    if len(oneindex)<2:
        if len(oneindex)==0:
            # No ones at all: ceil(n / (k+1)) fits.
            print(math.ceil(n/(k+1)))
            continue
        else:
            # Exactly one '1': floor(n / (k+1)).
            print(math.floor(n / (k + 1)))
            continue
    # Count gaps between consecutive ones of size >= 2k+1.
    # NOTE(review): the last iteration uses n - oneindex[j] + oneindex[0],
    # i.e. the wrap-around gap, treating the string as circular -- confirm.
    for j in range(len(oneindex)):
        if j==len(oneindex)-1:
            if abs(n-oneindex[j]+oneindex[0])>=2*k+1:
                count+=1
            continue
        if abs(oneindex[j]-oneindex[j+1])>=2*k+1:
            count+=1
    print(count)
Пример #40
0
async def get_bug_price(request):
    """
    Read only the ultra-low ("bug") price data and compute, per hotel, the
    average, mode and maximum price within the requested date range.
    """
    logger = logging.getLogger(__name__)
    body = request.json
    cms_ids = body.get("cms_ids", [])
    start_time = body.get("start_time", datetime.now().strftime("%Y-%m-%d"))
    num = body.get("num", 1)  # read but not used below
    try:
        start_time = datetime.strptime(start_time, "%Y-%m-%d")
    except:
        logger.error(f"invalid start_time: {body['start_time']}")
        return rest_result(
            request,
            {
                "status":
                400,
                "errmsg":
                f"invalid start_time: {body['start_time']} should be YYYY-mm-dd",
            },
        )
    end_time = body.get("end_time", "")
    if not end_time:
        # Default range: start + the configured number of calendar days.
        days = await get_calendar_days()
        end_time = start_time + timedelta(days=days)
    else:
        try:
            end_time = datetime.strptime(end_time, "%Y-%m-%d")
        except:
            logger.error(f"invalid end_time: {body['end_time']}")
            return rest_result(
                request,
                {
                    "status":
                    400,
                    "errmsg":
                    f"invalid end_time: {body['end_time']} should be YYYY-mm-dd",
                },
            )
    show_detail = body.get("show_detail", False)
    result = {}
    db = databases("scripture")
    for cid in cms_ids:
        result[cid] = {}
        data = await db["statics.hotels.prices"].find_one({"hotel_id": cid})
        cid_result = {}     # histogram: price band -> occurrence count
        max_price = 0
        mode_price_num = 0  # occurrences of the current mode band
        mode_price = 0
        price_result = []   # all valid prices, for the average
        for price in data.get("prices", []):
            # Skip empty entries, non-numeric prices, and days outside range.
            if (not price or isinstance(price.get("price", ""), (str, bool))
                    or start_time > datetime.strptime(price["checkin"],
                                                      "%Y-%m-%d") or end_time <
                    datetime.strptime(price["checkin"], "%Y-%m-%d")):
                continue
            # Lowest price on the day closest to today (earlier day wins ties).
            if price.get("bug_price_type") and (
                    "price" not in result[cid]
                    or result[cid]["price"]['price'] > price["price"]):
                result[cid]["price"] = {
                    "checkin": price["checkin"],
                    "price": price["price"],
                }
            # Bucket the price into a 100-wide band labelled by its upper
            # bound (e.g. 250 -> 300) for the mode computation.
            _mode_price = int(f"{str(int(price['price']))[:-2]}00") + 100
            checkin = price["checkin"]
            if _mode_price not in cid_result:
                cid_result[_mode_price] = 0
            cid_result[_mode_price] += 1
            if cid_result[_mode_price] > mode_price_num:
                mode_price_num = cid_result[_mode_price]
                mode_price = _mode_price
            if price["price"] > max_price:
                max_price = price["price"]
            price_result.append(price["price"])
        if show_detail:
            if len(price_result):
                avg_price = math.ceil(sum(price_result) / len(price_result))
            else:
                avg_price = "无有效报价!"
            result[cid]["max_price"] = max_price
            result[cid]["avg_price"] = avg_price
            # NOTE(review): key "mode_prive" looks like a typo for
            # "mode_price" -- kept as-is since consumers may rely on it.
            result[cid]["mode_prive"] = mode_price
    return rest_result(request, {"status": 200, "data": result})
Пример #41
0
def round_up_order(x, order=0):
	"""Round *x* up to the nearest multiple of 10**order."""
	scale = 10 ** order
	return math.ceil(x / scale) * scale
Пример #42
0
def record_demonstrations(task_name, n_demos_list, keep_only_wins_by, n_different_init, init_gstate=None):
    """Play out episodes in `task_name` and record them as demonstration sets.

    :param task_name: environment name; also the output folder under ../data
    :param n_demos_list: sizes of the demonstration sets to collect
    :param keep_only_wins_by: player ids whose wins are kept (None keeps all)
    :param n_different_init: number of distinct initial states to spread the
        demos across (None or >= n_demos means a fresh init per demo)
    :param init_gstate: optional fixed initial game state for every demo
    :return: (demonstrations dict with trajectories/infos/game_states,
              suggested pickle path)
    """
    path = Path('..') / 'data' / task_name
    path.mkdir(parents=True, exist_ok=True)
    env = make_unwrapped_env(task_name)
    pbar = tqdm(total=sum(n_demos_list))

    # we collect sets of demonstrations that have a varying number of demonstrations in it

    for n_demos in n_demos_list:
        demos_list = []
        infos_list = []
        game_states_list = []
        keep_only_wins_by = set(keep_only_wins_by) if keep_only_wins_by is not None else None

        # Decide how often a brand-new initial state is drawn.
        if init_gstate is not None:
            n_different_init = 1
            new_init_interval = np.inf

        elif n_different_init is None or n_different_init >= n_demos:
            n_different_init = n_demos
            new_init_interval = 1

        else:
            n_different_init = n_different_init
            new_init_interval = int(math.ceil(n_demos / n_different_init))

        # we collect all the demonstrations for this set

        demo_i = 0
        while demo_i < n_demos:

            if init_gstate is not None:
                # use provided init_gstate
                state = reset_unwrapped_env_to_init_state(unwrapped_env=env, init_game_state=init_gstate)

            elif demo_i % new_init_interval == 0:
                # get new initialisation state
                state = reset_unwrapped_env_to_init_state(unwrapped_env=env, init_game_state=None)
                fixed_init_gstate = get_game_state(env)

            else:
                # re-use fixed initialisation state
                state = reset_unwrapped_env_to_init_state(unwrapped_env=env, init_game_state=fixed_init_gstate)

            # initialise containers

            gstate = get_game_state(env)
            transitions = []
            infos = []
            game_states = [gstate]

            # run an episode

            done = False
            while not done:
                actions = env.act(state)
                next_state, reward, done, info = env.step(actions)
                transitions.append([state, actions, next_state, reward, ml.mask(done)])
                infos.append(info)
                gstate = get_game_state(env)
                game_states.append(gstate)
                state = next_state

            # we record the demo only if the desired player wins or if we have no desired player

            if (keep_only_wins_by is None) \
                    or (
                    infos[-1]['result'] == Result.Win
                    and set(infos[-1]['winners']).intersection(keep_only_wins_by)):
                demos_list.append(transitions)
                infos_list.append(infos)
                game_states_list.append(game_states)
                pbar.update()
                demo_i += 1

        demonstrations = {'trajectories': demos_list, 'infos': infos_list, 'game_states': game_states_list}

        # Build a descriptive file name for this demonstration set.
        if keep_only_wins_by is None:
            suggested_path = path / f'expertDemo{n_demos}_' \
                                    f'nDifferentInit{n_different_init}.pkl'
        else:
            winners = "-".join([f"{w}" for w in sorted(list(keep_only_wins_by))])
            suggested_path = path / f'expertDemo{n_demos}_' \
                                    f'winsFrom{winners}_' \
                                    f'nDifferentInit{n_different_init}.pkl'

        # NOTE(review): this return sits inside `for n_demos in n_demos_list`,
        # so only the first requested set size is ever collected even though
        # the progress bar is sized for all of them -- confirm whether it
        # should be dedented (and results accumulated) instead.
        return demonstrations, suggested_path
Пример #43
0
    def densify(self, geometry):
        """Return a LineString with vertices inserted so that no segment of
        `geometry` is longer than self.options.distance.

        self.options.remainder selects how the leftover (sub-threshold)
        length of each input segment is distributed:
          UNIFORM - shrink the threshold so points are evenly spaced;
          END     - fixed-threshold steps, remainder left at the end;
          BEGIN   - the remainder is inserted as the first step.

        :param geometry: an ogr LineString or MultiLineString
        :raises Exception: for other geometry types or a null first point
        """
        gtype = geometry.GetGeometryType()
        if not (gtype == ogr.wkbLineString or gtype == ogr.wkbMultiLineString):
            raise Exception("The densify function only works on linestring or multilinestring geometries")

        g = ogr.Geometry(ogr.wkbLineString)

        # add the first point
        x0 = geometry.GetX(0)
        y0 = geometry.GetY(0)
        g.AddPoint(x0, y0)

        for i in range(1,geometry.GetPointCount()):
            threshold = self.options.distance
            x1 = geometry.GetX(i)
            y1 = geometry.GetY(i)
            # NOTE(review): truthiness check -- a legitimate coordinate of
            # exactly 0.0 also triggers this "null" error; confirm intent.
            if not x0 or not y0:
                raise Exception("First point is null")
            d = self.distance(x0, x1, y0, y1)

            if self.options.remainder.upper() == "UNIFORM":
                if d != 0.0:
                    # Even spacing: shrink threshold to an exact divisor of d.
                    threshold = float(d)/math.ceil(d/threshold)
                else:
                    # duplicate point... throw it out
                    continue
            if (d > threshold):
                if self.options.remainder.upper() == "UNIFORM":
                    segcount = int(math.ceil(d/threshold))

                    dx = (x1 - x0)/segcount
                    dy = (y1 - y0)/segcount

                    x = x0
                    y = y0
                    for p in range(1,segcount):
                        x = x + dx
                        y = y + dy
                        g.AddPoint(x, y)

                elif self.options.remainder.upper() == "END":
                    segcount = int(math.floor(d/threshold))
                    xa = None
                    ya = None
                    for p in range(1,segcount):
                        if not xa:
                            # First step starts from (x0, y0).
                            xn, yn = self.calcpoint(x0,x1,y0,y1,threshold)
                            d = self.distance(x0, xn, y0, yn)
                            xa = xn
                            ya = yn
                            g.AddPoint(xa,ya)
                            continue
                        xn, yn = self.calcpoint(xa, x1, ya, y1, threshold)
                        xa = xn
                        ya = yn
                        g.AddPoint(xa,ya)

                elif self.options.remainder.upper() == "BEGIN":

                    # I think this might put an extra point in at the end of the
                    # first segment
                    segcount = int(math.floor(d/threshold))
                    xa = None
                    ya = None
                    #xb = x0
                    #yb = y0
                    # The first inserted point absorbs the remainder.
                    remainder = d % threshold
                    for p in range(segcount):
                        if not xa:
                            xn, yn = self.calcpoint(x0,x1,y0,y1,remainder)

                            d = self.distance(x0, xn, y0, yn)
                            xa = xn
                            ya = yn
                            g.AddPoint(xa,ya)
                            continue
                        xn, yn = self.calcpoint(xa, x1, ya, y1, threshold)
                        xa = xn
                        ya = yn
                        g.AddPoint(xa,ya)

            # Always close the segment with its original end point.
            g.AddPoint(x1,y1)
            x0 = x1
            y0 = y1

        return g
def largest_prime_factor(number):
    """Return the largest prime factor of ``number``, or None if it has none.

    Uses trial division, dividing each factor out completely before
    moving on, so every factor recorded is prime. The previous
    implementation only scanned candidates up to ceil(sqrt(number)),
    which misses prime factors above the square root (e.g. it returned
    2 for 10 instead of 5) and relied on an external is_prime helper.
    """
    if number < 2:
        return None  # 0 and 1 have no prime factors
    largest = None
    remaining = number
    candidate = 2
    while candidate * candidate <= remaining:
        while remaining % candidate == 0:
            largest = candidate
            remaining //= candidate
        candidate += 1
    if remaining > 1:
        # Whatever is left is itself a prime factor larger than sqrt(number).
        largest = remaining
    return largest
Пример #45
0
# Validate the requested z-level against the MRF metadata and derive the
# base-level tile grid. NOTE(review): Python 2 code (print statements);
# `options`, `mrf_size`, `sizes`, `mrf_x`, `mrf_y` and `index` come from
# earlier in the file — confirm against the full script.
if str(options.zlevel) == "None":
    z = None
    z_size = None
    if "z=" in mrf_size:
        print "Error: z-level must be specified for this input"
        exit(1)
else:
    z = options.zlevel
    z_size = sizes[2]
    if options.verbose:
        print "Using z-level:" + str(z) + " and MRF z-size:" + str(z_size)
    if z >= z_size:
        print "Error: Specified z-level is greater than the maximum size"
        exit(1)

# Base-level tile grid dimensions, assuming 512x512-pixel tiles.
w = int(math.ceil(float(mrf_x) / 512))
h = int(math.ceil(float(mrf_y) / 512))
# NOTE(review): when z is None, `z >= 0` is False under Python 2's
# mixed-type comparison rules, so the else branch runs; under Python 3
# this comparison would raise TypeError.
if z >= 0:
    len_base = w * h * z_size
    low = z_size
else:
    len_base = w * h
    low = 1
# Each MRF index record is 16 bytes (offset + size), so the tile count
# is the index file size divided by 16 (integer division under py2).
idx_size = os.path.getsize(index)
len_tiles = idx_size / 16

if options.verbose:
    print "Number of tiles " + str(len_tiles)
    print "\n--Pyramid structure--"

levels = []
Пример #46
0
 def pad_with_spaces(string, cell_width):
     """Center *string* within a field of *cell_width* characters.

     The slack is split with the ceiling half on the left and the floor
     half on the right, so odd slack puts the extra space before the
     string. Negative slack yields no padding (the string is returned
     unchanged).
     """
     slack = cell_width - len(string)
     right_pad = slack // 2          # floor(slack / 2)
     left_pad = slack - right_pad    # ceil(slack / 2)
     return "".join([" " * left_pad, string, " " * right_pad])
Пример #47
0
def evaluate(simulation, commands):
    """Replay drone *commands* against a copy of *simulation* state.

    Validates each "L" (load) / "D" (deliver) command, tracks a running
    per-drone turn counter, and scores each order when it completes.
    Returns (score, latest_delivery_turn, score / num_orders,
    num_orders). Raises ValueError on any illegal command.
    """
    # Deep-copy mutable state so replaying commands cannot corrupt the
    # original simulation; `products` is only read, so no copy needed.
    warehouses: Dict[int, Warehouse] = {w.id: w for w in deepcopy(simulation.i_warehouses)}
    orders: Dict[int, Order] = {o.id: o for o in deepcopy(simulation.i_orders)}
    drones: Dict[int, Drone] = {d.id: d for d in deepcopy(simulation.i_drones)}
    products: Dict[int, Product] = {p.id: p for p in simulation.products}
    max_cargo = simulation.max_cargo
    max_turns = simulation.max_turns

    # Running turn counter per drone, and the turns at which each order
    # received deliveries.
    drone_to_delivery_time: Dict[Drone, int] = DefaultDict(int)
    order_to_delivery_time: Dict[Order, list] = DefaultDict(list)
    score = 0
    deliv_max = 0  # latest completed-delivery turn observed

    for i, command in enumerate(commands):
        # Command format: "<drone> <L|D> <destination> <product> <quantity>"
        drone_id, str_command, destination_id, product_id, quantity = command.split(" ")

        drone_id = int(drone_id)
        destination_id = int(destination_id)
        product_id = int(product_id)
        quantity = int(quantity)

        drone = drones[drone_id]
        product = products[product_id]
        basket = {product: quantity}

        if str_command == "L":
            # Load: move product from a warehouse into the drone's cargo.
            warehouse = warehouses[destination_id]

            if warehouse.products.get(product, 0) < quantity:
                raise ValueError(f"Command {i}: {warehouse} have not enough {product}.")
            warehouse.remove_products(basket)

            drone.add_products(basket)
            if drone.product_weight > max_cargo:
                raise ValueError(f"Command {i}: {drone} overloaded.")

            # Travel time to the warehouse plus one turn for loading.
            drone_to_delivery_time[drone] += drone.distanceTo(warehouse) + 1
            drone.set_position(warehouse.position)

        elif str_command == "D":
            # Deliver: move product from the drone into an open order.
            order = orders[destination_id]
            if order.is_complete():
                raise ValueError(
                    f"Command {i}: the {order} is closed, nothing can be delivered there."
                )

            if drone.products.get(product, 0) < quantity:
                raise ValueError(f"Command {i}: {drone} have not enough {product}.")
            drone.remove_products(basket)

            if order.products.get(product, 0) < quantity:
                raise ValueError(f"Command {i}: Too many {product} for {order}.")
            order.remove_products(basket)

            # Travel time plus one turn for unloading; the delivery
            # itself lands on the turn before the counter advances.
            drone_to_delivery_time[drone] += drone.distanceTo(order) + 1
            drone.set_position(order.position)
            order_to_delivery_time[order].append(drone_to_delivery_time[drone] - 1)

            if order.is_complete():
                # An order scores based on its last delivery turn.
                delivery_time = max(order_to_delivery_time[order])
                if delivery_time > deliv_max:
                    deliv_max = delivery_time
                if delivery_time < max_turns:
                    sc = ceil(100 * (max_turns - delivery_time) / max_turns)
                    score += sc
                else:
                    raise ValueError(f"Command {i}: Run out of time.")
        else:
            raise ValueError(f"Command {i}: Unknown command {str_command}.")

    num_orders = len(orders)
    return score, deliv_max, score / num_orders, num_orders
def evaluate_params(evaluateFunc, params, objectiveParams, urdfRoot='', timeStep=0.01, maxNumSteps=10000, sleepTime=0):
  """Run one minitaur gait trial in pybullet and return its objective value.

  Resets the simulation, builds a ground plane plus three static cube
  obstacles, then steps the minitaur for up to maxNumSteps while doing
  a lidar-style ray sweep for obstacle avoidance. The objective is
  distance travelled minus an energy penalty.

  :param evaluateFunc: key into evaluate_func_map selecting the gait
                       function mapping (step, params) -> joint values
  :param params: gait parameters forwarded to the gait function
  :param objectiveParams: sequence whose first element is alpha, the
                          energy penalty weight
  :param urdfRoot: directory containing plane.urdf and the robot URDFs
  :param timeStep: physics timestep in seconds
  :param maxNumSteps: maximum number of simulation steps
  :param sleepTime: wall-clock sleep per step (for visualization)
  :return: float objective = final_distance - alpha * total_energy
  """
  print('start evaluation')
  beforeTime = time.time()
  p.resetSimulation()

  p.setTimeStep(timeStep)
  p.setAdditionalSearchPath(pybullet_data.getDataPath())

  p.loadURDF("%s/plane.urdf" % urdfRoot)
  p.setGravity(0,0,-9.81)

  mass = 1
  visualShapeId = -1
  # Three static (mass 0) concave cube obstacles placed near the start.
  cube =p.createCollisionShape(p.GEOM_MESH,fileName="cube.obj",flags=p.GEOM_FORCE_CONCAVE_TRIMESH, meshScale=[1,1,1])
  orn = p.getQuaternionFromEuler([0,0,0])
  print(orn)
  p.createMultiBody (0,cube, baseOrientation=orn, basePosition=[-1,1,0])
  p.createMultiBody (0,cube, baseOrientation=orn, basePosition=[-2,2,0])
  p.createMultiBody (0,cube, baseOrientation=orn, basePosition=[-1,-1,0])



  global minitaur
  minitaur = Minitaur(urdfRoot)
  start_position = current_position()
  last_position = None  # for tracing line
  total_energy = 0



#########---------------START Obstacle Avoidance----------########

  # Fan of rays sweeping `angle_swept` degrees in front of the robot.
  ray_length = 1
  angle_swept = 130
  # Angular step in degrees, rounded up at two decimals so one sweep
  # fits within p.MAX_RAY_INTERSECTION_BATCH_SIZE rays.
  step = math.ceil(100*angle_swept/p.MAX_RAY_INTERSECTION_BATCH_SIZE)/100
  angles = np.arange(-angle_swept/2, angle_swept/2, step) * np.pi / 180 #angle of rotations
  num_angles = np.shape(angles)[0]
  rays = np.concatenate(([ray_length*np.sin(angles)], [ray_length*np.cos(angles)], [np.zeros(num_angles)]), axis=0)
  rot = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 0]])
  offset = np.array([-0.33, 0, 0.07])


  for i in range(maxNumSteps):
    # Rotate the ray fan into the robot's current base frame.
    matrix = p.getMatrixFromQuaternion(minitaur.getBaseOrientation())
    matrix = np.reshape(matrix, (3, 3))

    src = np.array(minitaur.getBasePosition()) 
    src = src + np.matmul(matrix,offset)

    rays_src = np.repeat([src], num_angles, axis=0)

    orn = np.matmul(matrix, rot) #rotates unit vector y to -x
    rays_end = np.matmul(orn, rays) # unit vector in direction of minitaur

    rays_end = (rays_end + src[:, None]).T
    rays_info = p.rayTestBatch(rays_src.tolist(), rays_end.tolist())

    h = 10

    # First element of each ray result is the hit object's unique id.
    b = np.asarray([int(i[0]) for i in rays_info])

    # NOTE(review): this inner loop reuses the name `i`, shadowing the
    # outer step counter; after it runs, `i` is h-2 for the rest of the
    # body, so `joint_values` below and the `i % 100` progress check
    # see the shadowed value — confirm this is intended.
    for i in range(h-1):
      rays = np.concatenate(([ray_length*np.sin(angles)], [ray_length*np.cos(angles)], [np.full((num_angles,), i+1)]), axis=0)

      rays_end = np.matmul(orn, rays) # unit vector in direction of minitaur
      rays_end = (rays_end + src[:, None]).T

      rays_info = p.rayTestBatch(rays_src.tolist(), rays_end.tolist())

      b = np.vstack((b, np.asarray([int(i[0]) for i in rays_info])))

    nth_ray = find_largest_gap(b)

    # Convert the chosen ray column index back into a steering angle.
    deg = 1.*angle_swept*nth_ray/b.shape[1] - angle_swept/2.
    print("Rotate {:.1f}degrees".format(deg))


#########---------------END Obstacle Avoidance----------########


    # Energy consumed this step: sum over motors of |torque*velocity|*dt.
    torques = minitaur.getMotorTorques()
    velocities = minitaur.getMotorVelocities()
    total_energy += np.dot(np.fabs(torques), np.fabs(velocities)) * timeStep

    joint_values = evaluate_func_map[evaluateFunc](i, params)

    minitaur.applyAction(joint_values, go_straight)
    p.stepSimulation()
    if (is_fallen()):
      break


    if i % 100 == 0:
      sys.stdout.write('.')
      sys.stdout.flush()
    time.sleep(sleepTime)

  print(' ')

  alpha = objectiveParams[0]
  final_distance = np.linalg.norm(start_position - current_position())
  finalReturn = final_distance - alpha * total_energy
  elapsedTime = time.time() - beforeTime
  print ("trial for ", params, " final_distance", final_distance, "total_energy", total_energy, "finalReturn", finalReturn, "elapsed_time", elapsedTime)
  return finalReturn
Пример #49
0
def train(model, X_train=None, Y_train=None, save=False,
          predictions_adv=None, evaluate=None,
          args=None, rng=None, var_list=None,
          attack=None, attack_args=None):
    """
    Train a TF Eager model
    :param model: instance of cleverhans model, takes in input batch,
                    gives out probs(softmax layer).
    :param X_train: numpy array with training inputs
    :param Y_train: numpy array with training outputs
    :param save: boolean controlling the save operation
    :param predictions_adv: if set with the adversarial example tensor,
                            will run adversarial training
    :param evaluate: function that is run after each training iteration
                     (typically to display the test/validation accuracy).
    :param args: dict or argparse `Namespace` object.
                 Should contain `nb_epochs`, `learning_rate`,
                 `batch_size`
                 If save is True, should also contain 'train_dir'
                 and 'filename'
    :param rng: Instance of numpy.random.RandomState
    :param var_list: List of variables to train.
    :param attack: Instance of the class cleverhans.attacks.attacks_eager
    :param attack_args: Parameters required for the attack.
    :return: True if model trained
    """
    args = _ArgsWrapper(args or {})
    # attack and attack_args only make sense together.
    if ((attack is None) != (attack_args is None)):
        raise ValueError("attack and attack_args must be "
                         "passed together.")
    if X_train is None or Y_train is None:
        raise ValueError("X_train argument and Y_train argument "
                         "must be supplied.")
    # Check that necessary arguments were given (see doc above)
    assert args.nb_epochs, "Number of epochs was not given in args dict"
    assert args.learning_rate, "Learning rate was not given in args dict"
    assert args.batch_size, "Batch size was not given in args dict"

    if save:
        assert args.train_dir, "Directory for save was not given in args dict"
        assert args.filename, "Filename for save was not given in args dict"

    if rng is None:
        rng = np.random.RandomState()

    # Optimizer
    tfe = tf.contrib.eager
    # Eager variables reused as fixed-size batch buffers across steps.
    optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
    batch_x = tfe.Variable(X_train[0:args.batch_size], dtype=tf.float32)
    batch_y = tfe.Variable(Y_train[0:args.batch_size], dtype=tf.float32)

    # One epoch of training.
    for epoch in xrange(args.nb_epochs):
        # Compute number of batches (rounded up so a partial final
        # batch is still covered; batch_indices repeats examples).
        nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
        assert nb_batches * args.batch_size >= len(X_train)

        # Indices to shuffle training set
        index_shuf = list(range(len(X_train)))
        rng.shuffle(index_shuf)

        prev = time.time()
        for batch in range(nb_batches):

            # Compute batch start and end indices
            start, end = batch_indices(
                batch, len(X_train), args.batch_size)

            # Perform one training step
            tf.assign(batch_x, X_train[index_shuf[start:end]])
            tf.assign(batch_y, Y_train[index_shuf[start:end]])
            # Compute grads
            with tf.GradientTape() as tape:
                # Define loss
                loss_clean_obj = LossCrossEntropy(model, smoothing=0.)
                loss_clean = loss_clean_obj.fprop(x=batch_x, y=batch_y)
                loss = loss_clean
                # Adversarial training: average clean and adversarial loss.
                if attack is not None:
                    batch_adv_x = attack.generate(batch_x, **attack_args)
                    loss_adv_obj = LossCrossEntropy(model, smoothing=0.)
                    loss_adv = loss_adv_obj.fprop(x=batch_adv_x, y=batch_y)
                    loss = (loss_clean + loss_adv) / 2.0
            # Apply grads
            model_variables = model.get_params()
            grads = tape.gradient(loss, model_variables)
            optimizer.apply_gradients(zip(grads, model_variables))

        assert end >= len(X_train)  # Check that all examples were used
        cur = time.time()
        _logger.info("Epoch " + str(epoch) + " took " +
                     str(cur - prev) + " seconds")
        if evaluate is not None:
            evaluate()

    if save:
        save_path = os.path.join(args.train_dir, args.filename)
        # NOTE(review): tf.train.Saver.save takes (sess, save_path);
        # passing (save_path, model_variables) looks like it was meant
        # for tfe.Saver(model_variables).save(save_path) — confirm.
        saver = tf.train.Saver()
        saver.save(save_path, model_variables)
        _logger.info("Completed model training and saved at: " +
                     str(save_path))
    else:
        _logger.info("Completed model training.")

    return True
Пример #50
0
 def get_range() -> Tuple[int, int]:
     """Return the inclusive integer range of multipliers.

     Divides the enclosing safe_min / safe_max bounds by multiple_of,
     orders the two quotients, and shrinks them inward to integers
     (ceil on the low end, floor on the high end), so every k in the
     returned range satisfies safe_min <= k * multiple_of <= safe_max
     up to floating-point division error.
     """
     quotients = sorted((safe_min / multiple_of, safe_max / multiple_of))
     lo = math.ceil(quotients[0])
     hi = math.floor(quotients[1])
     return lo, hi
def run_model(session,
              pred,
              X,
              y,
              is_training,
              loss_val,
              Xdata,
              ydata,
              epochs=1,
              batch_size=64,
              print_every=100,
              train_step=None,
              plot_losses=False):
    """Run `epochs` passes of `pred` over Xdata/ydata in minibatches.

    Trains when `train_step` is supplied, otherwise only evaluates.
    Returns (average loss, accuracy) of the final epoch.

    NOTE(review): Python 2 code (print statements). It also reads
    `mean_loss` from an enclosing/global scope even though the
    `loss_val` parameter looks intended for that role — confirm.
    """
    # Compute accuracy using tf
    correct_prediction = tf.equal(tf.argmax(pred, axis=1), y)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Shuffle indices
    N = Xdata.shape[0]
    train_indices = np.arange(N)
    np.random.shuffle(train_indices)

    is_training_mode = train_step is not None

    # Set up variables for computation and optimization.
    # In training mode the accuracy tensor is swapped for the train op,
    # so session.run returns (loss, correct_flags, _).
    variables = [mean_loss, correct_prediction, accuracy]
    if is_training_mode:
        variables[-1] = train_step

    iter_counter = 0
    for e in range(epochs):
        # Keep track of losses and accuracy
        num_correct = 0
        losses = []
        # Rounded up so the final partial batch is included.
        for i in range(int(math.ceil(N / batch_size))):
            # Generate indices for the batch
            start_idx = (i * batch_size) % N
            idx = train_indices[start_idx:start_idx + batch_size]

            feed_dict = {
                X: Xdata[idx, :],
                y: ydata[idx],
                is_training: is_training_mode
            }

            # The last batch may be smaller than batch_size.
            actual_batch_size = ydata[idx].shape[0]

            # Compute loss and number of correct predictions
            loss, corr, _ = session.run(variables, feed_dict=feed_dict)

            # Weight the loss by batch size so the epoch average below
            # is a per-example mean.
            losses.append(loss * actual_batch_size)
            num_correct += float(np.sum(corr))

            if is_training_mode and (iter_counter % print_every) == 0:
                print "Iteration {0}: with minibatch training loss = {1:.3g} and accuracy of {2:.2g}".format(
                    iter_counter, loss,
                    float(np.sum(corr)) / actual_batch_size)

            iter_counter += 1

        # `accuracy` (the tf tensor) is rebound to a Python float here.
        accuracy = num_correct / N
        avg_loss = np.sum(losses) / N
        print "Epoch {0}, overall loss = {1:.3g} and accuracy = {2:.3g}".format(
            e + 1, avg_loss, accuracy)

        if plot_losses:
            plt.plot(losses)
            plt.grid(True)
            plt.title('Epoch {} Loss'.format(e + 1))
            plt.xlabel('minibatch number')
            plt.ylabel('minibatch loss')
            plt.show()
    return avg_loss, accuracy
Пример #52
0
 def dealer_max_badges(self):
     """Badge cap for this dealer group.

     A truthy global cap (c.MAX_DEALERS) wins outright; otherwise the
     group may have one badge more than its table count, rounded up.
     The fallback is only computed when the global cap is unset,
     preserving the original short-circuit behavior.
     """
     if c.MAX_DEALERS:
         return c.MAX_DEALERS
     return math.ceil(self.tables) + 1
Пример #53
0
async def switch_call(data: dict):
    """Register details of a call that was made and calculate the cost of the call.
        - Request data:
            {
                "calling": "381211234567",
                "called": "38164111222333",
                "start": "2019-05-23T21:03:33.30Z",
                "duration": "450"
            }

    :param data:
    :return:
        - 200  Call accepted
            - Headers
                - Content-type: application/json
            - Body
                {
                    "calling": "381211234567",
                    "called": "38164111222333",
                    "start": "2019-05-23T21:03:33.30Z",
                    "duration": "350",
                    "rounded": "355",
                    "price": "0.4",
                    "cost": "2.367"
                }
        - 400  Incorrect input
            - Headers
                - Content-type: application/json
            - Body
                {
                    "message": "Incorrect input"
                }
        - 400  Incorrect input
            - Headers
                - Content-type: application/json
            - Body
                {
                    "message": "Error occurred"
                }
    """
    try:
        # Validates/parses the request body via pydantic.
        call_object = CallData(**data)

        # Rate lookup by number and call start time.
        # NOTE(review): the 'calling' argument receives
        # call_object.called — confirm this swap is intentional.
        call_stats = get_call_stats_from_csv(
            calling=call_object.called,
            time=call_object.start.isoformat() + 'Z',
            dataframe=app.df
        )
        if not call_stats:
            return ERROR_400

        cost = calculate_cost(
            initial=int(call_stats["initial"]),
            duration=int(call_object.duration),
            increment=int(call_stats["increment"]),
            rate=float(call_stats["price"])
        )
        # Billed seconds: initial period plus duration, rounded up to
        # the next whole billing increment.
        rounded = math.ceil((int(call_stats["initial"]) + int(call_object.duration)) / \
        int(call_stats["increment"])) * int(call_stats["increment"])
        call_object.price = call_stats["price"]
        call_object.cost = cost
        call_object.rounded = rounded

        # Persist the call record before building the response.
        async with app.postgres.acquire() as con:
            await con.execute(
                ADD_CALLS_RECORD,
                int(call_object.calling),
                int(call_object.called),
                call_object.start.replace(tzinfo=None),
                call_object.duration,
                call_object.rounded,
                float(call_object.price),
                float(call_object.cost)
            )
            # call_object.id = str(row.inserted_id)
            # response = {key: str(v) for key, v in call_object.dict().items()}
            response = call_object.dict()
    except pydanticValidationError:
        response = Response(
            content='{"message": "Incorrect input"}',
            status_code=400,
            headers={
                "Content-type": "application/json"
            }
        )
    except Exception as e:
        # NOTE(review): re-raising here bypasses the documented 400
        # "Error occurred" response (left commented out below) —
        # confirm which behavior is intended.
        raise e
        # response = Response(
        #     content='{"message": "Error occurred"}',
        #     status_code=400,
        #     headers={
        #         "Content-type": "application/json"
        #     }
        # )
    return response
Пример #54
0
def model_eval(model, X_test=None, Y_test=None, args=None,
               attack=None, attack_args=None):
    """
    Compute the accuracy of a TF Eager model on some data
    :param model: instance of cleverhans.model.Model_Eager
                    with pretrained weights for evaluation.
    :param X_test: numpy array with training inputs
    :param Y_test: numpy array with training outputs
    :param args: dict or argparse `Namespace` object.
                 Should contain `batch_size`
    :param attack: instance of the class cleverhans.attacks.attacks_eager
    :param attack_args: parameters required for the attack.
    :return: a float with the accuracy value
    """
    args = _ArgsWrapper(args or {})

    # attack and attack_args only make sense together.
    if ((attack is None) != (attack_args is None)):
        raise ValueError("attack and attack_args must be "
                         "passed together.")
    assert args.batch_size, "Batch size was not given in args dict"
    if X_test is None or Y_test is None:
        raise ValueError("X_test argument and Y_test argument "
                         "must be supplied.")

    # Define accuracy symbolically
    if LooseVersion(tf.__version__) <= LooseVersion('1.0.0'):
        raise Exception('Use Tensorflow Version greather than 1.0.0')

    # Init result var
    accuracy = 0.0

    # Compute number of batches (rounded up to cover a partial final batch)
    nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
    assert nb_batches * args.batch_size >= len(X_test)

    # Fixed-size staging buffers: the final short batch leaves stale
    # rows from the previous batch in place, which are sliced off when
    # accumulating accuracy below.
    X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],
                     dtype=X_test.dtype)
    Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],
                     dtype=Y_test.dtype)

    tfe = tf.contrib.eager
    batch_x = tfe.Variable(X_test[0:args.batch_size], dtype=tf.float32)
    batch_y = tfe.Variable(Y_test[0:args.batch_size], dtype=tf.float32)
    for batch in range(nb_batches):
        if batch % 100 == 0 and batch > 0:
            _logger.debug("Batch " + str(batch))

        # Must not use the `batch_indices` function here, because it
        # repeats some examples.
        # It's acceptable to repeat during training, but not eval.
        start = batch * args.batch_size
        end = min(len(X_test), start + args.batch_size)

        # The last batch may be smaller than all others. This should not
        # affect the accuarcy disproportionately.
        cur_batch_size = end - start
        X_cur[:cur_batch_size] = X_test[start:end]
        Y_cur[:cur_batch_size] = Y_test[start:end]
        tf.assign(batch_x, X_cur)
        tf.assign(batch_y, Y_cur)
        # Evaluate on adversarial examples when an attack is supplied.
        if attack is not None:
            batch_adv_x = attack.generate(batch_x, **attack_args)
            predictions = model(batch_adv_x)
        else:
            predictions = model(batch_x)
        cur_corr_preds = tf.equal(tf.argmax(batch_y, axis=-1),
                                  tf.argmax(predictions, axis=-1))

        # Only count the genuine rows of this batch (ignore padding).
        accuracy += cur_corr_preds.numpy()[:cur_batch_size].sum()

    assert end >= len(X_test)

    # Divide by number of examples to get final value
    accuracy /= len(X_test)

    return accuracy
Пример #55
0
 def generate_grid_positions(self, num_items, num_rows):
     """Lay out *num_items* as column-major (row, col) grid positions.

     Positions fill each column top-to-bottom before moving to the next
     column; the number of columns is num_items / num_rows rounded up.
     Exactly num_items positions are returned (an empty list when
     num_rows is zero).
     """
     if num_rows==0:
         return []
     num_cols = int(math.ceil(float(num_items) / num_rows))
     grid = []
     for col in range(num_cols):
         for row in range(num_rows):
             grid.append((row, col))
     return grid[:num_items]
Пример #56
0
def form_post():
    """Handle the trip form: build a weather-aware packing list.

    Reads `city` and trip length (`date`) from the submitted form,
    resolves the city id from the bundled OpenWeatherMap city list,
    fetches current weather, then assembles packing lists (essentials,
    toiletries, weather-based items, clothing quantities) and renders
    submit.html with them.

    NOTE(review): the OpenWeatherMap API key is hardcoded in the URL —
    it should live in configuration. The bare except blocks below
    silently swallow JSON/lookup errors — confirm that is intended.
    """
    cityid = None
    city = request.form['city'].capitalize()
    triplen = int(request.form['date'])
    # City name -> id lookup from the bundled OpenWeatherMap dataset.
    with open('city.list.json/city.list.json', encoding="utf-8") as f:
        try:
            data = json.load(f)
        except:
            pass
    datalist =  []   
    for i in data:
        try:
            datalist.append((i['name'], i['id']))
        except:
            pass  
    # First exact-name match wins.
    for x in datalist:
        try:
            if x[0] == city:
                cityid = x[1]
                break
        except:
            print('error')
            pass
    if cityid == None:
        return 'no id found'
    # Current weather for the city (temp arrives in Kelvin).
    s = 'http://api.openweathermap.org/data/2.5/weather?q={c}&APPID=386aa376a85049b9a45c5fb223f1a691'.format(c=city)
    r = requests.get(s)
    cityfile = r.json()
    pprint(cityfile)
    # Kelvin -> Celsius, truncated to 4 characters.
    temp = float(str((cityfile['main']['temp'])-273.15)[:4])
    cloudcover = (cityfile['clouds']['all'])
    description = cityfile['weather'][0]['description']
    genweather = cityfile['weather'][0]['main']
    ''''''
    essentials = '''\
Chapstick
Vitamins
Meds
Pain reliever pills
Wallet
Casual watch
House key
Glasses
Sunglasses
Ear plugs
Eye mask
Book
Camera
Cell phone
Cell phone charger
Boarding pass
Printed trip itinerary
Hand sanitizer
Appropriate currency'''
    
    toiletries = """\
Contact solution
Perfume
Nail clippers
Tweezers
Makeup
Toothbrush
Toothpaste
Hairbrush
Floss
Deodorant
Shaver
Shaving cream/gel
Contacts
Q-tips"""
    essentials = essentials.split('\n')
    toiletries = toiletries.split('\n')
    # Weather-driven additions, keyed off the textual description.
    packbyweather = [] 
    if 'rain' in description:
        packbyweather.extend([item for item in ['Umbrella', 'Raincoat', 'Rain boots']])
    if 'clouds' in description:
        packbyweather.extend([item for item in ['Hoodie', 'Jacket', 'Coat']])
    if 'sun' in description:
        packbyweather.extend([item for item in ['Sun screen', 'Hat', 'Light Jacket']])
    if 'wind' in description:
        packbyweather.extend([item for item in ['Wind Breaker', 'Vaseline', 'Scarf', 'Allergy Medication']])
    if 'snow' in description:
        packbyweather.extend([item for item in ['Thick Jacket', 'Beanie', 'Mittens', 'Warm boots', 'Snow boots', 'Scarf', 'Snow pants', 'Hand-warmers', 'Long underwear']])
    if 'clear' in description:
        if temp < 18:
            packbyweather.extend([item for item in ['Jacket']])
        else:
            packbyweather.append('Light Jacket')
    if 'breeze' in description:
        packbyweather.extend([item for item in ['Wind Breaker', 'Scarf', 'Hat']])
    
    # Temperature bands add further items, skipping duplicates.
    if temp > 0:
        if temp < 4:
            packbyweather.extend([item for item in ['Beanie', 'Thick Jacket', 'Mittens', 'Warm boots', 'Scarf', 'Hand-warmers'] if item not in packbyweather])
        elif temp < 18:
            packbyweather.extend([item for item in ['Hoodie', 'Jacket', 'Hat'] if item not in packbyweather])
        elif temp < 30:
            packbyweather.extend([item for item in ['Hat', 'Sunglasses', 'Light Jacket'] if item not in packbyweather])
        else:
            packbyweather.extend([item for item in ['Hat', 'Sunglasses', 'Personal fan', 'Aloe Vera'] if item not in packbyweather])
    
    ##############################
    # Clothing quantities scaled by trip length in days.
    regclothes = ['sets of underwear', 'pairs of socks', 'casual shirts', 'pairs of pants', 'belt', 'set of pajamas']
    quantityclothes = []
    for item in regclothes:
        if 'underwear' in item or 'socks' in item:
            quantityclothes.append(str(triplen+1)+' '+item)
        elif 'shirts' in item:             
            if triplen > 10:
                quantityclothes.append(str(math.ceil(triplen/2)+2)+' '+item)
            elif triplen >= 5:
                quantityclothes.append(str(triplen-2)+' '+item)
            else:
                quantityclothes.append(str(triplen)+' '+item)
        elif 'pants' in item:
            quantityclothes.append(str(math.ceil(triplen/2))+' '+item)
        else:
            quantityclothes.append('1 '+item)
#     print(quantityclothes)
        
    
    
    ''''''
    templatevars = {
        'city' : city,
        'description' : description,
        'humidity': str(cityfile['main']['humidity']),
        'temp' : temp,
        'cloudcover':cloudcover,
        'essentials' : essentials,
        'toiletries' : toiletries,
        'packbyweather' : packbyweather,
        'quantityclothes' : quantityclothes,
        'genweather' : genweather,
        } 
    return render_template('submit.html', templatevars=templatevars)
Пример #57
0
# Frame the input audio and write per-sample energy to CSV.
# NOTE(review): Python 2 only — the 'C:\Users\...' literals rely on
# py2 treating \U, \M, \e as literal backslash text (py3 rejects \U as
# an invalid unicode escape), and the filenames look truncated
# ('eature_file.csv', 'ure_file.csv'). `audio_file` and `window_dur`
# come from earlier in the file — confirm against the full script.
hop_dur    = 10                                                    # Hop duration in millisec (Parameter 2)
fid_text   = open('C:\Users\Mahe\Desktop\eature_file.csv', 'w')    # Output file for storing features (Parameter 4)

fid_text_go   = open('C:\Users\Mahe\Desktop\ure_file.csv', 'w')    # Output file for storing features (Parameter 4)


fid_text_go   = open('C:\Users\Mahe\Desktop\ure_file.csv', 'w') if False else fid_text_go
fs, data = wavfile.read(audio_file)                                # Reading data from wav file in an array
                                                                   # Returns Sample rate and data read from  wav file
data = data / (float(2 ** 15))                                     # Normalizing it to [-1,1] range from [-2^15,2^15]

window_size = int(window_dur * fs * (0.001))                       # Converting window length to samples
hop_size    = int(hop_dur * fs * (0.001))                          # Converting hop length to samples

window_type = np.hamming(window_size)                              # Window type: Hamming (by default) (Parameter 3)

no_frames   = int(math.ceil(len(data) / (float(hop_size))))
# Zero-pad by one window so the final frame is full length.
zero_array  = np.zeros(window_size)
data        = np.concatenate((data, zero_array))

length=len(data)

# Per-sample energy: the squared amplitude of each sample.
ene = [0]*length
for j in range(length):
    ene[j] = data[j]*data[j]

fid_text_go.write("Data"+"\n")
for i in range(length):
    fid_text_go.write(str(ene[i]) + "\n")


#print " Window Size:",window_size," \n Hop Size:",hop_size, "\n No of frames:",no_frames
Пример #58
0
    def __getitem__(self, index):
        """Return scene *index* partitioned into fixed-size point blocks.

        Loads the scene, injects noise into ~20% of its points (their
        label is reset to 0), tiles the scene into vertical columns on
        a `self.stride` grid, merges blocks of <= 4096 points into
        their nearest neighbour, then splits every block into chunks
        of exactly self.block_points (padding by re-sampling). Returns
        stacked arrays (points, labels, sample_weights, point_indices).
        """
        delta = self.stride
        # Keep RGB columns only when the dataset was built with them.
        if self.with_rgb:
            point_set_ini = self.scene_points_list[index]
        else:
            point_set_ini = self.scene_points_list[index][:, 0:3]

        semantic_seg_ini = self.semantic_labels_list[index].astype(np.int32)

        '''add noise'''
        # Randomly re-sample 20% of the points, jitter them slightly in
        # normalized space, and write them back with label 0.
        noise_ratio = 0.2
        raw_xyz = point_set_ini[:, 0:3]
        num_raw_points = raw_xyz.shape[0]
        centroid = np.mean(raw_xyz, axis=0, keepdims=True)
        normalized_xyz = raw_xyz - centroid
        max_l = np.max(normalized_xyz)
        min_l = np.min(normalized_xyz)
        max_length = max(abs(max_l), abs(min_l))

        normalized_xyz = normalized_xyz / max_length
        num_noise = math.ceil(num_raw_points * noise_ratio)
        choices = np.random.choice(num_raw_points, num_noise)
        new_noise_xyz = normalized_xyz[choices]

        new_noise_xyz_shift = np.random.randn(num_noise, 3)
        new_noise_xyz_shift = (new_noise_xyz_shift - 0.5) / 0.5 * 0.002
        new_noise_xyz = new_noise_xyz + new_noise_xyz_shift
        point_set_ini[choices, 0:3] = new_noise_xyz * max_length + centroid
        semantic_seg_ini[choices] = 0
        '''End'''

        # Tile the xy extent into 1.5 x 1.5 columns on a `delta` grid;
        # each column keeps points within a 0.2 margin of its bounds.
        coordmax = np.max(point_set_ini[:, 0:3], axis=0)
        coordmin = np.min(point_set_ini[:, 0:3], axis=0)
        nsubvolume_x = np.ceil((coordmax[0] - coordmin[0]) / delta).astype(np.int32)
        nsubvolume_y = np.ceil((coordmax[1] - coordmin[1]) / delta).astype(np.int32)
        point_sets = []
        semantic_segs = []
        sample_weights = []
        point_idxs = []
        block_center = []
        for i in range(nsubvolume_x):
            for j in range(nsubvolume_y):
                curmin = coordmin + [i * delta, j * delta, 0]
                curmax = curmin + [1.5, 1.5, coordmax[2] - coordmin[2]]
                curchoice = np.sum(
                    (point_set_ini[:, 0:3] >= (curmin - 0.2)) * (point_set_ini[:, 0:3] <= (curmax + 0.2)), axis=1) == 3
                curchoice_idx = np.where(curchoice)[0]
                cur_point_set = point_set_ini[curchoice, :]
                cur_semantic_seg = semantic_seg_ini[curchoice]
                if len(cur_semantic_seg) == 0:
                    continue
                # Points inside the strict (unpadded) bounds get full
                # class weight; margin points are zero-weighted.
                mask = np.sum((cur_point_set[:, 0:3] >= (curmin - 0.001)) * (cur_point_set[:, 0:3] <= (curmax + 0.001)),
                              axis=1) == 3
                sample_weight = self.labelweights[cur_semantic_seg]
                sample_weight *= mask  # N
                point_sets.append(cur_point_set)  # 1xNx3/6
                semantic_segs.append(cur_semantic_seg)  # 1xN
                sample_weights.append(sample_weight)  # 1xN
                point_idxs.append(curchoice_idx)  # 1xN
                block_center.append((curmin[0:2] + curmax[0:2]) / 2.0)

        # merge small blocks (<= 4096 points) into their nearest block
        num_blocks = len(point_sets)
        block_idx = 0
        while block_idx < num_blocks:
            if point_sets[block_idx].shape[0] > 4096:
                block_idx += 1
                continue

            small_block_data = point_sets[block_idx].copy()
            small_block_seg = semantic_segs[block_idx].copy()
            small_block_smpw = sample_weights[block_idx].copy()
            small_block_idxs = point_idxs[block_idx].copy()
            small_block_center = block_center[block_idx].copy()
            point_sets.pop(block_idx)
            semantic_segs.pop(block_idx)
            sample_weights.pop(block_idx)
            point_idxs.pop(block_idx)
            block_center.pop(block_idx)
            nearest_block_idx = self.nearest_dist(small_block_center, block_center)
            point_sets[nearest_block_idx] = np.concatenate((point_sets[nearest_block_idx], small_block_data), axis=0)
            semantic_segs[nearest_block_idx] = np.concatenate((semantic_segs[nearest_block_idx], small_block_seg),
                                                              axis=0)
            sample_weights[nearest_block_idx] = np.concatenate((sample_weights[nearest_block_idx], small_block_smpw),
                                                               axis=0)
            point_idxs[nearest_block_idx] = np.concatenate((point_idxs[nearest_block_idx], small_block_idxs), axis=0)
            num_blocks = len(point_sets)

        # divide large blocks into chunks of exactly self.block_points,
        # padding each block by re-sampling its own shuffled points
        num_blocks = len(point_sets)
        div_blocks = []
        div_blocks_seg = []
        div_blocks_smpw = []
        div_blocks_idxs = []
        div_blocks_center = []
        for block_idx in range(num_blocks):
            cur_num_pts = point_sets[block_idx].shape[0]

            point_idx_block = np.array([x for x in range(cur_num_pts)])
            if point_idx_block.shape[0] % self.block_points != 0:
                makeup_num = self.block_points - point_idx_block.shape[0] % self.block_points
                np.random.shuffle(point_idx_block)
                point_idx_block = np.concatenate((point_idx_block, point_idx_block[0:makeup_num].copy()))

            np.random.shuffle(point_idx_block)

            sub_blocks = list(self.chunks(point_idx_block, self.block_points))

            div_blocks += self.split_data(point_sets[block_idx], sub_blocks)
            div_blocks_seg += self.split_data(semantic_segs[block_idx], sub_blocks)
            div_blocks_smpw += self.split_data(sample_weights[block_idx], sub_blocks)
            div_blocks_idxs += self.split_data(point_idxs[block_idx], sub_blocks)
            div_blocks_center += [block_center[block_idx].copy() for i in range(len(sub_blocks))]
        div_blocks = np.concatenate(tuple(div_blocks), axis=0)
        div_blocks_seg = np.concatenate(tuple(div_blocks_seg), axis=0)
        div_blocks_smpw = np.concatenate(tuple(div_blocks_smpw), axis=0)
        div_blocks_idxs = np.concatenate(tuple(div_blocks_idxs), axis=0)
        return div_blocks, div_blocks_seg, div_blocks_smpw, div_blocks_idxs
Пример #59
0
# Solver parameters.
# Defining which GPUs to use.
gpus = "0"
gpulist = gpus.split(",")
num_gpus = len(gpulist)

# Divide the mini-batch to different GPUs.
# NOTE(review): Python 2 integer-division semantics appear assumed for
# the `/` operations below; under Python 3 the un-int()'d results
# (initial iter_size, test_iter) would be floats — confirm.
batch_size = 32
accum_batch_size = 32
iter_size = accum_batch_size / batch_size
solver_mode = P.Solver.CPU
device_id = 0
batch_size_per_device = batch_size
# Splitting `gpus` always yields at least one entry, so this branch
# runs whenever the string is non-empty.
if num_gpus > 0:
  batch_size_per_device = int(math.ceil(float(batch_size) / num_gpus))
  iter_size = int(math.ceil(float(accum_batch_size) / (batch_size_per_device * num_gpus)))
  solver_mode = P.Solver.GPU
  device_id = int(gpulist[0])

# Evaluate on whole test set.
num_test_image = 1800
test_batch_size = 1
test_iter = num_test_image / test_batch_size

solver_param = {
    # Train parameters
    'base_lr': base_lr,
    'weight_decay': 0.0005,
    'lr_policy': "step",
    'stepsize': 20000,
Пример #60
0
def ceil_msp(x):
    """Round x up (toward +infinity) at its most significant place.

    E.g. 123 -> 200, 99 -> 100, -25 -> -20. The magnitude of x selects
    the power of ten of its leading digit; x is then ceiled at that
    place. Returns 0 for x == 0, which previously raised ValueError
    from math.log10(0).
    """
    if x == 0:
        return 0
    # Power of ten of the most significant digit of |x|.
    factor = 10**math.floor(math.log10(abs(x)))
    return math.ceil(x / factor) * factor