Example #1
    def get_surrounding_elevation(self, coordinate, window=4, *args, **kwargs):
        """
        Return a square matrix of size window w/ coordinate at center


        :param window: dimension of the square window to be read based on start Coordinates obtained
        :return: a square matrix with sides of length `window`
        """

        px, py = kwargs.get('px', None), kwargs.get('py', None)
        if px is None or py is None:
            pixs = self.lat_lon_to_pixel(coordinate)
            px = pixs.x
            py = pixs.y

        # Determine window
        topLeftX = px - math.floor(window / 2)
        topLeftY = py - math.floor(window / 2)
        try:  # in case the raster isn't full extent
            # todo: use negative windowing feature of rasterio read
            elevations = self.img.read(1, window=((topLeftY, topLeftY + window), (topLeftX, topLeftX + window)))
            return elevations
        except Exception as err:
            raise Exception("Couldn't retrieve given window") from err
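To make the window arithmetic concrete, here is a minimal sketch (the centered_window helper is hypothetical, not part of the class above) that reproduces the offset calculation and the ((row, row), (col, col)) tuple handed to rasterio's read:

import math

def centered_window(px, py, window=4):
    """Hypothetical helper: build the ((row_start, row_stop), (col_start, col_stop))
    window tuple used in the example above."""
    top_left_x = px - math.floor(window / 2)   # column of the window's left edge
    top_left_y = py - math.floor(window / 2)   # row of the window's top edge
    return ((top_left_y, top_left_y + window), (top_left_x, top_left_x + window))

# A 4x4 window centered on pixel (px=100, py=250) covers rows 248-251 and columns 98-101.
print(centered_window(100, 250))  # ((248, 252), (98, 102))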
Example #2

    def estimate_number_of_syllables_in_word(cls, word: str,
                                              language: 'Language'):
        if len(word) < cls.AVERAGE_SYLLABLE_LENGTH:
            syllables = 1  # Always at least 1 syllable
        else:
            syllables = len(word) / cls.AVERAGE_SYLLABLE_LENGTH
        return int(math.floor(syllables))  # Truncate the number of syllables
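A quick worked example of the floor-based truncation, assuming a hypothetical AVERAGE_SYLLABLE_LENGTH of 3 (the real constant lives on the class and is not shown here):

import math

AVERAGE_SYLLABLE_LENGTH = 3  # assumed value, for illustration only

def estimate_syllables(word: str) -> int:
    if len(word) < AVERAGE_SYLLABLE_LENGTH:
        return 1  # always at least one syllable
    return int(math.floor(len(word) / AVERAGE_SYLLABLE_LENGTH))

print(estimate_syllables("cat"))       # 1
print(estimate_syllables("elephant"))  # 8 letters / 3 -> floor(2.67) -> 2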
Example #3
    def loadSequence(self, objName):
        '''
        Render a simple test shape (circle, square or triangle) as a 64x64 image.

        :returns: NamedImgSequence containing a single Frame
        '''

        w = 64
        img = Image.new('RGB', size=(w, w), color=(0, 0, 0))
        draw = ImageDraw.Draw(img)

        b = math.floor(w / 5)

        if objName == 'circle':
            draw.ellipse([(b, b), (w - b, w - b)], fill=(255, 0, 0))

        elif objName == 'square':
            draw.rectangle([(b, b), (w - b, w - b)], fill=(255, 0, 0))

        elif objName == 'triangle':
            draw.polygon([(w / 2, b), (b, w - b), (w - b, w - b)],
                         fill=(255, 0, 0))

        del draw

        data = []
        data.append(Frame(img, None, None, None, objName, ""))
        return NamedImgSequence(objName, data)
Example #4
def linear_interpolation(colors, x):
    # x in [0, 1] selects a position along the list of color stops
    max_index = (len(colors) - 1)
    interval = (1. / max_index)
    ci1 = min(math.floor(x / interval), max_index)
    ci2 = min(math.ceil(x / interval), max_index)

    return linear_points_interpolation(colors[ci1], colors[ci2],
                                       (x - interval * ci1) / interval)
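A minimal sketch of how this might be used, with a stand-in linear_points_interpolation (assumed here to blend each channel linearly; the real helper is not shown above):

import math

def linear_points_interpolation(c1, c2, t):
    # assumed helper: channel-wise linear blend between two RGB tuples
    return tuple(a + (b - a) * t for a, b in zip(c1, c2))

def linear_interpolation(colors, x):
    max_index = len(colors) - 1
    interval = 1. / max_index
    ci1 = min(math.floor(x / interval), max_index)
    ci2 = min(math.ceil(x / interval), max_index)
    return linear_points_interpolation(colors[ci1], colors[ci2],
                                       (x - interval * ci1) / interval)

stops = [(0, 0, 0), (255, 0, 0), (255, 255, 255)]  # black -> red -> white
print(linear_interpolation(stops, 0.25))  # halfway between black and red: (127.5, 0.0, 0.0)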
Example #5
    def synthesize(self, value=None):
        """ Synthesizes the waveform.
            `value`, if given, specifies the amplitude; otherwise the waveform
            is initialized as unit-normed. """

        global _PyServer
        binIndex = math.floor(self.reducedFrequency * self.length)
        if value is None:
            self.waveform = self.mdct_value * _PyServer.getWaveForm(self.length, binIndex)
        else:
            self.waveform = value * _PyServer.getWaveForm(self.length, binIndex)
Example #6
    def synthesize(self, method=0, forceReSynthesis=True):
        """ Function that will synthesize the approximant using the list of atoms.
            This is mostly DEPRECATED. """
        if self.originalSignal is None:
            _Logger.warning("No original Signal provided")
#            return None
        
        if (self.recomposedSignal is None) or forceReSynthesis:
            synthesizedSignal = zeros(self.length)
        
            if len(self.atoms) == 0:
                _Logger.info("No Atoms")
                return None

            # first method by inverse MDCT 
            if method == 0:
                for mdctSize in self.dico.sizes:
                    mdctVec = zeros(self.length)
                    for atom in self.atoms:
                        if atom.length == mdctSize:
                            # bug fix
                            n = atom.timePosition + 1
                            frame = math.floor(float(n) / float(atom.length / 2)) + 1
#                            mdctVec[frame*float(atom.length /2) + atom.frequencyBin] += atom.amplitude
                            mdctVec[int(frame * (atom.length / 2)) + atom.frequencyBin] += atom.mdct_value
                    synthesizedSignal += imdct(mdctVec , mdctSize)
#                    synthesizedSignal += concatenate((zeros(mdctSize/4) , imdct(mdctVec , mdctSize)) )[1:-mdctSize/4+1]
                      
                      
            # second method by recursive atom synthesis - NOT WORKING
            elif method == 1:
                for atom in self.atoms:           
                    atom.synthesizeIFFT()         
                    synthesizedSignal[atom.timePosition : atom.timePosition + atom.length] += atom.waveform
            

            
            # HACK here to resynthesize using LOMP atoms
            elif method == 2:
                for atom in self.atoms:
                    atom.waveform = atom.synthesizeIFFT()
                    if atom.projectionScore is not None:
                        if atom.projectionScore < 0:
                            atom.waveform = (-math.sqrt(-atom.projectionScore / sum(atom.waveform ** 2))) * atom.waveform
                        else:
                            atom.waveform = (math.sqrt(atom.projectionScore / sum(atom.waveform ** 2))) * atom.waveform
                                         
                    synthesizedSignal[atom.timePosition : atom.timePosition + atom.length] += atom.waveform
            
            
            self.recomposedSignal = signals.Signal(synthesizedSignal , self.samplingFrequency)
            #return self.recomposedSignal
        # other case: we just give the existing synthesized Signal.
        return self.recomposedSignal
Example #7
def GetSecNum(data, xcoord, ycoord, nsecs, FitLine):
    # Map an x coordinate onto one of `nsecs` equal-width sections spanning the data
    xmax = data.shape[0]

    xfraction = xcoord / xmax
    secsize = 1. / nsecs

    secnum = int(math.floor(xfraction / secsize))

    return secnum
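A quick check of the section arithmetic, calling GetSecNum from the example above on a hypothetical 100-row array split into 4 sections (the ycoord and FitLine arguments are unused by the snippet, so placeholders are passed):

import math
import numpy as np

data = np.zeros((100, 2))                  # hypothetical data: 100 rows
print(GetSecNum(data, 24, 0.0, 4, None))   # -> 0  (24/100 = 0.24 falls in the first quarter)
print(GetSecNum(data, 25, 0.0, 4, None))   # -> 1
print(GetSecNum(data, 99, 0.0, 4, None))   # -> 3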
Example #8
    def estimate_number_of_syllables_in_word_pyphen(cls, word: str, language: 'Language'):

        if language.code == "zh-CN":
            if len(word) < cls.AVERAGE_SYLLABLE_LENGTH:
                syllables = 1  # Always at least 1 syllable
            else:
                syllables = len(word) / cls.AVERAGE_SYLLABLE_LENGTH
            return int(math.floor(syllables))  # Truncate the number of syllables
        else:
            dic = pyphen.Pyphen(lang=language.code)
            syllables = len(dic.positions(word)) + 1
            return syllables
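For the non-Chinese branch, pyphen counts dictionary hyphenation points, which only approximate syllable boundaries; a small sketch of that idea (requires a language code pyphen ships a dictionary for):

import pyphen

dic = pyphen.Pyphen(lang="en_US")
word = "hyphenation"
breaks = dic.positions(word)                # indices where a hyphen may be inserted
print(dic.inserted(word), len(breaks) + 1)  # estimated syllable count, as in the snippet above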
Example #9
def max_batch_size(gpu_ram_bytes:int,
                   model:models.Model, scalar_width:int=4,
                   default_max:int=32,
                   usable=0.95, verbose=True)->int:
    """
    See table 2 in https://www.microsoft.com/en-us/research/uploads/prod/2020/05/dnnmem.pdf for
    more categories of usage than are dealt with here; and for a proposal for a tool to do away
    with this estimation.
    See https://arxiv.org/1609.04836 for the suggestion that anything over 32 is probably
    bad anyway.
    :param gpu_ram_bytes: The RAM available to your graphics processor. For dual-GPU cards
    this should still be the memory of a single GPU unless you configure for multiple workers
    :param model: a keras Model which can be inspected to find its weights and inputs
    :param scalar_width: the width of your datatype in bytes. e.g. 4 for float32, 8 for float64
    :param default_max: Cut-off beyond which we assume that bigger batches will
    degrade generalisability (e.g. https://arxiv.org/1609.04836)
    :param usable: defaults to 0.95 The fraction of GPU memory that should be considered available
    for your model and inputs. Usually less than 100% because of framework, alignment loss,
    buffers, runtime context etc.
    :param verbose: print calculation
    :return: an integer which is our best guess for the biggest power of 2 batch size that will
    fit into your GPU memory at one go.
    """
    assert 0 < gpu_ram_bytes, 'required: 0 < gpu_ram_bytes, you said %r' % gpu_ram_bytes
    assert 0 < usable, 'required: 0 < usable, you said %r' % usable
    assert 0 < scalar_width, 'required: 0< model_dtype_width, you said %r' % scalar_width
    assert model and model.layers, 'model.layers must not be None or empty'
    warnif(usable>1, "You've set usable GPU memory usage to more than 100%")
    all_inputs = sum([ reduce(operator.mul,[dim if dim else 1 for dim in l.input_shape])
                        for l in model.layers])
    outputs = reduce(operator.mul,
                     [dim if dim else 1 for dim in model.layers[-1].output_shape])
    tensors_size = all_inputs + outputs * 3  # outputs, labels, output vs loss gradients
    num_ephemeral = tensors_size  # Actual value is ‘we have no idea, it depends on implementation’
    num_weights = sum(
            [a.shape.num_elements()
             for a in model.trainable_weights + model.non_trainable_weights])
    num_gradients = num_weights
    num_scalars = tensors_size + num_weights + num_gradients + num_ephemeral

    max_size = int(usable * gpu_ram_bytes / scalar_width / num_scalars)
    best_size = min(2 ** int(math.floor(math.log(max_size, 2))), default_max)
    best_size = max(1, best_size)
    if verbose:
        print('Found Inputs+Outputs*3={} scalars. Doubling it for ephemerals. '
              'Weights,Gradients:{} scalars each. Scalar width={}. '
              'Given Usable={}, max batch size for {}GB is {}, best size is {}'\
              .format(tensors_size, num_weights, scalar_width,
                      int(usable*100), gpu_ram_bytes/GB,
                      max_size, best_size))
    return best_size
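The last two lines round the raw estimate down to a power of two and cap it at default_max; a tiny standalone illustration of the floor/log idiom (the input values are made up):

import math

def largest_power_of_two_at_most(n: int) -> int:
    # floor(log2(n)) is the exponent of the largest power of two <= n
    return 2 ** int(math.floor(math.log(n, 2)))

for max_size in (1, 3, 47, 100):
    print(max_size, "->", largest_power_of_two_at_most(max_size))
# 1 -> 1, 3 -> 2, 47 -> 32, 100 -> 64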
Example #10
def set_gains(g1, g2, g3, g4):
    """
    Set the gains for each core in the order a, b, c, d.
    """
    t = float(g1)
    print(math.floor(.5 + t * 255 / 36.) + 0x80, end=' ')
    adc5g.set_spi_gain(roach2, zdok, 1, t)
    t = float(g2)
    print(math.floor(.5 + t * 255 / 36.) + 0x80, end=' ')
    adc5g.set_spi_gain(roach2, zdok, 2, t)
    t = float(g3)
    print(math.floor(.5 + t * 255 / 36.) + 0x80, end=' ')
    adc5g.set_spi_gain(roach2, zdok, 3, t)
    t = float(g4)
    print(math.floor(.5 + t * 255 / 36.) + 0x80)
    adc5g.set_spi_gain(roach2, zdok, 4, t)
Example #11
def set_phase(p1, p2, p3, p4):
  """
  Set the phases (delays) for each core in the order a, b, c, d.
  """
  t = float(p1)
  print(math.floor(.5+t*255/28.)+0x80, end=' ')
  adc5g.set_spi_phase(roach2,zdok, 1, t)
  t = float(p2)
  print(math.floor(.5+t*255/28.)+0x80, end=' ')
  adc5g.set_spi_phase(roach2,zdok, 2, t)
  t = float(p3)
  print(math.floor(.5+t*255/28.)+0x80, end=' ')
  adc5g.set_spi_phase(roach2,zdok, 3, t)
  t = float(p4)
  print(math.floor(.5+t*255/28.)+0x80)
  adc5g.set_spi_phase(roach2,zdok, 4, t)
Example #12
def set_offs(o1, o2, o3, o4):
  """
  Set the offsets for each core in the order a, b, c, d.
  """
  t = float(o1)
  print(math.floor(.5+t*255/100.)+0x80, end=' ')
  adc5g.set_spi_offset(roach2,zdok, 1, t)
  t = float(o2)
  print(math.floor(.5+t*255/100.)+0x80, end=' ')
  adc5g.set_spi_offset(roach2,zdok, 2, t)
  t = float(o3)
  print(math.floor(.5+t*255/100.)+0x80, end=' ')
  adc5g.set_spi_offset(roach2,zdok, 3, t)
  t = float(o4)
  print(math.floor(.5+t*255/100.)+0x80)
  adc5g.set_spi_offset(roach2,zdok, 4, t)
Example #13
def set_gains(g1, g2, g3, g4):
  """
  Set the gains for each core in the order a, b, c, d.
  """
  t = float(g1)
  print(math.floor(.5+t*255/36.)+0x80, end=' ')
  adc5g.set_spi_gain(roach2,zdok, 1, t)
  t = float(g2)
  print(math.floor(.5+t*255/36.)+0x80, end=' ')
  adc5g.set_spi_gain(roach2,zdok, 2, t)
  t = float(g3)
  print(math.floor(.5+t*255/36.)+0x80, end=' ')
  adc5g.set_spi_gain(roach2,zdok, 3, t)
  t = float(g4)
  print(math.floor(.5+t*255/36.)+0x80)
  adc5g.set_spi_gain(roach2,zdok, 4, t)
Example #14
def set_phase(p1, p2, p3, p4):
    """
    Set the phases (delays) for each core in the order a, b, c, d.
    """
    t = float(p1)
    print(math.floor(.5 + t * 255 / 28.) + 0x80, end=' ')
    adc5g.set_spi_phase(roach2, zdok, 1, t)
    t = float(p2)
    print(math.floor(.5 + t * 255 / 28.) + 0x80, end=' ')
    adc5g.set_spi_phase(roach2, zdok, 2, t)
    t = float(p3)
    print(math.floor(.5 + t * 255 / 28.) + 0x80, end=' ')
    adc5g.set_spi_phase(roach2, zdok, 3, t)
    t = float(p4)
    print(math.floor(.5 + t * 255 / 28.) + 0x80)
    adc5g.set_spi_phase(roach2, zdok, 4, t)
Example #15
def set_offs(o1, o2, o3, o4):
    """
    Set the offsets for each core in the order a, b, c, d.
    """
    t = float(o1)
    print(math.floor(.5 + t * 255 / 100.) + 0x80, end=' ')
    adc5g.set_spi_offset(roach2, zdok, 1, t)
    t = float(o2)
    print(math.floor(.5 + t * 255 / 100.) + 0x80, end=' ')
    adc5g.set_spi_offset(roach2, zdok, 2, t)
    t = float(o3)
    print(math.floor(.5 + t * 255 / 100.) + 0x80, end=' ')
    adc5g.set_spi_offset(roach2, zdok, 3, t)
    t = float(o4)
    print(math.floor(.5 + t * 255 / 100.) + 0x80)
    adc5g.set_spi_offset(roach2, zdok, 4, t)
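Each of these adc5g helpers prints the 8-bit register code it is about to program; the sketch below reproduces just that rounding arithmetic, with the full-scale constants (36, 28, 100) taken from the snippets above and the hardware call left out:

import math

def register_code(value, full_scale):
    # mirror the print expression above: scale, round half-up with floor(.5 + x), bias by 0x80
    return int(math.floor(.5 + value * 255 / full_scale)) + 0x80

print(register_code(0.0, 36.))    # 128 (0x80): mid-scale, no adjustment
print(register_code(10.0, 36.))   # 199
print(register_code(-10.0, 36.))  # 57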
Example #16
    def toSparseArray(self):
        """ Returns the approximant as a sparse dictionary object , key is the index of the atom and value is its amplitude"""
        sparseArray = {}
        # quickly creates a dictionary for block numbering
        blockIndexes = {}
        for i in range(len(self.dico.sizes)):
            blockIndexes[self.dico.sizes[i]] = i
            
        for atom in self.atoms:
            block = blockIndexes[atom.length]
            n = atom.timePosition + 1
            frame = math.floor(float(n) / float(atom.length / 2)) + 1
#            sparseArray[int(block*self.length +  frame*float(atom.length /2) + atom.frequencyBin)] = (atom.mdct_value , frame*float(atom.length /2) - atom.timePosition)
            sparseArray[int(block * self.length + frame * (atom.length / 2) + atom.frequencyBin)] = (atom.mdct_value, atom.timePosition)
        return sparseArray
Example #17
def get_sim_data(freq, exact=True):
  """
  Make a simulated snapshot of data
  """
  if exact:
    offs = [0,0,0,0]
    gains = [1,1,1,1]
  else:
    offs = [.2, .3, -.2, -.1]
    gains = [1.001, .9984, .999, 1.002]
  del_phi = 2 * math.pi * freq / samp_freq
  data = np.empty((numpoints), dtype='int32')
  phase = 2*math.pi * np.random.uniform()
  for n in range(numpoints):
    core = n&3
    data[n] = (math.floor(0.5 + 119.0 * math.sin(del_phi * n + phase) + \
        offs[core]))*gains[core]
  return data
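A hedged sketch of driving this simulator: samp_freq and numpoints are module-level globals in the original, so the values below are invented purely for illustration:

import math
import numpy as np

samp_freq = 5000.0   # assumed sample rate, same units as freq
numpoints = 16384    # assumed snapshot length

# with get_sim_data from the example above in scope:
data = get_sim_data(freq=312.5, exact=False)
print(data[:8], data.dtype)   # a sampled sine with per-core offsets/gains, truncated to int32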
Example #18
def get_sim_data(freq, exact=True):
    """
  Make a simulated snapshot of data
  """
    if exact:
        offs = [0, 0, 0, 0]
        gains = [1, 1, 1, 1]
    else:
        offs = [.2, .3, -.2, -.1]
        gains = [1.001, .9984, .999, 1.002]
    del_phi = 2 * math.pi * freq / samp_freq
    data = np.empty((numpoints), dtype='int32')
    phase = 2 * math.pi * np.random.uniform()
    for n in range(numpoints):
        core = n & 3
        data[n] = (math.floor(0.5 + 119.0 * math.sin(del_phi * n + phase) + \
            offs[core]))*gains[core]
    return data
Example #19
    def toDico(self):
        """ Returns the approximant as a sparse dictionary object , 
        key is the index of the atom and values are atom objects"""
        dico = {}
            
        for atom in self.atoms:
            block = [i for i in range(len(self.dico.sizes)) if self.dico.sizes[i]==atom.length][0]
            n = atom.timePosition + 1
            frame = math.floor(float(n) / float(atom.length / 2)) + 1
#            sparseArray[int(block*self.length +  frame*float(atom.length /2) + atom.frequencyBin)] = (atom.mdct_value , frame*float(atom.length /2) - atom.timePosition)
#            dico[int(block*self.length +  frame*float(atom.length /2) + atom.frequencyBin)] = atom
            key = int(block * self.length + frame * (atom.length / 2) + atom.frequencyBin)
            if key in dico:
                dico[key].append(atom)
            else:
                dico[key] = [atom]
                
        return dico
Example #20

    def generate_steps(self, values: List[int]) -> List[Step]:
        """
        Fulfills the GifStrategy.generate_steps contract using Comb Sort

        Args:
            values (List[int]): the values to be sorted.

        Returns:
            steps (List[Step]): the steps used in sorting.
        """
        steps = []
        # the shrink factor k is generally 1.3 in comb sort
        shrink = 1.3
        gap = len(values)
        exchange_occurred = True
        num_iterations = 0
        while exchange_occurred:
            gap /= shrink
            gap = math.floor(gap)
            if gap <= 1:
                exchange_occurred = False
                gap = 1
            for i in range(len(values) - gap):
                remove_cursor = Change("remove_cursor", [i + gap - 1])
                add_cursor = Change("add_cursor", [i + gap])
                steps.append(Step(i, [remove_cursor, add_cursor]))
                if values[i] > values[i + gap]:
                    values[i], values[i + gap] = values[i + gap], values[i]
                    exchange = Change("exchange", [i, i + gap])
                    steps.append(Step(i, [exchange]))
                    exchange_occurred = True
                if i == len(values) - gap - 1:
                    remove_final_cursor = Change("remove_cursor", [i+gap])
                    steps.append(Step(i, [remove_final_cursor]))
        sorted_vals = list(range(0, len(values)))
        sorted_vals.insert(0, "sorted")
        sort = Change("color", sorted_vals)
        steps.append(Step(0, [copy(sort)]))
        return steps
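The Change and Step objects belong to the surrounding GIF library, but the comb sort core can be checked on its own; a minimal sketch of the shrinking-gap loop (standard comb sort, without the step recording):

import math

def comb_sort(values):
    values = list(values)
    shrink = 1.3
    gap = len(values)
    swapped = True
    while gap > 1 or swapped:
        gap = max(1, math.floor(gap / shrink))   # shrink the gap, never below 1
        swapped = False
        for i in range(len(values) - gap):
            if values[i] > values[i + gap]:
                values[i], values[i + gap] = values[i + gap], values[i]
                swapped = True
    return values

print(comb_sort([5, 1, 4, 2, 8, 0, 2]))  # [0, 1, 2, 2, 4, 5, 8]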
Example #21

def _NeRegressionGraphCalc(dataVctrs, expectedSlope=None, popTable=None):
    #get linear regression stats for all datasets
    LineStats = []
    for line in list(dataVctrs.values()):
        data = line_regress(line)
        LineStats.append(data)
    #flatten the array
    all_points = [val for sublist in list(dataVctrs.values())  for val in sublist]
    #unzip to obtain x and y value vectors for all points
    xVals, yVals = list(zip(*all_points))

    minX = min(xVals)
    maxX = max(xVals)+1
    xVctr = list(set(xVals))
    if maxX - minX > 1:

        xVctr = list(range(int(math.floor(minX)), int(math.ceil(maxX))))

    lineVctrs = []
    colorVctr = []
    styleVctr = []

    #creates expected slope line for comparisons
    if expectedSlope:
        expectedPoints = []
        if expectedSlope == "pop":
            if popTable:
                averagePopPoints = []
                all_points = [val for sublist in popTable for val in popTable[sublist]]
                xVals, yVals = list(zip(*all_points))
                xSet = set(xVals)
                for x in xSet:
                    pointYSet = [point[1] for point in all_points if point[0] == x]
                    averageY = mean(pointYSet)
                    averagePopPoints.append((x, averageY))
                expectedPoints = averagePopPoints
        else:
            #get all slope and intercept values to get means
            slopes = []
            intercepts = []
            for statDict in LineStats:
                slopes.append(statDict["slope"])
                intercepts.append(statDict["intercept"])

            #get expected line Stats
            expectedSlope,expectedIntercept = _getExpectedLineStats(slopes, intercepts, xVctr,expectedSlope)
            expectedPoints = _getGraphLine(expectedSlope, expectedIntercept, xVctr)


        #make expected line for plotting
        if len(expectedPoints)>0:
            lineVctrs.append(expectedPoints)
            colorVctr.append("r")
            styleVctr.append("-")

    for statDict in LineStats:
        slope = statDict["slope"]
        intercept = statDict["intercept"]
        if not isnan(slope):
            linePoints  = _getGraphLine(slope, intercept, xVctr)
            lineVctrs.append(linePoints)
            colorVctr.append("b")
            styleVctr.append("--")
    return lineVctrs, colorVctr, styleVctr
Example #22
def convert_size(size_bytes):
    if size_bytes == 0:
        return "0B"  # pragma: no cover
    size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    i = int(math.floor(math.log(size_bytes, 1024)))
    return "%s %s" % (int(size_bytes / math.pow(1024, i)), size_name[i])