def perform(self, node, inp, out):
    """Max-pool a 4D input tensor over its last two dimensions."""
    x, = inp
    z, = out
    if len(x.shape) != 4:
        raise NotImplementedError("DownsampleFactorMax requires 4D input for now")
    z_shape = self.out_shape(x.shape, self.ds, self.ignore_border, self.st)
    if (z[0] is None) or (z[0].shape != z_shape):
        z[0] = numpy.zeros(self.out_shape(x.shape, self.ds,
                                          self.ignore_border, self.st))
        z[0] = theano._asarray(z[0], dtype=x.dtype)
    zz = z[0]
    # zz needs to be initialized with -inf for the following to work
    zz -= numpy.inf
    # number of pooling output rows
    pr = zz.shape[-2]
    # number of pooling output cols
    pc = zz.shape[-1]
    ds0, ds1 = self.ds
    st0, st1 = self.st
    img_rows = x.shape[-2]
    img_cols = x.shape[-1]
    for n in xrange(x.shape[0]):
        for k in xrange(x.shape[1]):
            for r in xrange(pr):
                row_st = r * st0
                row_end = __builtin__.min(row_st + ds0, img_rows)
                for c in xrange(pc):
                    col_st = c * st1
                    col_end = __builtin__.min(col_st + ds1, img_cols)
                    for row_ind in xrange(row_st, row_end):
                        for col_ind in xrange(col_st, col_end):
                            zz[n, k, r, c] = __builtin__.max(
                                zz[n, k, r, c], x[n, k, row_ind, col_ind])
def perform(self, node, inp, out):
    x, maxout, ggx = inp
    z, = out
    if len(x.shape) != 4:
        raise NotImplementedError(
            'DownsampleFactorMaxGradGrad requires 4D input for now')
    z_shape = self.out_shape(x.shape, self.ds, self.ignore_border, self.st)
    if (z[0] is None) or (z[0].shape != z_shape):
        z[0] = numpy.zeros(self.out_shape(x.shape, self.ds,
                                          self.ignore_border, self.st),
                           dtype=x.dtype)
    ggz = z[0]
    # number of pooling output rows
    pr = ggz.shape[-2]
    # number of pooling output cols
    pc = ggz.shape[-1]
    ds0, ds1 = self.ds
    st0, st1 = self.st
    img_rows = x.shape[-2]
    img_cols = x.shape[-1]
    for n in xrange(x.shape[0]):
        for k in xrange(x.shape[1]):
            for r in xrange(pr):
                row_st = r * st0
                row_end = __builtin__.min(row_st + ds0, img_rows)
                for c in xrange(pc):
                    col_st = c * st1
                    col_end = __builtin__.min(col_st + ds1, img_cols)
                    for row_ind in xrange(row_st, row_end):
                        for col_ind in xrange(col_st, col_end):
                            if (maxout[n, k, r, c] == x[n, k, row_ind, col_ind]):
                                ggz[n, k, r, c] = ggx[n, k, row_ind, col_ind]
def CreateSubstring(df, inCol, outCol, strLen, delim, startPos, endPos, makeList=False):
    # Create a substring of a delimited string column.
    if endPos <= startPos:
        df = df.withColumn(outCol, lit(''))
    # clamp the requested positions to the valid range
    startPos = builtin.min(builtin.max(0, startPos), strLen)
    endPos = builtin.min(builtin.max(startPos, endPos), strLen)
    # if one end of the substring coincides with the beginning
    if startPos == 0:
        df = df.withColumn(outCol, substring_index(inCol, delim, endPos))
    # if one end of the substring coincides with the end
    elif endPos == strLen:
        df = df.withColumn(outCol, substring_index(inCol, delim, startPos - endPos))
    # if the substring lies in the middle
    else:
        # extract from the beginning up to endPos, then take the right end
        df = df.withColumn(outCol, substring_index(inCol, delim, endPos)) \
               .withColumn(outCol, substring_index(outCol, delim, startPos - endPos))
    # if the substring should be broken into a list
    if makeList:
        df = df.withColumn(outCol, split(outCol, delim))
    return df
def perform(self, node, inp, out):
    x, maxout, gz = inp
    gx_stg, = out
    gx = numpy.zeros_like(x)
    # number of pooling output rows
    pr = maxout.shape[-2]
    # number of pooling output cols
    pc = maxout.shape[-1]
    ds0, ds1 = self.ds
    st0, st1 = self.st
    img_rows = x.shape[-2]
    img_cols = x.shape[-1]
    for n in xrange(x.shape[0]):
        for k in xrange(x.shape[1]):
            for r in xrange(pr):
                row_st = r * st0
                row_end = __builtin__.min(row_st + ds0, img_rows)
                for c in xrange(pc):
                    col_st = c * st1
                    col_end = __builtin__.min(col_st + ds1, img_cols)
                    for row_ind in xrange(row_st, row_end):
                        for col_ind in xrange(col_st, col_end):
                            if (maxout[n, k, r, c] == x[n, k, row_ind, col_ind]):
                                gx[n, k, row_ind, col_ind] += gz[n, k, r, c]
    gx_stg[0] = gx
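# The gradient loop above routes each upstream gradient gz[n, k, r, c] back to
# every input cell that ties the block maximum. A minimal standalone sketch of
# the same routing, assuming plain numpy (max_pool_grad_ref is a hypothetical
# name, not part of the code above):
import numpy

def max_pool_grad_ref(x, maxout, gz, ds, st):
    gx = numpy.zeros_like(x)
    for n in range(x.shape[0]):
        for k in range(x.shape[1]):
            for r in range(maxout.shape[-2]):
                row_st = r * st[0]
                row_end = min(row_st + ds[0], x.shape[-2])
                for c in range(maxout.shape[-1]):
                    col_st = c * st[1]
                    col_end = min(col_st + ds[1], x.shape[-1])
                    for i in range(row_st, row_end):
                        for j in range(col_st, col_end):
                            # only positions that attain the max receive gradient
                            if maxout[n, k, r, c] == x[n, k, i, j]:
                                gx[n, k, i, j] += gz[n, k, r, c]
    return gx

x = numpy.arange(16, dtype='float64').reshape(1, 1, 4, 4)
maxout = numpy.array([[[[5., 7.], [13., 15.]]]])
gz = numpy.ones((1, 1, 2, 2))
print max_pool_grad_ref(x, maxout, gz, ds=(2, 2), st=(2, 2))
# gradient lands only at positions (1, 1), (1, 3), (3, 1), (3, 3)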
def numpy_max_pool_2d_stride(input, ds, ignore_border=False, st=None):
    '''Helper function, implementing max_pool_2d in pure numpy.

    This function provides the st input to indicate the stride size
    for the pooling regions. If not indicated, st == ds.'''
    if len(input.shape) < 2:
        raise NotImplementedError('input should have at least 2 dim,'
                                  ' shape is %s' % str(input.shape))
    if st is None:
        st = ds
    img_rows = input.shape[-2]
    img_cols = input.shape[-1]

    out_r = 0
    out_c = 0
    if img_rows - ds[0] >= 0:
        out_r = (img_rows - ds[0]) // st[0] + 1
    if img_cols - ds[1] >= 0:
        out_c = (img_cols - ds[1]) // st[1] + 1

    if not ignore_border:
        if out_r > 0:
            if img_rows - ((out_r - 1) * st[0] + ds[0]) > 0:
                rr = img_rows - out_r * st[0]
                if rr > 0:
                    out_r += 1
        else:
            if img_rows > 0:
                out_r += 1
        if out_c > 0:
            if img_cols - ((out_c - 1) * st[1] + ds[1]) > 0:
                cr = img_cols - out_c * st[1]
                if cr > 0:
                    out_c += 1
        else:
            if img_cols > 0:
                out_c += 1

    out_shp = list(input.shape[:-2])
    out_shp.append(out_r)
    out_shp.append(out_c)

    output_val = numpy.zeros(out_shp)
    for k in numpy.ndindex(*input.shape[:-2]):
        for i in range(output_val.shape[-2]):
            ii_st = i * st[0]
            ii_end = __builtin__.min(ii_st + ds[0], img_rows)
            for j in range(output_val.shape[-1]):
                jj_st = j * st[1]
                jj_end = __builtin__.min(jj_st + ds[1], img_cols)
                patch = input[k][ii_st:ii_end, jj_st:jj_end]
                output_val[k][i, j] = numpy.max(patch)
    return output_val
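# A quick usage sketch of numpy_max_pool_2d_stride above, assuming numpy and
# __builtin__ are imported at module level as the helper expects. A 4x4 input
# with 2x2 windows and stride 2 reduces to the four block maxima:
x = numpy.arange(16, dtype='float64').reshape(4, 4)
out = numpy_max_pool_2d_stride(x, ds=(2, 2), st=(2, 2))
print out
# [[  5.   7.]
#  [ 13.  15.]]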
def numpy_max_pool_2d_stride_padding(x, ds, ignore_border=True, st=None,
                                     padding=(0, 0), mode='max'):
    pad_h = padding[0]
    pad_w = padding[1]
    h = x.shape[-2]
    w = x.shape[-1]
    assert ds[0] > pad_h
    assert ds[1] > pad_w

    def pad_img(x):
        y = numpy.zeros((x.shape[0], x.shape[1],
                         x.shape[2] + pad_h * 2,
                         x.shape[3] + pad_w * 2),
                        dtype=x.dtype)
        y[:, :, pad_h:(x.shape[2] + pad_h), pad_w:(x.shape[3] + pad_w)] = x
        return y

    img_rows = h + 2 * pad_h
    img_cols = w + 2 * pad_w
    out_r = (img_rows - ds[0]) // st[0] + 1
    out_c = (img_cols - ds[1]) // st[1] + 1
    out_shp = list(x.shape[:-2])
    out_shp.append(out_r)
    out_shp.append(out_c)
    ds0, ds1 = ds
    st0, st1 = st
    output_val = numpy.zeros(out_shp)
    y = pad_img(x)
    func = numpy.max
    if mode == 'sum':
        func = numpy.sum
    elif mode != 'max':
        func = numpy.average
    inc_pad = mode == 'average_inc_pad'

    for k in numpy.ndindex(*x.shape[:-2]):
        for i in range(output_val.shape[-2]):
            ii_st = i * st[0]
            ii_end = __builtin__.min(ii_st + ds[0], img_rows)
            if not inc_pad:
                ii_st = __builtin__.max(ii_st, pad_h)
                ii_end = __builtin__.min(ii_end, h + pad_h)
            for j in range(output_val.shape[-1]):
                jj_st = j * st[1]
                jj_end = __builtin__.min(jj_st + ds[1], img_cols)
                if not inc_pad:
                    jj_st = __builtin__.max(jj_st, pad_w)
                    jj_end = __builtin__.min(jj_end, w + pad_w)
                patch = y[k][ii_st:ii_end, jj_st:jj_end]
                output_val[k][i, j] = func(patch)
    return output_val
def __init__(self, initial_width, initial_height, visible_meters, length, mass):
    QGraphicsScene.__init__(self)
    self.visible_meters = visible_meters
    self.initial_width = initial_width
    self.initial_height = initial_height
    self.pend_radius = min(0.5 * mass, 0.2)
    self.pole_length = length
    self.unit = min(self.initial_width, self.initial_height) / self.visible_meters
    self.__create_scene()
    self.update_state(0.0, 0.0)
def perform(self, node, inp, out):
    """Stochastic pooling: replace each block with its probability-weighted average."""
    x, = inp
    z, = out
    if len(x.shape) != 4:
        raise NotImplementedError(
            'DownsampleFactorStoch requires 4D input for now')
    z_shape = self.out_shape(x.shape, self.ds, self.ignore_border, self.st)
    if (z[0] is None) or (z[0].shape != z_shape):
        z[0] = numpy.zeros(self.out_shape(x.shape, self.ds,
                                          self.ignore_border, self.st))
        z[0] = theano._asarray(z[0], dtype=x.dtype)
    zz = z[0]
    # zz needs to be initialized with -inf for the following to work
    zz -= numpy.inf
    # number of pooling output rows
    pr = zz.shape[-2]
    # number of pooling output cols
    pc = zz.shape[-1]
    ds0, ds1 = self.ds
    st0, st1 = self.st
    img_rows = x.shape[-2]
    img_cols = x.shape[-1]
    for n in xrange(x.shape[0]):
        for k in xrange(x.shape[1]):
            for r in xrange(pr):
                row_st = r * st0
                row_end = __builtin__.min(row_st + ds0, img_rows)
                for c in xrange(pc):
                    col_st = c * st1
                    col_end = __builtin__.min(col_st + ds1, img_cols)
                    # deterministic max pooling, kept for reference:
                    # for row_ind in xrange(row_st, row_end):
                    #     for col_ind in xrange(col_st, col_end):
                    #         zz[n, k, r, c] = \
                    #             __builtin__.max(zz[n, k, r, c],
                    #                             x[n, k, row_ind, col_ind])
                    block = x[n, k, row_st:row_end, col_st:col_end]
                    block = block.flatten()
                    # print block, "block"
                    # calculate probabilities
                    probs = block / numpy.sum(block)
                    weighted_avg = numpy.sum(probs * block)
                    # print weighted_avg
                    # sample according to probabilities
                    # stoch_value = numpy.random.choice(block, replace=False, p=probs)
                    # max_value = block.flat[abs(block).argmax()]  # absolute max value
                    # stoch_index = numpy.where(block == stoch_value)[0][0]
                    # output[x_dim, y_dim] = stoch_value
                    zz[n, k, r, c] = weighted_avg
def perform(self, node, inp, out):
    """Stochastic pooling: sample one activation per block according to |activation|."""
    x, = inp
    z, = out
    if len(x.shape) != 4:
        raise NotImplementedError(
            'DownsampleFactorStoch requires 4D input for now')
    z_shape = self.out_shape(x.shape, self.ds, self.ignore_border, self.st)
    if (z[0] is None) or (z[0].shape != z_shape):
        z[0] = numpy.zeros(self.out_shape(x.shape, self.ds,
                                          self.ignore_border, self.st))
        z[0] = theano._asarray(z[0], dtype=x.dtype)
    zz = z[0]
    # zz needs to be initialized with -inf for the following to work
    zz -= numpy.inf
    # number of pooling output rows
    pr = zz.shape[-2]
    # number of pooling output cols
    pc = zz.shape[-1]
    ds0, ds1 = self.ds
    st0, st1 = self.st
    img_rows = x.shape[-2]
    img_cols = x.shape[-1]
    for n in xrange(x.shape[0]):
        for k in xrange(x.shape[1]):
            for r in xrange(pr):
                row_st = r * st0
                row_end = __builtin__.min(row_st + ds0, img_rows)
                for c in xrange(pc):
                    col_st = c * st1
                    col_end = __builtin__.min(col_st + ds1, img_cols)
                    # deterministic per-element pooling, kept for reference:
                    # for row_ind in xrange(row_st, row_end):
                    #     for col_ind in xrange(col_st, col_end):
                    #         zz[n, k, r, c] = \
                    #             __builtin__.min(zz[n, k, r, c],
                    #                             x[n, k, row_ind, col_ind])
                    block = x[n, k, row_st:row_end, col_st:col_end]
                    block = numpy.float64(block.flatten())
                    # calculate probabilities from the absolute activations
                    probs = numpy.abs(block) / numpy.sum(numpy.abs(block))
                    # earlier normalization attempts, kept for reference:
                    # probs = probs / probs.sum()
                    # probs = probs[:, numpy.newaxis]
                    # probs = sklearn.preprocessing.normalize(probs, axis=0, norm='l1')
                    # sample one activation according to the probabilities
                    zz[n, k, r, c] = numpy.random.choice(block, replace=False, p=probs)
def perform(self, node, inp, out):
    x, = inp
    z, = out
    if len(x.shape) != 4:
        raise NotImplementedError(
            'DownsampleFactorMax requires 4D input for now')
    z_shape = self.out_shape(x.shape, self.ds, self.ignore_border,
                             self.st, self.padding)
    if (z[0] is None) or (z[0].shape != z_shape):
        z[0] = numpy.empty(z_shape, dtype=x.dtype)
    zz = z[0]
    # number of pooling output rows
    pr = zz.shape[-2]
    # number of pooling output cols
    pc = zz.shape[-1]
    ds0, ds1 = self.ds
    st0, st1 = self.st
    pad_h = self.padding[0]
    pad_w = self.padding[1]
    img_rows = x.shape[-2] + 2 * pad_h
    img_cols = x.shape[-1] + 2 * pad_w
    inc_pad = self.mode == 'average_inc_pad'

    # pad the image
    if self.padding != (0, 0):
        y = numpy.zeros(
            (x.shape[0], x.shape[1], img_rows, img_cols),
            dtype=x.dtype)
        y[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)] = x
    else:
        y = x
    func = numpy.max
    if self.mode == 'sum':
        func = numpy.sum
    elif self.mode != 'max':
        func = numpy.average

    for n in xrange(x.shape[0]):
        for k in xrange(x.shape[1]):
            for r in xrange(pr):
                row_st = r * st0
                row_end = __builtin__.min(row_st + ds0, img_rows)
                if not inc_pad:
                    row_st = __builtin__.max(row_st, self.padding[0])
                    row_end = __builtin__.min(row_end, x.shape[-2] + pad_h)
                for c in xrange(pc):
                    col_st = c * st1
                    col_end = __builtin__.min(col_st + ds1, img_cols)
                    if not inc_pad:
                        col_st = __builtin__.max(col_st, self.padding[1])
                        col_end = __builtin__.min(col_end, x.shape[-1] + pad_w)
                    zz[n, k, r, c] = func(y[n, k, row_st:row_end,
                                            col_st:col_end])
def _make_sparse_diagonal(tile, ex):
    data = sp.lil_matrix(ex.shape, dtype=tile.dtype)
    if ex.ul[0] >= ex.ul[1] and ex.ul[0] < ex.lr[1]:  # below the diagonal
        for i in range(ex.ul[0], __builtin__.min(ex.lr[0], ex.lr[1])):
            data[i - ex.ul[0], i - ex.ul[1]] = 1
    elif ex.ul[1] >= ex.ul[0] and ex.ul[1] < ex.lr[0]:  # above the diagonal
        for j in range(ex.ul[1], __builtin__.min(ex.lr[1], ex.lr[0])):
            data[j - ex.ul[0], j - ex.ul[1]] = 1
    return [(ex, data)]
def perform(self, node, inp, out):
    x, = inp
    z, = out
    if len(x.shape) != 4:
        raise NotImplementedError(
            'DownsampleFactorMax requires 4D input for now')
    z_shape = self.out_shape(x.shape, self.ds, self.ignore_border,
                             self.st, self.padding)
    if (z[0] is None) or (z[0].shape != z_shape):
        z[0] = numpy.empty(z_shape, dtype=x.dtype)
    zz = z[0]
    # number of pooling output rows
    pr = zz.shape[-2]
    # number of pooling output cols
    pc = zz.shape[-1]
    ds0, ds1 = self.ds
    st0, st1 = self.st
    pad_h = self.padding[0]
    pad_w = self.padding[1]
    img_rows = x.shape[-2] + 2 * pad_h
    img_cols = x.shape[-1] + 2 * pad_w
    inc_pad = self.mode == 'average_inc_pad'

    # pad the image
    if self.padding != (0, 0):
        y = numpy.zeros((x.shape[0], x.shape[1], img_rows, img_cols),
                        dtype=x.dtype)
        y[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)] = x
    else:
        y = x
    func = numpy.max
    if self.mode == 'sum':
        func = numpy.sum
    elif self.mode != 'max':
        func = numpy.average

    for n in xrange(x.shape[0]):
        for k in xrange(x.shape[1]):
            for r in xrange(pr):
                row_st = r * st0
                row_end = __builtin__.min(row_st + ds0, img_rows)
                if not inc_pad:
                    row_st = __builtin__.max(row_st, self.padding[0])
                    row_end = __builtin__.min(row_end, x.shape[-2] + pad_h)
                for c in xrange(pc):
                    col_st = c * st1
                    col_end = __builtin__.min(col_st + ds1, img_cols)
                    if not inc_pad:
                        col_st = __builtin__.max(col_st, self.padding[1])
                        col_end = __builtin__.min(col_end, x.shape[-1] + pad_w)
                    zz[n, k, r, c] = func(y[n, k, row_st:row_end, col_st:col_end])
def _make_sparse_diagonal(tile, ex):
    ul, lr = ex[0], ex[1]
    data = sp.lil_matrix(tile.shape, dtype=tile.dtype)
    if ul[0] >= ul[1] and ul[0] < lr[1]:
        # below the diagonal
        for i in range(ul[0], __builtin__.min(lr[0], lr[1])):
            data[i - ul[0], i - ul[1]] = 1
    elif ul[1] >= ul[0] and ul[1] < lr[0]:
        # above the diagonal
        for j in range(ul[1], __builtin__.min(lr[1], lr[0])):
            data[j - ul[0], j - ul[1]] = 1
    return data
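# A small usage sketch of the tuple-based variant above, assuming numpy,
# __builtin__ and scipy.sparse (imported as sp) as in the snippet. The extent
# ((0, 0), (3, 5)) describes a tile whose upper-left corner sits on the
# diagonal, so ones land at (0, 0), (1, 1) and (2, 2):
import numpy
import scipy.sparse as sp

tile = numpy.zeros((3, 5))
ex = ((0, 0), (3, 5))  # (upper-left, lower-right) corners of the tile
print _make_sparse_diagonal(tile, ex).todense()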
def numpy_max_pool_2d_stride_padding(x, ds, ignore_border=True, st=None,
                                     padding=(0, 0), mode='max'):
    pad_h = padding[0]
    pad_w = padding[1]
    h = x.shape[-2]
    w = x.shape[-1]
    assert ds[0] > pad_h
    assert ds[1] > pad_w

    def pad_img(x):
        y = numpy.zeros((x.shape[0], x.shape[1],
                         x.shape[2] + pad_h * 2,
                         x.shape[3] + pad_w * 2),
                        dtype=x.dtype)
        y[:, :, pad_h:(x.shape[2] + pad_h), pad_w:(x.shape[3] + pad_w)] = x
        return y

    img_rows = h + 2 * pad_h
    img_cols = w + 2 * pad_w
    out_r = (img_rows - ds[0]) // st[0] + 1
    out_c = (img_cols - ds[1]) // st[1] + 1
    out_shp = list(x.shape[:-2])
    out_shp.append(out_r)
    out_shp.append(out_c)
    ds0, ds1 = ds
    st0, st1 = st
    output_val = numpy.zeros(out_shp)
    y = pad_img(x)
    func = numpy.max
    if mode == 'sum':
        func = numpy.sum
    elif mode != 'max':
        func = numpy.average
    inc_pad = mode == 'average_inc_pad'

    for k in numpy.ndindex(*x.shape[:-2]):
        for i in range(output_val.shape[-2]):
            ii_st = i * st[0]
            ii_end = __builtin__.min(ii_st + ds[0], img_rows)
            if not inc_pad:
                ii_st = __builtin__.max(ii_st, pad_h)
                ii_end = __builtin__.min(ii_end, h + pad_h)
            for j in range(output_val.shape[-1]):
                jj_st = j * st[1]
                jj_end = __builtin__.min(jj_st + ds[1], img_cols)
                if not inc_pad:
                    jj_st = __builtin__.max(jj_st, pad_w)
                    jj_end = __builtin__.min(jj_end, w + pad_w)
                patch = y[k][ii_st:ii_end, jj_st:jj_end]
                output_val[k][i, j] = func(patch)
    return output_val
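# A usage sketch for the padded variant above, assuming numpy. With a
# (1, 1, 4, 4) input, 3x3 windows, stride 2 and one pixel of zero padding,
# the padded image is 6x6, so out_r = (6 - 3) // 2 + 1 = 2:
x = numpy.arange(16, dtype='float64').reshape(1, 1, 4, 4)
out = numpy_max_pool_2d_stride_padding(
    x, ds=(3, 3), st=(2, 2), padding=(1, 1), mode='max')
print out.shape  # (1, 1, 2, 2)
print out        # [[[[  5.   7.]  [ 13.  15.]]]]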
def min(*args, **kwargs):
    """Symbolic minimum."""
    if len(args) > 1:
        return min(args, **kwargs)
    # 1 argument: iterable
    if isinstance(args[0], Range):
        return args[0].min()
    if isgenerator(args[0]):
        # don't process generators
        return __builtin__.min(*args, **kwargs)
    if any(isinstance(arg, Expression) for arg in args[0]):
        return Min(*args[0])
    return __builtin__.min(*args, **kwargs)
def neg(obj):
    if isinstance(obj, (int, float)):
        return __builtin__.min(obj, 0)
    elif isinstance(obj, (matrix, spmatrix)):
        (r, c) = size(obj)
        z = zeros(r, c)
        for i in xrange(r * c):
            z[i] = __builtin__.min(obj[i], 0)
        return z
    elif isneg(obj):
        return obj
    elif ispos(obj):
        return zeros(size(obj))
    else:
        return negfunction(obj)
def neg(obj):
    if isinstance(obj, (int, float)):
        return __builtin__.min(obj, 0)
    elif isinstance(obj, (matrix, spmatrix)):
        (r, c) = size(obj)
        z = zeros(r, c)
        for i in xrange(r * c):
            z[i] = __builtin__.min(obj[i], 0)
        return z
    elif isneg(obj):
        return obj
    elif ispos(obj):
        return zeros(size(obj))
    else:
        return negfunction(obj)
def SetupBasisPairs(self):
    distr = self.Representation.GetDistributedModel()
    rank = self.Representation.GetBaseRank()
    localRange = distr.GetLocalIndexRange(self.GetGlobalBasisPairCount(), rank)
    count = self.GetGlobalBasisPairCount()

    N = self.BSplineObject.NumberOfBSplines
    k = self.BSplineObject.MaxSplineOrder
    pairs = zeros((self.GetGlobalBasisPairCount(), 2), dtype=int32)
    pairs[:, 0] = 0
    pairs[:, 1] = N - 1

    index = 0
    for i in xrange(N):
        for j in xrange(max(0, i - k + 1), min(i + k, N)):
            pairs[index, 0] = i
            pairs[index, 1] = j
            index += 1
    if index != pairs.shape[0]:
        raise Exception()

    # store pairs
    self.GlobalIndexPairs = pairs
    self.LocalBasisPairIndices = r_[localRange]
    self.LocalIndexPairs = pairs[localRange]

    return pairs
def set_range(self, min=None, max=None, relative=False):
    '''
    :param min: The lower limit of the range
    :param max: The upper limit of the range
    :param relative: If True then min and max are provided as 0 to 1
        values, otherwise they are absolute values.
    '''
    if min is None and max is None:
        raise ValueError('You must set at least the min or the max')
    if min is not None:
        if not relative:
            self._range['min'] = min
        else:
            self._range['min'] = self._to_absolute(__builtin__.max(min, 0))
    if max is not None:
        if not relative:
            self._range['max'] = max
        else:
            self._range['max'] = self._to_absolute(__builtin__.min(max, 1))
    self._cache_clear()
    old_index = self._sieve.index
    self._sieve.query = self._generate_query()
    included = self._sieve.index - old_index
    excluded = old_index - self._sieve.index
    return dict(included=list(included), excluded=list(excluded))
def SetupBasisPairs(self):
    distr = self.Representation.GetDistributedModel()
    rank = self.Representation.GetBaseRank()
    localRange = distr.GetLocalIndexRange(self.GetGlobalBasisPairCount(), rank)
    count = self.GetGlobalBasisPairCount()

    N = self.BSplineObject.NumberOfBSplines
    k = self.BSplineObject.MaxSplineOrder
    pairs = zeros((self.GetGlobalBasisPairCount(), 2), dtype=int32)
    pairs[:, 0] = 0
    pairs[:, 1] = N - 1

    index = 0
    for i in xrange(N):
        for j in xrange(max(0, i - k + 1), min(i + k, N)):
            pairs[index, 0] = i
            pairs[index, 1] = j
            index += 1
    if index != pairs.shape[0]:
        raise Exception()

    # store pairs
    self.GlobalIndexPairs = pairs
    self.LocalBasisPairIndices = r_[localRange]
    self.LocalIndexPairs = pairs[localRange]

    return pairs
def implementation(groupByRecord, resultColumnName, state, record=None):
    if (state == AGGREGATION_STATE__PROCESS_RECORD):
        v = record[columnName] if (columnName in record) else None
        if (v is not None):
            groupByRecord[resultColumnName] = __builtin__.min(
                v, groupByRecord[resultColumnName]) if (
                    resultColumnName in groupByRecord) else v
def process(self, plotspec_to_traces_dict):
    """Assign colours to the traces.

    We aim to minimise colour clashes, but still use only one colour for
    a given trace, even if it appears on multiple plots.

    1/ We build a graph, in which each node represents a trace, and
       edges represent 'linkage'
    2/ We look at the connected components, i.e. the traces that should
       all have the same color_indices
    3/ If we have more groups than colours, then we allocate colour
       indices to these groups based on minimising colour collisions
       across the plots.
    ## TODO:
    4/ Actual colour is assigned by the color_assigner.
    """
    import networkx

    all_traces = set(chain(*plotspec_to_traces_dict.values()))
    allocated_trace_colors = {}
    color_indices = range(len(self._color_cycle))

    G = networkx.Graph()
    # Add a node per trace:
    for trace in all_traces:
        G.add_node(trace)
    # Add the edges:
    all_links = self._linkages_explicit + self._get_linkages_from_rules(all_traces)
    for link in all_links:
        (first, remaining) = (link[0], link[1:])
        for r in remaining:
            G.add_edge(first, r)

    groups = networkx.connected_components(G)
    for grp in sorted(groups, key=lambda g: (len(g), id(g[0])), reverse=True):
        # Calculate how many collisions we would have for each allocation:
        def index_score(i):
            s = _get_collision_of_color_index_for_group(
                colorIndex=i,
                group=grp,
                plotspec_to_traces_dict=plotspec_to_traces_dict,
                allocated_trace_colors=allocated_trace_colors)
            return s
        new_index = bi.min(color_indices, key=index_score)
        # Allocate to colorIndex:
        for g in grp:
            allocated_trace_colors[g] = new_index

    # We have now assigned a color_index to each group; all that remains
    # is to make the allocation from index to colours:
    self._color_allocations = {}
    for trace in all_traces:
        self._color_allocations[trace] = self._color_cycle[
            allocated_trace_colors[trace]]
def HPDF(data, min=None, max=None):
    """
    Histogram PDF - initialized with points from a histogram.

    This function creates a PDF from a histogram. This is useful when
    some other software has generated a PDF from your data.

    :param data: A two dimensional array. The first column is the
        histogram interval mean, and the second column is the
        probability. The probability values do not need to be
        normalized.
    :param min: A minimum value for the PDF range. If your histogram
        has values very close to 0, and you know values of 0 are
        impossible, then you should set the ***min*** parameter.
    :param max: A maximum value for the PDF range.
    :type data: 2D numpy array
    :returns: A PDF object.
    """
    x = data[:, 0]
    y = data[:, 1]
    sp = interpolate.splrep(x, y)
    dx = (x[1] - x[0]) / 2.0
    mmin = x[0] - dx
    mmax = x[-1] + dx
    if min is not None:
        mmin = __builtin__.max(min, mmin)
    if max is not None:
        mmax = __builtin__.min(max, mmax)
    x = np.linspace(mmin, mmax, options['pdf']['numpart'])
    y = interpolate.splev(x, sp)
    y[y < 0] = 0  # if the extrapolation goes negative...
    return PDF(x, y)
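# HPDF above leans on the standard scipy pair splrep/splev to resample the
# histogram onto a fine grid; a self-contained sketch of that step, with
# made-up bin data:
import numpy as np
from scipy import interpolate

bx = np.array([0.5, 1.5, 2.5, 3.5])   # histogram bin centers
by = np.array([1.0, 4.0, 4.0, 1.0])   # unnormalized probabilities
tck = interpolate.splrep(bx, by)      # fit a cubic B-spline representation
xs = np.linspace(0.0, 4.0, 9)         # extend half a bin past each end
ys = interpolate.splev(xs, tck)       # evaluate the spline on the fine grid
ys[ys < 0] = 0                        # clamp negative extrapolation, as HPDF does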
def splay(queue, n, kernel_specific_max_wg_size=None):
    dev = queue.device
    max_work_items = _builtin_min(128, dev.max_work_group_size)

    if kernel_specific_max_wg_size is not None:
        from __builtin__ import min
        max_work_items = min(max_work_items, kernel_specific_max_wg_size)

    min_work_items = _builtin_min(32, max_work_items)
    max_groups = dev.max_compute_units * 4 * 8
    # 4 to overfill the device
    # 8 is an Nvidia constant--that's how many
    # groups fit onto one compute device

    if n < min_work_items:
        group_count = 1
        work_items_per_group = min_work_items
    elif n < (max_groups * min_work_items):
        group_count = (n + min_work_items - 1) // min_work_items
        work_items_per_group = min_work_items
    elif n < (max_groups * max_work_items):
        group_count = max_groups
        grp = (n + min_work_items - 1) // min_work_items
        work_items_per_group = ((grp + max_groups - 1) // max_groups) * min_work_items
    else:
        group_count = max_groups
        work_items_per_group = max_work_items

    # print "n:%d gc:%d wipg:%d" % (n, group_count, work_items_per_group)

    return (group_count * work_items_per_group, 1, 1), (work_items_per_group, 1, 1)
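# The sizing logic above reads just two device attributes; an illustration
# with a hypothetical mock standing in for PyOpenCL's queue.device (and
# assuming _builtin_min is bound to the builtin min in this module):
import collections

FakeDevice = collections.namedtuple('FakeDevice',
                                    ['max_work_group_size', 'max_compute_units'])
FakeQueue = collections.namedtuple('FakeQueue', ['device'])

queue = FakeQueue(FakeDevice(max_work_group_size=256, max_compute_units=8))
# max_work_items = min(128, 256) = 128; min_work_items = 32; max_groups = 256
print splay(queue, 1000)
# 1000 < 256 * 32, so group_count = (1000 + 31) // 32 = 32 groups of 32 items:
# ((1024, 1, 1), (32, 1, 1))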
def _get_range(sfunc, min, max):
    """Truncate PDFs with long tails."""
    num_tails = int(sfunc.ppf(0) == np.NINF) + int(sfunc.ppf(1) == np.PINF)
    _range = options['pdf']['range']
    if num_tails:
        if num_tails == 2:
            range = [(1.0 - _range) / 2, (1.0 + _range) / 2]
        else:
            range = [1.0 - _range, _range]

    mmin = sfunc.ppf(0)
    if mmin == np.NINF:
        mmin = sfunc.ppf(range[0])
    mmax = sfunc.ppf(1)
    if mmax == np.PINF:
        mmax = sfunc.ppf(range[1])

    if min is not None:
        min = __builtin__.max(min, mmin)
    else:
        min = mmin
    if max is not None:
        max = __builtin__.min(max, mmax)
    else:
        max = mmax
    return min, max
def min(l):
    "Return the minimum of the elements in l, or nan if any element is nan."
    import __builtin__
    try:
        return __builtin__.min(ensure_nonan(x) for x in l)
    except NanException:
        return nan
def _nanmin(values, axis=None, skipna=True):
    mask = isnull(values)
    dtype = values.dtype
    if skipna and not issubclass(dtype.type, (np.integer, np.datetime64)):
        values = values.copy()
        np.putmask(values, mask, np.inf)

    if issubclass(dtype.type, np.datetime64):
        values = values.view(np.int64)

    # numpy 1.6.1 workaround in Python 3.x
    if values.dtype == np.object_ and sys.version_info[0] >= 3:  # pragma: no cover
        import __builtin__
        if values.ndim > 1:
            apply_ax = axis if axis is not None else 0
            result = np.apply_along_axis(__builtin__.min, apply_ax, values)
        else:
            result = __builtin__.min(values)
    else:
        if (axis is not None and values.shape[axis] == 0) or values.size == 0:
            result = values.sum(axis)
            result.fill(np.nan)
        else:
            result = values.min(axis)

    if issubclass(dtype.type, np.datetime64):
        if not isinstance(result, np.ndarray):
            result = lib.Timestamp(result)
        else:
            result = result.view(dtype)

    return _maybe_null_out(result, axis, mask)
def _nanmin(values, axis=None, skipna=True):
    mask = isnull(values)
    dtype = values.dtype
    if skipna and _na_ok_dtype(dtype):
        values = values.copy()
        np.putmask(values, mask, np.inf)

    values = _view_if_needed(values)

    # numpy 1.6.1 workaround in Python 3.x
    if (values.dtype == np.object_ and
            sys.version_info[0] >= 3):  # pragma: no cover
        import __builtin__
        if values.ndim > 1:
            apply_ax = axis if axis is not None else 0
            result = np.apply_along_axis(__builtin__.min, apply_ax, values)
        else:
            result = __builtin__.min(values)
    else:
        if ((axis is not None and values.shape[axis] == 0) or
                values.size == 0):
            result = com.ensure_float(values.sum(axis))
            result.fill(np.nan)
        else:
            result = values.min(axis)

    result = _wrap_results(result, dtype)
    return _maybe_null_out(result, axis, mask)
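# The skipna path above masks NaNs with +inf so they can never win the
# minimum; a standalone sketch of that trick, assuming plain numpy:
import numpy as np

values = np.array([3.0, np.nan, 1.0, 7.0])
mask = np.isnan(values)
tmp = values.copy()
np.putmask(tmp, mask, np.inf)   # NaNs become +inf and drop out of the min
print tmp.min()                 # 1.0
# if every value were NaN the masked min would be +inf, which is why the
# real function null-checks the result afterwards (_maybe_null_out)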
def splay(queue, n, kernel_specific_max_wg_size=None):
    dev = queue.device
    max_work_items = _builtin_min(128, dev.max_work_group_size)

    if kernel_specific_max_wg_size is not None:
        from __builtin__ import min
        max_work_items = min(max_work_items, kernel_specific_max_wg_size)

    min_work_items = _builtin_min(32, max_work_items)
    max_groups = dev.max_compute_units * 4 * 8
    # 4 to overfill the device
    # 8 is an Nvidia constant--that's how many
    # groups fit onto one compute device

    if n < min_work_items:
        group_count = 1
        work_items_per_group = min_work_items
    elif n < (max_groups * min_work_items):
        group_count = (n + min_work_items - 1) // min_work_items
        work_items_per_group = min_work_items
    elif n < (max_groups * max_work_items):
        group_count = max_groups
        grp = (n + min_work_items - 1) // min_work_items
        work_items_per_group = (
            (grp + max_groups - 1) // max_groups) * min_work_items
    else:
        group_count = max_groups
        work_items_per_group = max_work_items

    # print "n:%d gc:%d wipg:%d" % (n, group_count, work_items_per_group)

    return (group_count * work_items_per_group, ), (work_items_per_group, )
def numpy_max_pool_2d_stride_padding(x, ds, ignore_border=True, st=None,
                                     padding=(0, 0)):
    pad_h = padding[0]
    pad_w = padding[1]
    h = x.shape[-2]
    w = x.shape[-1]
    assert ds[0] > pad_h
    assert ds[1] > pad_w

    def pad_img(x):
        # pad with a value smaller than any input, so padding never wins the max
        fill = x.min() - 1
        t = numpy.ones((x.shape[0], x.shape[1], 1, 1))
        ud_bar = (numpy.zeros((pad_h, w)) + fill)[
            numpy.newaxis, numpy.newaxis, :, :] * t
        lr_bar = (numpy.zeros((pad_h * 2 + h, pad_w)) + fill)[
            numpy.newaxis, numpy.newaxis, :, :] * t
        y = numpy.concatenate([ud_bar, x, ud_bar], axis=2)
        y = numpy.concatenate([lr_bar, y, lr_bar], axis=3)
        return y

    img_rows = h + 2 * pad_h
    img_cols = w + 2 * pad_w
    out_r = (img_rows - ds[0]) // st[0] + 1
    out_c = (img_cols - ds[1]) // st[1] + 1
    out_shp = list(x.shape[:-2])
    out_shp.append(out_r)
    out_shp.append(out_c)
    ds0, ds1 = ds
    st0, st1 = st
    output_val = numpy.zeros(out_shp)
    y = pad_img(x)
    for k in numpy.ndindex(*x.shape[:-2]):
        for i in range(output_val.shape[-2]):
            ii_st = i * st[0]
            ii_end = __builtin__.min(ii_st + ds[0], img_rows)
            for j in range(output_val.shape[-1]):
                jj_st = j * st[1]
                jj_end = __builtin__.min(jj_st + ds[1], img_cols)
                patch = y[k][ii_st:ii_end, jj_st:jj_end]
                output_val[k][i, j] = numpy.max(patch)
    return output_val
def minmax(cp, size):
    _check_params(len(cp), size)
    max_sample, min_sample = 0, 0
    for sample in _get_samples(cp, size):
        max_sample = __builtin__.max(sample, max_sample)
        min_sample = __builtin__.min(sample, min_sample)
    return min_sample, max_sample
def test_simplify(self): """Test for simplify().""" A, B, C, n1, n2, n3 = self.A, self.B, self.C, self.n1, self.n2, self.n3 self.assertEqual(min(min(A, B), C)(), min(A, min(B, C))()) self.assertEqual(Min(A)(), A) self.assertEqual(Min()(), float("-inf")) self.assertEqual(Min(n1, n2)(), __builtin__.min(n1, n2))
def minmax(cp, size):
    _check_params(len(cp), size)
    min_sample, max_sample = 0x7FFFFFFF, -0x80000000
    for sample in _get_samples(cp, size):
        max_sample = builtins.max(sample, max_sample)
        min_sample = builtins.min(sample, min_sample)
    return min_sample, max_sample
def test_simplify(self): """Test for simplify().""" A, B, C, n1, n2, n3 = self.A, self.B, self.C, self.n1, self.n2, self.n3 self.assertEqual(min(min(A, B), C)(), min(A, min(B, C))()) self.assertEqual(Min(A)(), A) self.assertEqual(Min()(), None) self.assertEqual(Min(n1, n2)(), __builtin__.min(n1, n2))
def minmax(cp, size):
    _check_params(len(cp), size)
    min_sample, max_sample = 0x7fffffff, -0x80000000
    for sample in _get_samples(cp, size):
        max_sample = builtins.max(sample, max_sample)
        min_sample = builtins.min(sample, min_sample)
    return min_sample, max_sample
def min(*args, **kwargs):
    """Symbolic minimum."""
    if len(args) > 1:
        return min(args, **kwargs)
    # 1 argument: iterable
    if isinstance(args[0], Range):
        return args[0].min()
    if any(isinstance(arg, Expression) for arg in args[0]):
        return Min(*args[0])
    return __builtin__.min(*args, **kwargs)
def _diagonal_mapper(array, ex):
    if ex.ul[0] >= ex.ul[1] and ex.ul[0] < ex.lr[1]:
        # Below the diagonal.
        above, below = False, True
    elif ex.ul[1] >= ex.ul[0] and ex.ul[1] < ex.lr[0]:
        # Above the diagonal.
        above, below = True, False
    else:
        # Not on the diagonal.
        return

    start = ex.ul[above]
    stop = __builtin__.min(ex.lr[above], ex.lr[below])

    result = np.ndarray((stop - start, ))
    data = array.fetch(ex)
    index = 0
    for i in range(start, stop):
        result[index] = data[i - ex.ul[0], i - ex.ul[1]]
        index += 1

    res_ex = extent.create((start, ), (stop, ), (__builtin__.min(array.shape), ))
    yield (res_ex, result)
def ending_in_common(str0, str1):
    substr = ''
    len0 = len(str0)
    len1 = len(str1)
    if len0 > 0 and len1 > 0:
        i = 0
        for i in xrange(min(len0, len1)):
            if str0[len0 - i - 1] != str1[len1 - i - 1]:
                break
        substr = str0[len0 - i:]
    return substr
def beginning_in_common(str0, str1):
    substr = ''
    len0 = len(str0)
    len1 = len(str1)
    if len0 > 0 and len1 > 0:
        i = 0
        for i in xrange(min(len0, len1)):
            if str0[i] != str1[i]:
                break
        substr = str0[:i]
    return substr
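# Quick usage of the two helpers above. Note that when the strings agree
# along their entire overlap the loop index stops one short, so the last
# common character is dropped (beginning_in_common('flow', 'flows') gives
# 'flo'); that may or may not be intended:
print beginning_in_common('flowchart', 'flowers')  # 'flow'
print ending_in_common('testing', 'bring')         # 'ing'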
def numpy_max_pool_2d_stride_padding(
        x, ds, ignore_border=True, st=None, padding=(0, 0)):
    pad_h = padding[0]
    pad_w = padding[1]
    h = x.shape[-2]
    w = x.shape[-1]
    assert ds[0] > pad_h
    assert ds[1] > pad_w

    def pad_img(x):
        # pad with a value smaller than any input, so padding never wins the max
        fill = x.min() - 1
        t = numpy.ones((x.shape[0], x.shape[1], 1, 1))
        ud_bar = (numpy.zeros((pad_h, w)) + fill)[
            numpy.newaxis, numpy.newaxis, :, :] * t
        lr_bar = (numpy.zeros((pad_h * 2 + h, pad_w)) + fill)[
            numpy.newaxis, numpy.newaxis, :, :] * t
        y = numpy.concatenate([ud_bar, x, ud_bar], axis=2)
        y = numpy.concatenate([lr_bar, y, lr_bar], axis=3)
        return y

    img_rows = h + 2 * pad_h
    img_cols = w + 2 * pad_w
    out_r = (img_rows - ds[0]) // st[0] + 1
    out_c = (img_cols - ds[1]) // st[1] + 1
    out_shp = list(x.shape[:-2])
    out_shp.append(out_r)
    out_shp.append(out_c)
    ds0, ds1 = ds
    st0, st1 = st
    output_val = numpy.zeros(out_shp)
    y = pad_img(x)
    for k in numpy.ndindex(*x.shape[:-2]):
        for i in range(output_val.shape[-2]):
            ii_st = i * st[0]
            ii_end = __builtin__.min(ii_st + ds[0], img_rows)
            for j in range(output_val.shape[-1]):
                jj_st = j * st[1]
                jj_end = __builtin__.min(jj_st + ds[1], img_cols)
                patch = y[k][ii_st:ii_end, jj_st:jj_end]
                output_val[k][i, j] = numpy.max(patch)
    return output_val
def __init__(self, name=None, type=None, min=None, max=None, dict=None,
             converter=None):
    self.name = name
    self.type = type
    self.min = min
    self.max = max
    self.dict = dict
    self.converter = converter

    # Automatically calculate min and max for enum parameters
    if self.dict is not None:
        self.min = self.min if self.min is not None \
            else __builtin__.min(self.dict.itervalues())
        self.max = self.max if self.max is not None \
            else __builtin__.max(self.dict.itervalues())
def perform(self, node, inp, out):
    x, maxout, gz = inp
    gx_stg, = out
    # number of pooling output rows
    pr = maxout.shape[-2]
    # number of pooling output cols
    pc = maxout.shape[-1]
    ds0, ds1 = self.ds
    st0, st1 = self.st
    pad_h = self.padding[0]
    pad_w = self.padding[1]
    img_rows = x.shape[-2] + 2 * pad_h
    img_cols = x.shape[-1] + 2 * pad_w

    # pad the image
    if self.padding != (0, 0):
        fill = x.min() - 1
        y = numpy.zeros((x.shape[0], x.shape[1], img_rows, img_cols),
                        dtype=x.dtype) + fill
        y[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)] = x
    else:
        y = x
    gx = numpy.zeros_like(y)
    for n in xrange(x.shape[0]):
        for k in xrange(x.shape[1]):
            for r in xrange(pr):
                row_st = r * st0
                row_end = __builtin__.min(row_st + ds0, img_rows)
                for c in xrange(pc):
                    col_st = c * st1
                    col_end = __builtin__.min(col_st + ds1, img_cols)
                    for row_ind in xrange(row_st, row_end):
                        for col_ind in xrange(col_st, col_end):
                            if (maxout[n, k, r, c] == y[n, k, row_ind, col_ind]):
                                gx[n, k, row_ind, col_ind] += gz[n, k, r, c]
    # unpad the image
    gx = gx[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)]
    gx_stg[0] = gx
def min(sequence):
    """
    Returns the minimum value in sequence, which must be non-empty.

    >>> min([3, 1, 4, 1, 5, 9])
    1
    >>> min([])
    Traceback (most recent call last):
        ...
    ValueError: min() arg is an empty sequence
    """
    return __builtin__.min(sequence)
def perform(self, node, inp, out):
    """Max-pool a 4D input tensor over its last two dimensions."""
    x, = inp
    z, = out
    if len(x.shape) != 4:
        raise NotImplementedError(
            'DownsampleFactorMax requires 4D input for now')
    z_shape = self.out_shape(x.shape, self.ds, self.ignore_border, self.st)
    if (z[0] is None) or (z[0].shape != z_shape):
        z[0] = numpy.zeros(
            self.out_shape(x.shape, self.ds, self.ignore_border, self.st))
        z[0] = theano._asarray(z[0], dtype=x.dtype)
    zz = z[0]
    # zz needs to be initialized with -inf for the following to work
    zz -= numpy.inf
    # number of pooling output rows
    pr = zz.shape[-2]
    # number of pooling output cols
    pc = zz.shape[-1]
    ds0, ds1 = self.ds
    st0, st1 = self.st
    img_rows = x.shape[-2]
    img_cols = x.shape[-1]
    for n in xrange(x.shape[0]):
        for k in xrange(x.shape[1]):
            for r in xrange(pr):
                row_st = r * st0
                row_end = __builtin__.min(row_st + ds0, img_rows)
                for c in xrange(pc):
                    col_st = c * st1
                    col_end = __builtin__.min(col_st + ds1, img_cols)
                    for row_ind in xrange(row_st, row_end):
                        for col_ind in xrange(col_st, col_end):
                            zz[n, k, r, c] = \
                                __builtin__.max(zz[n, k, r, c],
                                                x[n, k, row_ind, col_ind])
def perform(self, node, inp, out):
    x, maxout, gz = inp
    gx_stg, = out
    # number of pooling output rows
    pr = maxout.shape[-2]
    # number of pooling output cols
    pc = maxout.shape[-1]
    ds0, ds1 = self.ds
    st0, st1 = self.st
    pad_h = self.padding[0]
    pad_w = self.padding[1]
    img_rows = x.shape[-2] + 2 * pad_h
    img_cols = x.shape[-1] + 2 * pad_w

    # pad the image
    if self.padding != (0, 0):
        fill = x.min() - 1
        y = numpy.zeros(
            (x.shape[0], x.shape[1], img_rows, img_cols),
            dtype=x.dtype) + fill
        y[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)] = x
    else:
        y = x
    gx = numpy.zeros_like(y)
    for n in xrange(x.shape[0]):
        for k in xrange(x.shape[1]):
            for r in xrange(pr):
                row_st = r * st0
                row_end = __builtin__.min(row_st + ds0, img_rows)
                for c in xrange(pc):
                    col_st = c * st1
                    col_end = __builtin__.min(col_st + ds1, img_cols)
                    for row_ind in xrange(row_st, row_end):
                        for col_ind in xrange(col_st, col_end):
                            if (maxout[n, k, r, c] == y[n, k, row_ind, col_ind]):
                                gx[n, k, row_ind, col_ind] += gz[n, k, r, c]
    # unpad the image
    gx = gx[:, :, pad_h:(img_rows - pad_h), pad_w:(img_cols - pad_w)]
    gx_stg[0] = gx
def FindZeroAngle(self):
    from __builtin__ import max, min, sorted

    # input
    x = self.Angle
    y = self.DetCtr

    # limits
    y_max = max(y)
    y_low = 0.01 * y_max

    # find suitable x range
    x_min = max(x)
    x_max = min(x)
    for xi, yi in zip(x, y):
        if yi > y_low:
            if x_min > xi:
                x_min = xi
            if x_max < xi:
                x_max = xi

    # sampling
    x_sam = self.linspace(x_min, x_max, num=500)
    y_sam = self.sample(x, y, x_sam)

    # normalized cross-correlation
    y_cnv = self.normxcorr(y_sam, y_sam)
    x_cnv = self.linspace(x_min, x_max, num=len(y_cnv))

    # find suitable maximum of y_cnv
    yLevel = 0.5 * y_max
    maxima = self.localmaxima(x_cnv, y_cnv)
    maxima = [m for m in maxima if m[1] > 0.0]  # ignore negative matches
    maxima = [m for m in maxima
              if self.sample(x, y, m[0]) > yLevel]  # only consider high y values
    maxima = sorted(maxima, key=lambda m: m[1], reverse=True)  # best fit first

    if not maxima:
        self.PeakAng = x[y.index(y_max)]
        self.PeakVal = y_max
    else:
        x_cnv_max, y_cnv_max, i_cnv_max = maxima[0]
        self.PeakAng = self.maximumX(x_cnv, y_cnv, i_cnv_max)
        self.PeakVal = y_max

    print "Peak Angle:", self.PeakAng
    print "I(rock):", self.PeakVal
    return self.PeakAng
def min(*args): #"""Maximum element of a vector.""" # need to upgrade this to handle things like min(matrix((1,2)), matrix((2,3))). if len(args) > 1: if not getoptvars(args) and not getparams(args): r = __builtin__.min([rows(x) for x in args]) c = __builtin__.min([cols(x) for x in args]) if (r, c) == (1, 1): return __builtin__.min(args) args = list(args) for i in xrange(len(args)): if size(args[i]) == (1, 1): args[i] = args[i] * matrix(ones(r, c)) elif size(args[i]) != (r, c): raise AtomArgsError('incompatible arguments to min') z = zeros(r, c) for i in range(r * c): z[i] = __builtin__.min([x[i] for x in args]) return z else: return multiargminfunction(args) else: arg = args[0] if iterable(arg): return min(*arg) elif isinstance(arg, (int, float)): return arg elif not getoptvars(arg) and not getparams(arg): return __builtin__.min(arg) elif is1x1(arg): return arg else: return singleargminfunction(arg)
def min(*args): #"""Maximum element of a vector.""" # need to upgrade this to handle things like min(matrix((1,2)), matrix((2,3))). if len(args) > 1: if not getoptvars(args) and not getparams(args): r = __builtin__.min([rows(x) for x in args]) c = __builtin__.min([cols(x) for x in args]) if (r,c) == (1,1): return __builtin__.min(args) args = list(args) for i in xrange(len(args)): if size(args[i]) == (1,1): args[i] = args[i]*matrix(ones(r,c)) elif size(args[i]) != (r,c): raise AtomArgsError('incompatible arguments to min') z = zeros(r, c) for i in range(r*c): z[i] = __builtin__.min([x[i] for x in args]) return z else: return multiargminfunction(args) else: arg = args[0] if iterable(arg): return min(*arg) elif isinstance(arg, (int, float)): return arg elif not getoptvars(arg) and not getparams(arg): return __builtin__.min(arg) elif is1x1(arg): return arg else: return singleargminfunction(arg)
def __call__(self, *args):
    while type(args[0]) is list:
        args = args[0]
    x = args
    flag = False
    for xi in x:
        if isinstance(xi, expr):
            flag = True
            break
    if not flag:
        return __builtin__.min(x)
    y = []
    for i in range(len(x)):
        if isNumber(x[i]):
            y.append(scalar(x[i]))
        else:
            y.append(x[i])
    return expr(self, y)