Example 1
    def _common(cls, node, **kwargs):
        """Convert an ONNX Reshape node (opset 1 or >= 5) into a graph node.

        Folds constant inputs into a ConstantInputParameters node, otherwise
        inserts a ReshapeParameters node wired to its producer.  The result is
        recorded in all_nodes under the ONNX output name and returned.
        """
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        # each entry is a tuple of (producer node, producer output index,
        # ProvisionalDim) — see uses of inp[0]/inp[1] and inputs[0][2] below
        inputs = [all_nodes[inp] for inp in node.input]

        # opset 1 carries the target shape as a node attribute; opset >= 5
        # supplies it as a constant second input tensor
        if cls.SINCE_VERSION == 1:
            shape = np.array(node.attrs["shape"])
        else:  # since_version >= 5
            shape = cls.get_constant(inputs[1])

        input_shape = np.array(inputs[0][2].shape)
        # ONNX semantics: a 0 in the target shape means "keep the input
        # dimension at the same index"
        shape = [
            dim if dim != 0 else input_shape[idx]
            for idx, dim in enumerate(shape)
        ]
        # a single -1 is a wildcard: infer it so total sizes match.
        # None marks an unknown (e.g. batch) dimension and is sized as 1.
        if -1 in shape:
            wild_index = shape.index(-1)
            in_size = prod([1 if dim is None else dim for dim in input_shape])
            # dim <= 0 also skips the -1 wildcard itself in the product
            shape_size = prod(
                [1 if dim is None or dim <= 0 else dim for dim in shape])
            if in_size % shape_size != 0:
                raise ValueError('invalid reshape')
            shape[wild_index] = in_size // shape_size
        shape = np.array(shape)

        # constant folding: reshape the constant value directly rather than
        # inserting a reshape node into the graph
        if cls.is_constant(inputs[0]):
            logger.info("reducing %s to a constant", valid_name)
            params = ConstantInputParameters(valid_name,
                                             value=cls.get_constant(
                                                 inputs[0]).reshape(shape),
                                             dims=Dim.unnamed(shape),
                                             constant_store=G.constant_store)
            pshape = ProvisionalDim(shape)
            all_nodes[node.output[0]] = (params, 0, pshape)
            return params

        # TODO - There must be a better way of doing this
        # This hacks around the fact that the batch dimension will be in the reshape
        if input_shape[0] is None and shape[0] == 1:
            shape = np.array([None] + list(shape[1::]))

        pshape = ProvisionalDim(shape)
        # pylint: disable=singleton-comparison
        # elementwise `!= None` masks out the unknown dims before building
        # the concrete Dim shapes
        old_shape = Dim.unnamed(list(input_shape[input_shape != None]))
        shape = Dim.unnamed(list(shape[shape != None]))
        params = ReshapeParameters(valid_name,
                                   old_shape=old_shape,
                                   shape=shape)
        inp = inputs[0]
        G.add_edge(
            NNEdge(from_node=inp[0], to_node=params, from_idx=inp[1],
                   to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pshape)
        return params
Example 2
 def _kernd(self, X, x, h):
     """Separable multivariate Gaussian kernel weights.

     Evaluates the m per-coordinate 1-dim kernels (via ``_kern``) for each
     of the n m-dim feature points of ``X`` relative to the point ``x``
     with scale parameters ``h``, and multiplies them per point.

     Returns
     -------
     w : 1-dim, length-n array of weights of the points of X relative to x
     """
     per_coord = self._kern(X, x, h)
     # product over the coordinate axis collapses (n, m) -> (n,)
     return prod(per_coord, axis=1)
Example 3
def find_largest_horizontal_product(grid, num_adjacent=None):
    """Return the largest product of horizontally adjacent grid values.

    Parameters
    ----------
    grid : sequence of sequences of numbers
        Rows may have different lengths; rows shorter than the window
        contribute nothing.
    num_adjacent : int, optional
        Window width.  Defaults to the module-level NUM_ADJACENT so
        existing callers keep their behaviour.

    Returns
    -------
    The maximum window product found, or 0 if no row is wide enough.
    """
    from math import prod as _prod  # exact integer product, stdlib

    if num_adjacent is None:
        num_adjacent = NUM_ADJACENT

    largest = 0
    for row in grid:
        # last valid start index leaves a full window inside the row
        for start in range(len(row) - num_adjacent + 1):
            window_product = _prod(row[start:start + num_adjacent])
            if window_product > largest:
                largest = window_product
    return largest
Example 4
 def expand_to_chw(self):
     """Return this Dim expressed as a named (c, h, w) dimension.

     A shape with fewer than 3 axes is left-padded with 1s; a shape with
     more than 3 axes has all leading axes collapsed into the channel
     dimension.  Already-named c/h/w dims are returned unchanged.
     """
     self._verify_is_ordered()
     if self.is_named and self.has_keys(['c', 'h', 'w']):
         return self
     dims = list(self.shape)
     if len(dims) > 3:
         # fold every leading axis into the channel count
         return Dim.named(c=prod(dims[:-2]), h=dims[-2], w=dims[-1])
     # pad on the left with unit dimensions up to rank 3
     padded = [1] * (3 - len(dims)) + dims
     return Dim.named(c=padded[0], h=padded[1], w=padded[2])
Example 5
def is_channel_vector(node, idx):
    """Return True if input `idx` of `node` carries its whole size on axis 0.

    Equivalent to saying every axis after the first has extent 1, i.e. the
    tensor is effectively a vector along the channel dimension.
    """
    dims = node.in_dims[idx].shape
    total = prod(dims)
    return dims[0] == total
Example 6
# Project Euler problem 8: greatest product of 13 adjacent digits in this
# 1000-digit number.
X = '73167176531330624919225119674426574742355349194934\
96983520312774506326239578318016984801869478851843\
85861560789112949495459501737958331952853208805511\
12540698747158523863050715693290963295227443043557\
66896648950445244523161731856403098711121722383113\
62229893423380308135336276614282806444486645238749\
30358907296290491560440772390713810515859307960866\
70172427121883998797908792274921901699720888093776\
65727333001053367881220235421809751254540594752243\
52584907711670556013604839586446706324415722155397\
53697817977846174064955149290862569321978468622482\
83972241375657056057490261407972968652414535100474\
82166370484403199890008895243450658541227588666881\
16427171479924442928230863465674813919123162824586\
17866458359124566529476545682848912883142607690042\
24219022671055626321111109370544217506941658960408\
07198403850962455444362981230987879927244284909188\
84580156166097919133875499200524063689912560717606\
05886116467109405077541002256983155200055935729725\
71636269561882670428252483600823257530420752963450'

# stdlib exact integer product; the original pulled `prod` from the private
# numpy.core.fromnumeric path and multiplied floats
from math import prod

# width of the window of adjacent digits
WINDOW = 13

# Slide the window over every starting index.  The upper bound
# len(X) - WINDOW + 1 (= 988) includes the final window; the original
# range(0, 987) stopped one start short and silently skipped it.
productList = [
    prod(int(digit) for digit in X[i:i + WINDOW])
    for i in range(len(X) - WINDOW + 1)
]

# print() function form; the original used the Python 2 print statement
print(max(productList))
Example 7
    def _common(cls, node, **kwargs):
        """Convert an ONNX Reshape node (opset 1 or >= 5) into a graph node.

        Folds constant inputs into a ConstantInputParameters node, otherwise
        inserts a ReshapeParameters node wired to its producer.  The result is
        recorded in all_nodes under the ONNX output name and returned.
        """
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        # each entry is a tuple whose first four fields are used below:
        # (producer node, producer output index, ProvisionalDim, inp[3])
        inputs = [all_nodes[inp] for inp in node.input]

        # opset 1 carries the target shape as a node attribute; opset >= 5
        # supplies it as a constant second input tensor
        if cls.SINCE_VERSION == 1:
            shape = np.array(node.attrs["shape"])
        else:  # since_version >= 5
            shape = cls.get_constant(inputs[1])

        input_shape = np.array(inputs[0][2].shape)
        # ONNX semantics: a 0 in the target shape means "keep the input
        # dimension at the same index"
        shape = [
            dim if dim != 0 else input_shape[idx]
            for idx, dim in enumerate(shape)
        ]
        # this catches a special case where inp is something like [None, 2, 4] and shape is [2, -1, 4]
        # The -1 is clearly the None moving so move it
        if cls.moves_unknown(input_shape, shape):
            shape = np.array([None if dim == -1 else dim for dim in shape])
        else:
            # a -1 wildcard is inferred so total sizes match; None marks an
            # unknown (e.g. batch) dimension
            if -1 in shape:
                # product of the target dims, skipping None and the -1 itself
                new_shape_size = reduce(
                    lambda x, y: x * 1
                    if y is None or y == -1 else x * y, shape, 1)
                # product of the known input dims
                inp_size = reduce(lambda x, y: x * y
                                  if y is not None else x, input_shape, 1)
                # in_size/shape_size recompute the sizes (None -> 1) purely
                # for the divisibility sanity check below
                in_size = prod(
                    [1 if dim is None else dim for dim in input_shape])
                shape_size = prod([1 if dim is None else dim for dim in shape])
                if in_size % shape_size != 0:
                    raise ValueError('invalid reshape')
                shape[shape.index(-1)] = inp_size // new_shape_size
            shape = np.array(shape)
            # TODO - There must be a better way of doing this
            # This hacks around the fact that the batch dimension will be in the reshape
            if input_shape[0] is None and shape[0] == 1:
                shape = np.array([None] + list(shape[1::]))

        inp = inputs[0]
        # constant folding: reshape the constant value directly rather than
        # inserting a reshape node into the graph
        if cls.is_constant(inp):
            # there should be no None in shape since a constant always has known size
            logger.info("reducing %s to a constant", valid_name)
            params = ConstantInputParameters(
                valid_name,
                value=cls.get_constant(inp).reshape(shape),
                dims=Dim.unnamed(shape))
            pshape = ProvisionalDim(shape)
            all_nodes[node.output[0]] = (params, 0, pshape, inp[3])
            return params

        pshape = ProvisionalDim(shape)
        # pylint: disable=singleton-comparison
        # elementwise `!= None` masks out the unknown dims before building
        # the concrete Dim shapes
        old_shape = Dim.unnamed(list(input_shape[input_shape != None]))
        shape = Dim.unnamed(list(shape[shape != None]))
        params = ReshapeParameters(valid_name,
                                   old_shape=old_shape,
                                   shape=shape)
        G.add_edge(
            NNEdge(from_node=inp[0], to_node=params, from_idx=inp[1],
                   to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pshape, inp[3])
        return params