Example #1
    def quantized_bprop(self, cost):
        """
        bprop equals:
        (active_prime) *elem_multiply* error_signal_in * (rep of previous layer)
        (rep of previous layer) is recorded as self.x during the fprop() process.
        Here we quantize (rep of previous layer) and leave the rest as it is.
        """
        # the lower 2**(integer power)
        index_low = T.switch(self.x > 0., T.floor(T.log2(self.x)),
                             T.floor(T.log2(-self.x)))
        sign = T.switch(self.x > 0., 1., -1.)
        # index_up = index_low + 1  # the upper 2**(integer power) though not used explicitly.
        p_up = sign * self.x / 2**(index_low) - 1  # percentage of upper index.
        srng = theano.sandbox.rng_mrg.MRG_RandomStreams(
            self.rng.randint(999999))
        index_random = index_low + srng.binomial(
            n=1, p=p_up, size=T.shape(self.x), dtype=theano.config.floatX)
        quantized_rep = sign * 2**index_random
        # There is something wrong with this hand-written backprop:
        # the code uses BN, but this explicit computation does not account for
        # the gradients introduced by BN.
        # error = self.activation_prime(self.z) * error_signal_in
        error = T.grad(cost=cost, wrt=self.z)
        self.dEdW = T.dot(quantized_rep.T, error)

        self.dEdb = T.grad(cost=cost, wrt=self.b)

        if self.BN == True:
            self.dEda = T.grad(cost=cost, wrt=self.a)
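Note: the core trick above is stochastic rounding of the stored activations to signed powers of two. A minimal NumPy sketch of that quantization step (an illustration, not part of the original layer):

import numpy as np

def stochastic_pow2_quantize(x, rng=np.random):
    # Round each nonzero element to a signed power of two; round up with
    # probability p_up so the quantized value equals x in expectation.
    sign = np.where(x > 0., 1., -1.)
    index_low = np.floor(np.log2(np.abs(x)))        # lower power of two
    p_up = np.abs(x) / 2.0**index_low - 1.0         # in [0, 1): chance of rounding up
    index = index_low + (rng.uniform(size=np.shape(x)) < p_up)
    return sign * 2.0**index

print(stochastic_pow2_quantize(np.array([0.3, -0.7, 1.5])))  # e.g. [ 0.25 -0.5  1. ] or [ 0.5 -1.  2. ], at random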
Example #2
    def quantized_bprop(self, cost):
        """
        bprop for convolution layer equals:
        
        (
            self.x.dimshuffle(1, 0, 2, 3)       (*) 
            T.grad(cost, wrt=#convoutput).dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1]
        ).dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1]

        '(*)' stands for convolution.
        Here we quantize (rep of previous layer) and leave the rest as it is.
        """
        # the lower 2**(integer power)
        index_low = T.switch(self.x > 0., T.floor(T.log2(self.x)), T.floor(T.log2(-self.x)))
        index_low = T.clip(index_low, -4, 3)
        sign = T.switch(self.x > 0., 1., -1.)
        #index_up = index_low + 1  # the upper 2**(integer power) though not used explicitly.
        p_up = sign * self.x / 2**(index_low) - 1  # percentage of upper index.
        srng = theano.sandbox.rng_mrg.MRG_RandomStreams(self.rng.randint(999999))
        index_random = index_low + srng.binomial(n=1, p=p_up, size=T.shape(self.x), dtype=theano.config.floatX)
        quantized_rep = sign * 2**index_random
        error = T.grad(cost=cost, wrt=self.conv_z)

        self.dEdW = T.nnet.conv.conv2d(
            input=quantized_rep.dimshuffle(1, 0, 2, 3),
            filters=error.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1]
        ).dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1]

        self.dEdb = T.grad(cost=cost, wrt=self.b)

        if self.BN == True:
            self.dEda = T.grad(cost=cost, wrt=self.a)
Example #3
    def quantized_bprop(self, cost):
        """
        bprop equals:
        (active_prime) *elem_multiply* error_signal_in * (rep of previous layer)
        (rep of previous layer) is recorded as self.x during the fprop() process.
        Here we quantize (rep of previous layer) and leave the rest as it is.
        """
        # the lower 2**(integer power)
        index_low = T.switch(self.x > 0., T.floor(T.log2(self.x)), T.floor(T.log2(-self.x)))
        
        index_low = T.clip(index_low, -4, 3)
        sign = T.switch(self.x > 0., 1., -1.)
        #index_up = index_low + 1  # the upper 2**(integer power) though not used explicitly.
        p_up = sign * self.x / 2**(index_low) - 1  # percentage of upper index.
        srng = theano.sandbox.rng_mrg.MRG_RandomStreams(self.rng.randint(999999))
        index_random = index_low + srng.binomial(n=1, p=p_up, size=T.shape(self.x), dtype=theano.config.floatX)
        quantized_rep = sign * 2**index_random
        # There is something wrong with this hand-written backprop:
        # the code uses BN, but this explicit computation does not account for
        # the gradients introduced by BN.
        # error = self.activation_prime(self.z) * error_signal_in
        error = T.grad(cost=cost, wrt=self.z)
        self.dEdW = T.dot(quantized_rep.T, error)
        #self.dEdW = T.dot(self.x.T, error)

        self.dEdb = T.grad(cost=cost, wrt=self.b)

        if self.BN == True:
            self.dEda = T.grad(cost=cost, wrt=self.a)
Example #4
def binarization(W,
                 H,
                 binary=True,
                 deterministic=False,
                 stochastic=False,
                 srng=None):

    # (deterministic == True) <-> test-time <-> inference-time
    if not binary or (deterministic and stochastic):
        # print("not binary")
        Wb = W

    else:

        # [-1,1] -> [0,1]
        Wb = hard_sigmoid(W / H)
        # Wb = T.clip(W/H,-1,1)

        # Stochastic BinaryConnect
        if stochastic:
            print(
                'Warning: stochastic is disabled during weight quantization; changing to deterministic'
            )
            Wb = T.floor(Wb * 8) / 7

            # Deterministic BinaryConnect (round to nearest)
        else:
            # print("det")
            Wb = T.floor(Wb * 8) / 7

        # [0, 1] -> [-1, 1]
        Wb = 2 * Wb - 1

    return Wb
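Despite the BinaryConnect comments, the branch above snaps weights to 8 evenly spaced levels rather than to ±1. A small NumPy sketch of the mapping (an illustration of the arithmetic, not the project's code):

import numpy as np

def hard_sigmoid(x):
    return np.clip((x + 1.) / 2., 0., 1.)

def quantize_8_levels(W, H=1.0):
    Wb = hard_sigmoid(W / H)        # [-H, H] -> [0, 1]
    Wb = np.floor(Wb * 8) / 7       # snap to multiples of 1/7 (W == H lands on 8/7)
    return 2 * Wb - 1               # [0, 1] -> roughly [-1, 1]

print(quantize_8_levels(np.linspace(-1, 1, 5)))
# -> [-1.         -0.42857143  0.14285714  0.71428571  1.28571429]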
Example #5
    def quantized_bprop(self, cost):
        """
        bprop for convolution layer equals:

        (
            self.x.dimshuffle(1, 0, 2, 3)       (*)
            T.grad(cost, wrt=#convoutput).dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1]
        ).dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1]

        '(*)' stands for convolution.
        Here we quantize (rep of previous layer) and leave the rest as it is.
        """
        # the lower 2**(integer power)
        index_low = T.switch(self.x > 0., T.floor(T.log2(self.x)),
                             T.floor(T.log2(-self.x)))
        index_low = T.clip(index_low, -4, 3)
        sign = T.switch(self.x > 0., 1., -1.)
        #index_up = index_low + 1  # the upper 2**(integer power) though not used explicitly.
        p_up = sign * self.x / 2**(index_low) - 1  # percentage of upper index.
        srng = theano.sandbox.rng_mrg.MRG_RandomStreams(
            self.rng.randint(999999))
        index_random = index_low + srng.binomial(
            n=1, p=p_up, size=T.shape(self.x), dtype=theano.config.floatX)
        quantized_rep = sign * 2**index_random
        error = T.grad(cost=cost, wrt=self.conv_z)

        self.dEdW = T.nnet.conv.conv2d(
            input=quantized_rep.dimshuffle(1, 0, 2, 3),
            filters=error.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1]).dimshuffle(
                1, 0, 2, 3)[:, :, ::-1, ::-1]

        self.dEdb = T.grad(cost=cost, wrt=self.b)

        if self.BN == True:
            self.dEda = T.grad(cost=cost, wrt=self.a)
def biLinearInterpolation_Coeffs_tt(x, y, matrix, x0_limit, y0_limit, x1_limit, y1_limit):

    x_in, y_in = x - x0_limit, y - y0_limit

    x0 = tt.floor(x_in)
    x1 = x0 + 1
    y0 = tt.floor(y_in)
    y1 = y0 + 1

    x0 = tt.cast(tt.clip(x0, 0, x1_limit), 'int32')
    x1 = tt.cast(tt.clip(x1, 0, x1_limit), 'int32')
    y0 = tt.cast(tt.clip(y0, 0, y1_limit), 'int32')
    y1 = tt.cast(tt.clip(y1, 0, y1_limit), 'int32')

    Q11 = matrix[y0, x0]
    Q21 = matrix[y1, x0]
    Q12 = matrix[y0, x1]
    Q22 = matrix[y1, x1]

    den12 = (x0-x1) / (y0-y1)
    den21 = (x0-x1) / (y1-y0)

    a0 = Q11 * x1*y1 / den12 + Q12 * x1*y0 / den21 + Q21 * x0*y1 / den21 + Q22 * x0*y0 / den12
    a1 = Q11 * y1 / den21 + Q12 * y0 / den12 + Q21 * y1 / den12 + Q22 * y0 / den21
    a2 = Q11 * x1 / den21 + Q12 * x1 / den12 + Q21 * x0 / den12 + Q22 * x0 / den21
    a3 = Q11 / den12 + Q12 / den21 + Q21 / den21 + Q22 / den12

    return a0, a1, a2, a3
Example #7
def quantize_weights(W, srng=None, bitlimit=None, deterministic=False):
    """
    Exponential quantization
    :param W: Weights
    :param srng: random number generator
    :param bitlimit: limit values to be in power of 2 range, e.g. for values in 2^-22 to 2^9 set it to [-22, 9]
    :param deterministic: deterministic rounding
    :return: quantized weights
    """
    bitlimit = [-22, 9]  # hardcoded for experiments; overrides the argument
    if srng is None:
        rng = np.random.RandomState(666)
        srng = theano.sandbox.rng_mrg.MRG_RandomStreams(rng.randint(999999))

    if bitlimit:
        index_low = T.clip(
            T.switch(W > 0., T.floor(T.log2(W)), T.floor(T.log2(-W))),
            bitlimit[0], bitlimit[1])
    else:
        index_low = T.switch(W > 0., T.floor(T.log2(W)), T.floor(T.log2(-W)))
    sign = T.switch(W > 0., 1., -1.)
    p_up = sign * W / 2**(index_low) - 1  # percentage of upper index.
    if deterministic:
        index_deterministic = index_low + T.switch(p_up > 0.5, 1, 0)
        quantized_W = sign * 2**index_deterministic
    else:
        index_random = index_low + srng.binomial(
            n=1, p=p_up, size=T.shape(W), dtype=theano.config.floatX)
        quantized_W = sign * 2**index_random
    return quantized_W
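The deterministic branch rounds to the nearer power of two in linear distance (round up once |W| exceeds 1.5 * 2**floor(log2|W|)). A NumPy sketch of just that branch, offered as an illustration rather than the project's implementation:

import numpy as np

def quantize_weights_det(W, bitlimit=(-22, 9)):
    sign = np.where(W > 0., 1., -1.)
    index_low = np.clip(np.floor(np.log2(np.abs(W))), bitlimit[0], bitlimit[1])
    p_up = np.abs(W) / 2.0**index_low - 1.0   # fractional position between 2**i and 2**(i+1)
    index = index_low + (p_up > 0.5)          # round up past the linear midpoint
    return sign * 2.0**index

print(quantize_weights_det(np.array([0.3, -0.7, 1.4, 1.6])))  # -> [ 0.25 -0.5   1.    2.  ]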
Example #8
def discrete_grads(loss,network,LR):
    global update_type,best_params,H,N,th # th is a parameter that controls the nonlinearity of state transfer probability

    W_params = lasagne.layers.get_all_params(network, discrete=True) #Get all the weight parameters
    layers = lasagne.layers.get_all_layers(network)
	
    W_grads = []
    for layer in layers:
        params = layer.get_params(discrete=True)
        if params:
            W_grads.append(theano.grad(loss, wrt=layer.W)) #Here layer.W = weight_tune(param)  
    updates = lasagne.updates.adam(loss_or_grads=W_grads,params=W_params,learning_rate=LR)  

    for param, parambest in izip(W_params, best_params) :

        L = 2*H/pow(2,N) #state step length in Z_N 
		
        a = random.random()  # used to draw c, a binary variable that is 1 with probability 0.85
        if a < 0.85:
            c = 1
        else:
            c = 0
        
        b=random.random()
        state_rand = T.round(b*pow(2,N))*L-H #state_rand is a random state in the discrete weight space Z_N
        
        delta_W1 = c*(state_rand-parambest)  # parambest transfers toward state_rand when c == 1 (probability 0.85), otherwise stays put
        delta_W1_direction = T.cast(T.sgn(delta_W1),theano.config.floatX)
        dis1 = T.abs_(delta_W1)  # the absolute distance
        k1 = delta_W1_direction*T.floor(dis1/L)  # the integer part
        v1 = delta_W1-k1*L  # the decimal part
        Prob1 = T.abs_(v1/L)  # the transfer probability
        Prob1 = T.tanh(th*Prob1)  # the nonlinear tanh() accelerates the state transfer
		   
        delta_W2 = updates[param] - param
        delta_W2_direction = T.cast(T.sgn(delta_W2),theano.config.floatX)
        dis2 = T.abs_(delta_W2)  # the absolute distance
        k2 = delta_W2_direction*T.floor(dis2/L)  # the integer part
        v2 = delta_W2-k2*L  # the decimal part
        Prob2 = T.abs_(v2/L)  # the transfer probability
        Prob2 = T.tanh(th*Prob2)  # the nonlinear tanh() accelerates the state transfer
       
        srng = RandomStreams(lasagne.random.get_rng().randint(1, 2147462579))
        Gate1 = T.cast(srng.binomial(n=1, p=Prob1, size=T.shape(Prob1)), theano.config.floatX) # Gate1 is a binary variable with probability of Prob1 to be 1
        Gate2 = T.cast(srng.binomial(n=1, p=Prob2, size=T.shape(Prob2)), theano.config.floatX) # Gate2 is a binary variable with probability of Prob2 to be 1

        delta_W1_new=(k1+delta_W1_direction*Gate1)*L #delta_W1_new = k*L where k is an integer
        updates_param1 = T.clip(parambest + delta_W1_new,-H,H)
        updates_param1 = weight_tune(updates_param1,-H,H) #fine tuning for guaranteeing each element strictly constrained in the discrete space

        delta_W2_new=(k2+delta_W2_direction*Gate2)*L #delta_W2_new = k*L where k is an integer  
        updates_param2 = T.clip(param + delta_W2_new,-H,H)
        updates_param2 = weight_tune(updates_param2,-H,H) #fine tuning for guaranteeing each element strictly constrained in the discrete space

        # if update_type < 100, the weight probabilistically transfers from parambest to state_rand,
        # which helps the search for a global minimum; otherwise it probabilistically transfers
        # from param to the state nearest to updates[param]
        updates[param]= T.switch(T.lt(update_type,100), updates_param1, updates_param2) 
      
    return updates
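The per-weight discretization in the loop can be read in isolation: split the continuous move into whole grid steps of length L plus a remainder, and take the extra step with a tanh-sharpened probability. A NumPy sketch under that reading (function name and th value are assumptions):

import numpy as np

def discretize_step(delta_W, L, th=3.0, rng=np.random):
    direction = np.sign(delta_W)
    dist = np.abs(delta_W)
    k = direction * np.floor(dist / L)     # whole grid steps
    v = delta_W - k * L                    # leftover fraction of one step
    prob = np.tanh(th * np.abs(v / L))     # transfer probability (sharpened, so not exactly unbiased)
    gate = rng.uniform(size=np.shape(delta_W)) < prob
    return (k + direction * gate) * L      # always a multiple of L

print(discretize_step(np.array([0.23, -0.07]), L=0.1))  # e.g. [ 0.3 -0.1] (random)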
Example #9
def eval_volume_at_3d_coordinates_in_theano(volume, coords, strides=None):
    """ Evaluates the data volume at given coordinates using trilinear interpolation.

    This function is a Theano version of `learn2track.utils.eval_volume_at_3d_coordinates`.

    Parameters
    ----------
    volume : 3D array or 4D array
        Data volume.
    coords : ndarray of shape (N, 3)
        3D coordinates where to evaluate the volume data.
    strides : tuple
        Strides of the volume (for speedup). Default: detected automatically.

    References
    ----------
    [1] https://spie.org/samples/PM159.pdf
    """
    if volume.ndim == 3:
        print(
            "eval_volume_at_3d_coordinates_in_theano with volume.ndim == 3 has not been tested."
        )
        indices = T.cast((coords[:, None, :] + idx).reshape((-1, 3)),
                         dtype="int32")
        P = volume[indices[:, 0], indices[:, 1], indices[:, 2]].reshape(
            (coords.shape[0], -1)).T
        # P = advanced_indexing(volume, indices[:, 0], indices[:, 1], indices[:, 2], strides=strides).reshape((coords.shape[0], -1)).T

        d = coords - T.floor(coords)
        dx, dy, dz = d[:, 0], d[:, 1], d[:, 2]
        Q1 = T.stack([
            T.ones_like(dx), d[:, 0], d[:, 1], d[:, 2], dx * dy, dy * dz,
            dx * dz, dx * dy * dz
        ],
                     axis=0)
        values = T.sum(P * T.dot(B1.T, Q1), axis=0)
        return values

    elif volume.ndim == 4:
        indices = T.floor((coords[:, None, :] + idx).reshape((-1, 3)))

        P = advanced_indexing(volume,
                              indices[:, 0],
                              indices[:, 1],
                              indices[:, 2],
                              strides=strides).reshape(
                                  (coords.shape[0], 8, volume.shape[-1])).T

        d = coords - T.floor(coords)
        dx, dy, dz = d[:, 0], d[:, 1], d[:, 2]
        Q1 = T.stack([
            T.ones_like(dx), d[:, 0], d[:, 1], d[:, 2], dx * dy, dy * dz,
            dx * dz, dx * dy * dz
        ],
                     axis=0)
        values = T.sum(P * T.dot(B1.T, Q1), axis=1).T
        return values
Example #10
def ShiftConv(w_t_g, s_t, N):
    shift = 2.*s_t-1.
    Z = T.mod(shift+N, N)
    simj = 1 - (Z - T.floor(Z))
    imj = T.mod(T.arange(N) + T.iround(T.floor(Z)),N)
    w_t_g_roll_1 = T.roll(w_t_g, -T.iround(T.floor(Z)))
    w_t_g_roll_2 = T.roll(w_t_g, -(T.iround(T.floor(Z))+1))
    w_t_s = w_t_g_roll_1*simj + w_t_g_roll_2*(1-simj)
    return w_t_s
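The fractional circular shift above interpolates between two integer rolls of the weighting. A NumPy version with the same arithmetic (a sketch, not the original module):

import numpy as np

def shift_conv(w, s, N):
    shift = 2. * s - 1.              # map s in [0, 1] to a shift in [-1, 1]
    Z = np.mod(shift + N, N)
    frac = Z - np.floor(Z)           # fractional part of the shift
    k = int(np.floor(Z))
    return np.roll(w, -k) * (1 - frac) + np.roll(w, -(k + 1)) * frac

print(shift_conv(np.array([0., 1., 0., 0.]), s=0.75, N=4))  # -> [0.5 0.5 0.  0. ]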
Example #11
def _interpolate(im, x, y, out_height, out_width):
    # *_f are floats
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, theano.config.floatX)
    width_f = T.cast(width, theano.config.floatX)

    # clip coordinates to [-1, 1]
    x = T.clip(x, -1, 1)
    y = T.clip(y, -1, 1)

    # scale coordinates from [-1, 1] to [0, width/height - 1]
    x = (x + 1) / 2 * (width_f - 1)
    y = (y + 1) / 2 * (height_f - 1)

    # obtain indices of the 2x2 pixel neighborhood surrounding the coordinates;
    # we need those in floatX for interpolation and in int64 for indexing. for
    # indexing, we need to take care they do not extend past the image.
    x0_f = T.floor(x)
    y0_f = T.floor(y)
    x1_f = x0_f + 1
    y1_f = y0_f + 1
    x0 = T.cast(x0_f, 'int64')
    y0 = T.cast(y0_f, 'int64')
    x1 = T.cast(T.minimum(x1_f, width_f - 1), 'int64')
    y1 = T.cast(T.minimum(y1_f, height_f - 1), 'int64')

    # The input is [num_batch, height, width, channels]. We do the lookup in
    # the flattened input, i.e [num_batch*height*width, channels]. We need
    # to offset all indices to match the flat version
    dim2 = width
    dim1 = width*height
    base = T.repeat(
        T.arange(num_batch, dtype='int64')*dim1, out_height*out_width)
    base_y0 = base + y0*dim2
    base_y1 = base + y1*dim2
    idx_a = base_y0 + x0
    idx_b = base_y1 + x0
    idx_c = base_y0 + x1
    idx_d = base_y1 + x1

    # use indices to lookup pixels for all samples
    im_flat = im.reshape((-1, channels))
    Ia = im_flat[idx_a]
    Ib = im_flat[idx_b]
    Ic = im_flat[idx_c]
    Id = im_flat[idx_d]

    # calculate interpolated values
    wa = ((x1_f-x) * (y1_f-y)).dimshuffle(0, 'x')
    wb = ((x1_f-x) * (y-y0_f)).dimshuffle(0, 'x')
    wc = ((x-x0_f) * (y1_f-y)).dimshuffle(0, 'x')
    wd = ((x-x0_f) * (y-y0_f)).dimshuffle(0, 'x')
    output = T.sum([wa*Ia, wb*Ib, wc*Ic, wd*Id], axis=0)

    assert str(output.dtype) == theano.config.floatX, str(output.dtype)
    return output
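Stripped of the batching and flat-index bookkeeping, the bilinear lookup above reduces to four neighbours weighted by their opposite areas. A single-channel NumPy sketch (illustrative only):

import numpy as np

def bilinear_sample(im, x, y):
    # im: (H, W) image; x, y: float pixel coordinates inside the image.
    H, W = im.shape
    x0f, y0f = np.floor(x), np.floor(y)
    x1f, y1f = x0f + 1, y0f + 1
    x0, y0 = int(x0f), int(y0f)
    x1, y1 = int(min(x1f, W - 1)), int(min(y1f, H - 1))   # clip indices, keep float corners for weights
    wa = (x1f - x) * (y1f - y)   # weight of (x0, y0)
    wb = (x1f - x) * (y - y0f)   # weight of (x0, y1)
    wc = (x - x0f) * (y1f - y)   # weight of (x1, y0)
    wd = (x - x0f) * (y - y0f)   # weight of (x1, y1)
    return wa * im[y0, x0] + wb * im[y1, x0] + wc * im[y0, x1] + wd * im[y1, x1]

print(bilinear_sample(np.arange(9.).reshape(3, 3), 0.5, 0.5))  # -> 2.0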
Example #12
File: special.py  Project: rmanor/Lasagne
def _interpolate(im, x, y, out_height, out_width):
    # *_f are floats
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, theano.config.floatX)
    width_f = T.cast(width, theano.config.floatX)

    # scale indices from [-1, 1] to [0, width/height].
    x = (x + 1) / 2 * width_f
    y = (y + 1) / 2 * height_f

    # Clip indices to ensure they are not out of bounds.
    max_x = width_f - 1
    max_y = height_f - 1
    x0 = T.clip(x, 0, max_x)
    x1 = T.clip(x + 1, 0, max_x)
    y0 = T.clip(y, 0, max_y)
    y1 = T.clip(y + 1, 0, max_y)

    # We need floatX for interpolation and int64 for indexing.
    x0_f = T.floor(x0)
    x1_f = T.floor(x1)
    y0_f = T.floor(y0)
    y1_f = T.floor(y1)
    x0 = T.cast(x0, 'int64')
    x1 = T.cast(x1, 'int64')
    y0 = T.cast(y0, 'int64')
    y1 = T.cast(y1, 'int64')

    # The input is [num_batch, height, width, channels]. We do the lookup in
    # the flattened input, i.e [num_batch*height*width, channels]. We need
    # to offset all indices to match the flat version
    dim2 = width
    dim1 = width*height
    base = T.repeat(
        T.arange(num_batch, dtype='int64')*dim1, out_height*out_width)
    base_y0 = base + y0*dim2
    base_y1 = base + y1*dim2
    idx_a = base_y0 + x0
    idx_b = base_y1 + x0
    idx_c = base_y0 + x1
    idx_d = base_y1 + x1

    # use indices to lookup pixels for all samples
    im_flat = im.reshape((-1, channels))
    Ia = im_flat[idx_a]
    Ib = im_flat[idx_b]
    Ic = im_flat[idx_c]
    Id = im_flat[idx_d]

    # calculate interpolated values
    wa = ((x1_f-x) * (y1_f-y)).dimshuffle(0, 'x')
    wb = ((x1_f-x) * (y-y0_f)).dimshuffle(0, 'x')
    wc = ((x-x0_f) * (y1_f-y)).dimshuffle(0, 'x')
    wd = ((x-x0_f) * (y-y0_f)).dimshuffle(0, 'x')
    output = T.sum([wa*Ia, wb*Ib, wc*Ic, wd*Id], axis=0)
    return output
Example #13
File: transform.py  Project: nebw/beras
def _interpolate(im, x, y, out_height, out_width):
    # *_f are floats
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, theano.config.floatX)
    width_f = T.cast(width, theano.config.floatX)

    # clip coordinates to [-1, 1]
    x = T.clip(x, -1, 1)
    y = T.clip(y, -1, 1)

    # scale coordinates from [-1, 1] to [0, width/height - 1]
    x = (x + 1) / 2 * (width_f - 1)
    y = (y + 1) / 2 * (height_f - 1)

    # obtain indices of the 2x2 pixel neighborhood surrounding the coordinates;
    # we need those in floatX for interpolation and in int64 for indexing. for
    # indexing, we need to take care they do not extend past the image.
    x0_f = T.floor(x)
    y0_f = T.floor(y)
    x1_f = x0_f + 1
    y1_f = y0_f + 1
    x0 = T.cast(x0_f, 'int64')
    y0 = T.cast(y0_f, 'int64')
    x1 = T.cast(T.minimum(x1_f, width_f - 1), 'int64')
    y1 = T.cast(T.minimum(y1_f, height_f - 1), 'int64')

    # The input is [num_batch, height, width, channels]. We do the lookup in
    # the flattened input, i.e [num_batch*height*width, channels]. We need
    # to offset all indices to match the flat version
    dim2 = width
    dim1 = width*height
    base = T.repeat(
        T.arange(num_batch, dtype='int64')*dim1, out_height*out_width)
    base_y0 = base + y0*dim2
    base_y1 = base + y1*dim2
    idx_a = base_y0 + x0
    idx_b = base_y1 + x0
    idx_c = base_y0 + x1
    idx_d = base_y1 + x1

    # use indices to lookup pixels for all samples
    im_flat = im.reshape((-1, channels))
    Ia = im_flat[idx_a]
    Ib = im_flat[idx_b]
    Ic = im_flat[idx_c]
    Id = im_flat[idx_d]

    # calculate interpolated values
    wa = ((x1_f-x) * (y1_f-y)).dimshuffle(0, 'x')
    wb = ((x1_f-x) * (y-y0_f)).dimshuffle(0, 'x')
    wc = ((x-x0_f) * (y1_f-y)).dimshuffle(0, 'x')
    wd = ((x-x0_f) * (y-y0_f)).dimshuffle(0, 'x')
    output = T.sum([wa*Ia, wb*Ib, wc*Ic, wd*Id], axis=0)

    assert str(output.dtype) == theano.config.floatX, str(output.dtype)
    return output
Example #14
def _interpolate(im, x, y, out_height, out_width):
    # *_f are floats
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, theano.config.floatX)
    width_f = T.cast(width, theano.config.floatX)

    # scale indices from [-1, 1] to [0, width/height].
    x = (x + 1) / 2 * width_f
    y = (y + 1) / 2 * height_f

    # Clip indices to ensure they are not out of bounds.
    max_x = width_f - 1
    max_y = height_f - 1
    x0 = T.clip(x, 0, max_x)
    x1 = T.clip(x + 1, 0, max_x)
    y0 = T.clip(y, 0, max_y)
    y1 = T.clip(y + 1, 0, max_y)

    # We need floatX for interpolation and int64 for indexing.
    x0_f = T.floor(x0)
    x1_f = T.floor(x1)
    y0_f = T.floor(y0)
    y1_f = T.floor(y1)
    x0 = T.cast(x0, 'int64')
    x1 = T.cast(x1, 'int64')
    y0 = T.cast(y0, 'int64')
    y1 = T.cast(y1, 'int64')

    # The input is [num_batch, height, width, channels]. We do the lookup in
    # the flattened input, i.e [num_batch*height*width, channels]. We need
    # to offset all indices to match the flat version
    dim2 = width
    dim1 = width*height
    base = T.repeat(
        T.arange(num_batch, dtype='int64')*dim1, out_height*out_width)
    base_y0 = base + y0*dim2
    base_y1 = base + y1*dim2
    idx_a = base_y0 + x0
    idx_b = base_y1 + x0
    idx_c = base_y0 + x1
    idx_d = base_y1 + x1

    # use indices to lookup pixels for all samples
    im_flat = im.reshape((-1, channels))
    Ia = im_flat[idx_a]
    Ib = im_flat[idx_b]
    Ic = im_flat[idx_c]
    Id = im_flat[idx_d]

    # calculate interpolated values
    wa = ((x1_f-x) * (y1_f-y)).dimshuffle(0, 'x')
    wb = ((x1_f-x) * (y-y0_f)).dimshuffle(0, 'x')
    wc = ((x-x0_f) * (y1_f-y)).dimshuffle(0, 'x')
    wd = ((x-x0_f) * (y-y0_f)).dimshuffle(0, 'x')
    output = T.sum([wa*Ia, wb*Ib, wc*Ic, wd*Id], axis=0)
    return output
Example #15
def _interpolate(im, x, y, out_height, out_width):
    # *_f are floats
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, 'float32')
    width_f = T.cast(width, 'float32')
    zero = T.zeros([], dtype='int64')
    max_y = im.shape[1] - 1
    max_x = im.shape[2] - 1

    # scale indices from [-1, 1] to [0, width/height].
    x = (x + 1.0)*(width_f) / 2.0
    y = (y + 1.0)*(height_f) / 2.0

    x0 = T.cast(T.floor(x), 'int64')
    x1 = x0 + 1
    y0 = T.cast(T.floor(y), 'int64')
    y1 = y0 + 1

    # Clip indices to ensure they are not out of bounds.
    x0 = T.clip(x0, zero, max_x)
    x1 = T.clip(x1, zero, max_x)
    y0 = T.clip(y0, zero, max_y)
    y1 = T.clip(y1, zero, max_y)

    # The input is [num_batch, height, width, channels]. We do the lookup in
    # the flattened input, i.e [num_batch*height*width, channels]. We need
    # to offset all indices to match the flat version
    dim2 = width
    dim1 = width*height
    base = _repeat(
        T.arange(num_batch, dtype='int32')*dim1, out_height*out_width)
    base_y0 = base + y0*dim2
    base_y1 = base + y1*dim2
    idx_a = base_y0 + x0
    idx_b = base_y1 + x0
    idx_c = base_y0 + x1
    idx_d = base_y1 + x1

    # use indices to lookup pixels for all samples
    im_flat = im.reshape((-1, channels))
    Ia = im_flat[idx_a]
    Ib = im_flat[idx_b]
    Ic = im_flat[idx_c]
    Id = im_flat[idx_d]

    # calculate interpolated values
    x0_f = T.cast(x0, 'float32')
    x1_f = T.cast(x1, 'float32')
    y0_f = T.cast(y0, 'float32')
    y1_f = T.cast(y1, 'float32')
    wa = ((x1_f-x) * (y1_f-y)).dimshuffle(0, 'x')
    wb = ((x1_f-x) * (y-y0_f)).dimshuffle(0, 'x')
    wc = ((x-x0_f) * (y1_f-y)).dimshuffle(0, 'x')
    wd = ((x-x0_f) * (y-y0_f)).dimshuffle(0, 'x')
    output = T.sum([wa*Ia, wb*Ib, wc*Ic, wd*Id], axis=0)
    return output
def MASK_blanking(x_i):
    # Find indicies of first and last non-zero value in x_i
    idxs = T.nonzero(x_i)[0][[1, -1]]
    # Diff = no of non zero values
    no_values = idxs[1] - idxs[0]
    # Move index inside by proportion of no of values
    idxs0 = T.cast(T.floor(idxs[0] + no_values * blank_proportion), 'int32')
    idxs1 = T.cast(T.floor(idxs[1] - no_values * blank_proportion), 'int32')
    # Return a vector that has a tighter mask than x_i
    return T.set_subtensor(T.zeros_like(x_i)[idxs0:idxs1], T.alloc(1., idxs1-idxs0))
Example #17
File: attention.py  Project: berleon/seya
    def _interpolate(self, im, x, y, downsample_factor):
        # constants
        num_batch, height, width, channels = im.shape
        height_f = T.cast(height, floatX)
        width_f = T.cast(width, floatX)
        out_height = T.cast(height_f // downsample_factor, 'int64')
        out_width = T.cast(width_f // downsample_factor, 'int64')
        zero = T.zeros([], dtype='int64')
        max_y = T.cast(im.shape[1] - 1, 'int64')
        max_x = T.cast(im.shape[2] - 1, 'int64')

        # scale indices from [-1, 1] to [0, width/height]
        x = (x + 1.0)*(width_f) / 2.0
        y = (y + 1.0)*(height_f) / 2.0

        # do sampling
        x0 = T.cast(T.floor(x), 'int64')
        x1 = x0 + 1
        y0 = T.cast(T.floor(y), 'int64')
        y1 = y0 + 1

        x0 = T.clip(x0, zero, max_x)
        x1 = T.clip(x1, zero, max_x)
        y0 = T.clip(y0, zero, max_y)
        y1 = T.clip(y1, zero, max_y)
        dim2 = width
        dim1 = width*height
        base = self._repeat(
            T.arange(num_batch, dtype='int32')*dim1, out_height*out_width)
        base_y0 = base + y0*dim2
        base_y1 = base + y1*dim2
        idx_a = base_y0 + x0
        idx_b = base_y1 + x0
        idx_c = base_y0 + x1
        idx_d = base_y1 + x1

        # use indices to lookup pixels in the flat
        #  image and restore channels dim
        im_flat = im.reshape((-1, channels))
        Ia = im_flat[idx_a]
        Ib = im_flat[idx_b]
        Ic = im_flat[idx_c]
        Id = im_flat[idx_d]

        # and finally calculate interpolated values
        x0_f = T.cast(x0, floatX)
        x1_f = T.cast(x1, floatX)
        y0_f = T.cast(y0, floatX)
        y1_f = T.cast(y1, floatX)
        wa = ((x1_f-x) * (y1_f-y)).dimshuffle(0, 'x')
        wb = ((x1_f-x) * (y-y0_f)).dimshuffle(0, 'x')
        wc = ((x-x0_f) * (y1_f-y)).dimshuffle(0, 'x')
        wd = ((x-x0_f) * (y-y0_f)).dimshuffle(0, 'x')
        output = T.sum([wa*Ia, wb*Ib, wc*Ic, wd*Id], axis=0)
        return output
Example #18
    def _interpolate(im, x, y, downsample_factor):
        # constants
        num_batch, height, width, channels = im.shape
        height_f = T.cast(height, floatX)
        width_f = T.cast(width, floatX)
        out_height = T.cast(height_f // downsample_factor, 'int64')
        out_width = T.cast(width_f // downsample_factor, 'int64')
        zero = T.zeros([], dtype='int64')
        max_y = T.cast(im.shape[1] - 1, 'int64')
        max_x = T.cast(im.shape[2] - 1, 'int64')

        # scale indices from [-1, 1] to [0, width/height]
        x = (x + 1.0) * (width_f) / 2.0
        y = (y + 1.0) * (height_f) / 2.0

        # do sampling
        x0 = T.cast(T.floor(x), 'int64')
        x1 = x0 + 1
        y0 = T.cast(T.floor(y), 'int64')
        y1 = y0 + 1

        x0 = T.clip(x0, zero, max_x)
        x1 = T.clip(x1, zero, max_x)
        y0 = T.clip(y0, zero, max_y)
        y1 = T.clip(y1, zero, max_y)
        dim2 = width
        dim1 = width * height
        base = SpatialTransformer._repeat(
            T.arange(num_batch, dtype='int32') * dim1, out_height * out_width)
        base_y0 = base + y0 * dim2
        base_y1 = base + y1 * dim2
        idx_a = base_y0 + x0
        idx_b = base_y1 + x0
        idx_c = base_y0 + x1
        idx_d = base_y1 + x1

        # use indices to lookup pixels in the flat
        #  image and restore channels dim
        im_flat = im.reshape((-1, channels))
        Ia = im_flat[idx_a]
        Ib = im_flat[idx_b]
        Ic = im_flat[idx_c]
        Id = im_flat[idx_d]

        # and finally calculate interpolated values
        x0_f = T.cast(x0, floatX)
        x1_f = T.cast(x1, floatX)
        y0_f = T.cast(y0, floatX)
        y1_f = T.cast(y1, floatX)
        wa = ((x1_f - x) * (y1_f - y)).dimshuffle(0, 'x')
        wb = ((x1_f - x) * (y - y0_f)).dimshuffle(0, 'x')
        wc = ((x - x0_f) * (y1_f - y)).dimshuffle(0, 'x')
        wd = ((x - x0_f) * (y - y0_f)).dimshuffle(0, 'x')
        output = T.sum([wa * Ia, wb * Ib, wc * Ic, wd * Id], axis=0)
        return output
Example #19
def _interpolate(im, x, y, out_height, out_width, dtype = 'float32'):
  # *_f are floats
  num_batch, height, width, channels = im.shape
  height_f = T.cast(height, dtype = dtype)
  width_f = T.cast(width, dtype = dtype)

  # scale coordinates from [-1, 1] to [0, width/height - 1]
  idx = ((x >= 0) & (x <= 1) & (y >= 0) & (y <= 1)).nonzero()[0]
  # x = (x + 1) / 2 * (width_f - 1)
  # y = (y + 1) / 2 * (height_f - 1)
  x = x * (width_f - 1)
  y = y * (height_f - 1)
  # obtain indices of the 2x2 pixel neighborhood surrounding the coordinates;
  # we need those in floatX for interpolation and in int64 for indexing. for
  # indexing, we need to take care they do not extend past the image.
  x0_f = T.floor(x)
  y0_f = T.floor(y)
  x1_f = x0_f + 1
  y1_f = y0_f + 1
  x0 = T.cast(x0_f, 'int64')
  y0 = T.cast(y0_f, 'int64')
  x1 = T.cast(T.minimum(x1_f, width_f - 1), 'int64')
  y1 = T.cast(T.minimum(y1_f, height_f - 1), 'int64')

  # The input is [num_batch, height, width, channels]. We do the lookup in
  # the flattened input, i.e [num_batch*height*width, channels]. We need
  # to offset all indices to match the flat version
  dim2 = width
  dim1 = width*height
  base = T.repeat(
      T.arange(num_batch, dtype='int64')*dim1, out_height*out_width)
  base_y0 = base + y0*dim2
  base_y1 = base + y1*dim2
  idx_a = base_y0 + x0
  idx_b = base_y1 + x0
  idx_c = base_y0 + x1
  idx_d = base_y1 + x1

  # use indices to lookup pixels for all samples
  im_flat = im.reshape((-1, channels))
  Ia = im_flat[idx_a[idx]]
  Ib = im_flat[idx_b[idx]]
  Ic = im_flat[idx_c[idx]]
  Id = im_flat[idx_d[idx]]

  # calculate interpolated values
  wa = ((x1_f-x) * (y1_f-y)).dimshuffle(0, 'x')[idx, :]
  wb = ((x1_f-x) * (y-y0_f)).dimshuffle(0, 'x')[idx, :]
  wc = ((x-x0_f) * (y1_f-y)).dimshuffle(0, 'x')[idx, :]
  wd = ((x-x0_f) * (y-y0_f)).dimshuffle(0, 'x')[idx, :]
  output = T.sum([wa*Ia, wb*Ib, wc*Ic, wd*Id], axis=0)

  # out = T.zeros_like(((x1_f-x) * (y1_f-y)).dimshuffle(0, 'x'))
  out = T.zeros_like(im_flat)
  return T.set_subtensor(out[idx, :], output)
Example #20
def _interpolate_bicubic(im, x, y, out_height, out_width):
    # *_f are floats
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, theano.config.floatX)
    width_f = T.cast(width, theano.config.floatX)
    grid = _meshgrid(out_height, out_width)
    x_grid_flat = grid[0].flatten()
    y_grid_flat = grid[1].flatten()

    # clip coordinates to [-1, 1]
    x = T.clip(x, -1, 1)
    y = T.clip(y, -1, 1)
    # scale coordinates from [-1, 1] to [0, width/height - 1]
    x = (x + 1) / 2 * (width_f - 1)
    y = (y + 1) / 2 * (height_f - 1)

    x0_f = T.floor(x)
    y0_f = T.floor(y)
    x0 = T.cast(x0_f, "int64")
    y0 = T.cast(y0_f, "int64")
    # return T.concatenate(((x0-x).dimshuffle(0, 'x')**2, 0.0*dg2(x.dimshuffle(0, 'x')), 0.0*dg2(x0.dimshuffle(0, 'x'))), 1)

    offsets = np.arange(-1, 3).astype(int)
    dim2 = width
    dim1 = width * height
    base = T.repeat(T.arange(num_batch, dtype="int64") * dim1, out_height * out_width)
    # Need to convert (x, y) to linear
    def _flat_idx(xx, yy, dim2=dim2):
        return base + yy * dim2 + xx

    y_locs = [y0 + offset for offset in offsets]
    ys = [T.clip(loc, 0, height - 1) for loc in y_locs]

    def _cubic_interp_dim(im_flat, other_idx):
        """Cubic interpolation along a dimension
        """
        neighbor_locs = [x0 + offset for offset in offsets]
        neighbor_idx = [T.clip(nloc, 0, width - 1) for nloc in neighbor_locs]
        xidxs = neighbor_idx
        yidxs = [other_idx] * len(neighbor_idx)
        neighbor_idxs = [_flat_idx(xidx, yidx) for xidx, yidx in zip(xidxs, yidxs)]
        values = [im_flat[idx] for idx in neighbor_idxs]
        weights = [_cubic_conv_weights(dg2(nloc) - x).dimshuffle(0, "x") for nloc in neighbor_locs]
        # Interpolate along x direction
        out = T.sum([dg2(v) * w for w, v in zip(weights, values)], axis=0) / T.sum(weights, axis=0)
        return out

    im_flat = im.reshape((-1, channels))
    ims = [_cubic_interp_dim(im_flat, yidx) for yidx in ys]
    yweights = [_cubic_conv_weights(dg2(yloc) - y).dimshuffle(0, "x") for yloc in y_locs]
    out = T.sum(
        [v * _cubic_conv_weights(dg2(yloc) - y).dimshuffle(0, "x") for v, yloc in zip(ims, y_locs)], axis=0
    ) / T.sum(yweights, axis=0)
    return out
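_cubic_conv_weights and dg2 are not defined in this snippet. For reference, a common choice for such a kernel is Keys' cubic convolution kernel (a = -0.5); the sketch below is an assumption about what that helper might look like, not the project's definition:

import numpy as np

def cubic_conv_weights(t, a=-0.5):
    # Keys' cubic convolution kernel; support is |t| < 2.
    t = np.abs(t)
    return np.where(t <= 1, (a + 2) * t**3 - (a + 3) * t**2 + 1,
           np.where(t < 2, a * t**3 - 5 * a * t**2 + 8 * a * t - 4 * a, 0.))

print(cubic_conv_weights(np.array([-1.5, -0.5, 0., 0.5, 1.5])))
# -> [-0.0625  0.5625  1.      0.5625 -0.0625]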
Example #21
    def get_output_for(self, inputs, **kwargs):
        # For each ROI R = [batch_index x1 y1 x2 y2]: max pool over R
        input = inputs[0]
        boxes = inputs[1]
        batch = T.shape(input)[0]
        channels = T.shape(input)[1]
        height = T.shape(input)[2]
        width = T.shape(input)[3]
        num_boxes = T.shape(boxes)[0]
        output = T.zeros((batch * num_boxes, channels, self.num_features))

        for idbb, bb in enumerate(range(num_boxes)):
            batch_ind = bb[0]

            pool_list = []
            #for pool_dim in self.pool_dims:
            start_w = T.clip(T.floor(bb[1] * self.sp_scale), 0, width)
            start_h = T.clip(T.floor(bb[2] * self.sp_scale), 0, height)
            end_w = T.clip(T.ceil(bb[3] * self.sp_scale), 0, width)
            end_h = T.clip(T.ceil(bb[4] * self.sp_scale), 0, height)

            w = T.maximum(end_w - start_w + 1, 1)
            h = T.maximum(end_h - start_h + 1, 1)

            start_samples_y, start_sample_x = T.floor(
                _meshgrid(start_h, end_h, pool_dims + 1, start_w, end_w,
                          pool_dims + 1))
            end_samples_y, end_sample_x = T.ceil(
                _meshgrid(start_h, end_h, pool_dims + 1, start_w, end_w,
                          pool_dims + 1))

            input[batch_ind, :,
                  np.floor(py):np.ceil(samples_y[idy + 1]),
                  np.floor(px):np.ceil(samples_x[idx + 1])]

            #T.max()

            #for idx,px in enumerate(samples_x[:-1]):
            #    for idy,py in enumerate(samples_y[:-1]):

            #       (pool.dnn_pool( input[batch_ind,:,np.floor(py):np.ceil(samples_y[idy+1]),np.floor(px):np.ceil(samples_x[idx+1])],(0,0),(None,None),'max', (0,0) )).flatten(2)

            #sz_w = ( w - 1 ) // pool_dim
            #sz_h = ( h - 1 ) // pool_dim

            #str_h = w // pool_dim
            #str_w = h // pool_dim

            #pool = dnn.dnn_pool( input[bb[0],:,start_h:end_h+1,start_w:end_w+1], (sz_h,sz_w),                 (str_h,str_w), 'max', (0,0) ).flatten(2)
        pool_list.append(pool)
        output[idbb] = T.transpose(T.concatenate(
            pool_list, axis=1))  #not efficient but for the moment is ok!
        #if everything is correct this vector should be ordered as in fast RCNN
        return output
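The layer above is only partially written out (several referenced names such as pool_dims, samples_y, px and py are undefined or commented away). As a point of comparison, a compact NumPy sketch of single-level ROI max pooling for one box; the box format and helper structure here are assumptions, not the author's code:

import numpy as np

def roi_max_pool(fmap, box, out_size=2, sp_scale=1.0):
    # fmap: (C, H, W) feature map; box: (x1, y1, x2, y2) in image coordinates.
    C, H, W = fmap.shape
    x1, y1, x2, y2 = [v * sp_scale for v in box]
    x1, y1 = int(np.clip(np.floor(x1), 0, W)), int(np.clip(np.floor(y1), 0, H))
    x2, y2 = int(np.clip(np.ceil(x2), 0, W)), int(np.clip(np.ceil(y2), 0, H))
    xs, ys = np.linspace(x1, x2, out_size + 1), np.linspace(y1, y2, out_size + 1)
    out = np.empty((C, out_size, out_size))
    for i in range(out_size):
        for j in range(out_size):
            out[:, i, j] = fmap[:,
                                int(np.floor(ys[i])):int(np.ceil(ys[i + 1])),
                                int(np.floor(xs[j])):int(np.ceil(xs[j + 1]))].max(axis=(1, 2))
    return out.reshape(C, -1)   # one fixed-length feature vector per box

print(roi_max_pool(np.arange(32.).reshape(2, 4, 4), box=(0, 0, 4, 4)))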
Example #22
def t_noise3d(v, perm, grad3):
    x = v[0]
    y = v[1]
    z = v[2]
    skew_factor = (x + y + z) * 1.0 / 3.0
    i = T.floor(x + skew_factor)
    j = T.floor(y + skew_factor)
    k = T.floor(z + skew_factor)
    unskew_factor = (i + j + k) * 1.0 / 6.0
    x0 = x - (i - unskew_factor)
    y0 = y - (j - unskew_factor)
    z0 = z - (k - unskew_factor)
    vertices = T.switch(
        T.ge(x0, y0),
        T.switch(
            T.ge(y0, z0), vertices_options[0],
            T.switch(T.ge(x0, z0), vertices_options[1], vertices_options[2])),
        T.switch(
            T.lt(y0, z0), vertices_options[3],
            T.switch(T.lt(x0, z0), vertices_options[4], vertices_options[5])))
    x1 = x0 - vertices[0][0] + 1.0 / 6.0
    y1 = y0 - vertices[0][1] + 1.0 / 6.0
    z1 = z0 - vertices[0][2] + 1.0 / 6.0
    x2 = x0 - vertices[1][0] + 1.0 / 3.0
    y2 = y0 - vertices[1][1] + 1.0 / 3.0
    z2 = z0 - vertices[1][2] + 1.0 / 3.0
    x3 = x0 - 0.5
    y3 = y0 - 0.5
    z3 = z0 - 0.5
    ii = T.bitwise_and(i.astype('int32'), 255)
    jj = T.bitwise_and(j.astype('int32'), 255)
    kk = T.bitwise_and(k.astype('int32'), 255)
    gi0 = perm[ii + perm[jj + perm[kk].astype('int32')].astype('int32')] % 12
    gi1 = perm[ii + vertices[0][0] + perm[jj + vertices[0][1] + perm[
        kk + vertices[0][2]].astype('int32')].astype('int32')] % 12
    gi2 = perm[ii + vertices[1][0] + perm[jj + vertices[1][1] + perm[
        kk + vertices[1][2]].astype('int32')].astype('int32')] % 12
    gi3 = perm[ii + 1 +
               perm[jj + 1 +
                    perm[kk + 1].astype('int32')].astype('int32')] % 12
    t0 = 0.5 - x0**2 - y0**2 - z0**2
    n0 = T.switch(T.lt(t0, 0), 0.0,
                  t0**4 * T.dot(grad3[gi0.astype('int32')], [x0, y0, z0]))
    t1 = 0.5 - x1**2 - y1**2 - z1**2
    n1 = T.switch(T.lt(t1, 0), 0.0,
                  t1**4 * T.dot(grad3[gi1.astype('int32')], [x1, y1, z1]))
    t2 = 0.5 - x2**2 - y2**2 - z2**2
    n2 = T.switch(T.lt(t2, 0), 0.0,
                  t2**4 * T.dot(grad3[gi2.astype('int32')], [x2, y2, z2]))
    t3 = 0.5 - x3**2 - y3**2 - z3**2
    n3 = T.switch(T.lt(t3, 0), 0.0,
                  t3**4 * T.dot(grad3[gi3.astype('int32')], [x3, y3, z3]))
    return 23.0 * (n0 + n1 + n2 + n3)
Example #23
File: layers.py  Project: wufangjie/dnn
 def process(self, input, tparams, BNparams):
     b, f, h0, w0 = input.shape
     result = []
     for h, w in self.pymamid:
         win_h = T.ceil(h0 / h).astype('int32')
         win_w = T.ceil(w0 / w).astype('int32')
         str_h = T.floor(h0 / h).astype('int32')
         str_w = T.floor(w0 / w).astype('int32')
         result.append(dnn_pool(
             img=input, ws=(win_h, win_w), mode=self.mode,
             stride=(str_h, str_w), pad=(0, 0)).reshape([b, -1]))
     return T.concatenate(result, axis=1)
Example #24
File: pool.py  Project: HapeMask/Lasagne
def pool_2d_nxn_regions(inputs, output_size, mode='max'):
    """
    Performs a pooling operation that results in a fixed size:
    output_size x output_size.
    Used by SpatialPyramidPoolingLayer. Refer to appendix A in [1]

    Parameters
    ----------
    inputs : a tensor with 4 dimensions (N x C x H x W)
    output_size: integer
        The output size of the pooling operation
    mode : string
        Pooling mode, one of 'max', 'average_inc_pad', 'average_exc_pad'
        Defaults to 'max'.

    Returns a list of tensors, for each output bin.
       The list contains output_size*output_size elements, where
       each element is a 3D tensor (N x C x 1)

    References
    ----------
    .. [1] He, Kaiming et al (2015):
           Spatial Pyramid Pooling in Deep Convolutional Networks
           for Visual Recognition.
           http://arxiv.org/pdf/1406.4729.pdf.
    """

    if mode == 'max':
        pooling_op = T.max
    elif mode in ['average_inc_pad', 'average_exc_pad']:
        pooling_op = T.mean
    else:
        msg = "Mode must be either 'max', 'average_inc_pad' or "
        msg += "'average_exc_pad'. Got '{0}'"
        raise ValueError(msg.format(mode))

    h, w = inputs.shape[2:]

    result = []
    n = float(output_size)

    for row in range(output_size):
        for col in range(output_size):
            start_h = T.floor(row / n * h).astype('int32')
            end_h = T.ceil((row + 1) / n * h).astype('int32')
            start_w = T.floor(col / n * w).astype('int32')
            end_w = T.ceil((col + 1) / n * w).astype('int32')

            pooling_region = inputs[:, :, start_h:end_h, start_w:end_w]
            this_result = pooling_op(pooling_region, axis=(2, 3))
            result.append(this_result.dimshuffle(0, 1, 'x'))
    return result
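The floor/ceil bin edges give slightly overlapping regions that always cover the whole feature map, so the output size is fixed regardless of H and W. A quick NumPy check of the row boundaries (illustration only):

import numpy as np

h, n = 7, 3   # feature-map height, output bins per side
for row in range(n):
    start_h = int(np.floor(row / float(n) * h))
    end_h = int(np.ceil((row + 1) / float(n) * h))
    print(row, (start_h, end_h))
# -> 0 (0, 3)   1 (2, 5)   2 (4, 7)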
Example #25
def pool_2d_nxn_regions(inputs, output_size, mode='max'):
    """
    Performs a pooling operation that results in a fixed size:
    output_size x output_size.
    Used by SpatialPyramidPoolingLayer. Refer to appendix A in [1]

    Parameters
    ----------
    inputs : a tensor with 4 dimensions (N x C x H x W)
    output_size: integer
        The output size of the pooling operation
    mode : string
        Pooling mode, one of 'max', 'average_inc_pad', 'average_exc_pad'
        Defaults to 'max'.

    Returns a list of tensors, for each output bin.
       The list contains output_size*output_size elements, where
       each element is a 3D tensor (N x C x 1)

    References
    ----------
    .. [1] He, Kaiming et al (2015):
           Spatial Pyramid Pooling in Deep Convolutional Networks
           for Visual Recognition.
           http://arxiv.org/pdf/1406.4729.pdf.
    """

    if mode == 'max':
        pooling_op = T.max
    elif mode in ['average_inc_pad', 'average_exc_pad']:
        pooling_op = T.mean
    else:
        msg = "Mode must be either 'max', 'average_inc_pad' or "
        msg += "'average_exc_pad'. Got '{0}'"
        raise ValueError(msg.format(mode))

    h, w = inputs.shape[2:]

    result = []
    n = float(output_size)

    for row in range(output_size):
        for col in range(output_size):
            start_h = T.floor(row / n * h).astype('int32')
            end_h = T.ceil((row + 1) / n * h).astype('int32')
            start_w = T.floor(col / n * w).astype('int32')
            end_w = T.ceil((col + 1) / n * w).astype('int32')

            pooling_region = inputs[:, :, start_h:end_h, start_w:end_w]
            this_result = pooling_op(pooling_region, axis=(2, 3))
            result.append(this_result.dimshuffle(0, 1, 'x'))
    return result
Example #26
def blockify(
        inp, block_size = (1, 1), step_size = (1, 1), direction = (1, 1),
        padding = False):
    input_size = T.shape(inp)
    if padding:
        b0 = T.ceil((input_size[0] - block_size[0]) / step_size[0]) + 1
        b1 = T.ceil((input_size[1] - block_size[1]) / step_size[1]) + 1
    else:
        b0 = T.floor((input_size[0] - block_size[0]) / step_size[0]) + 1
        b1 = T.floor((input_size[1] - block_size[1]) / step_size[1]) + 1
    num_blocks = b0 * b1

    for b in range(num_blocks):
Example #27
def _interpolate_bicubic(im, x, y, out_height, out_width):
    # *_f are floats
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, theano.config.floatX)
    width_f = T.cast(width, theano.config.floatX)
    grid = _meshgrid(out_height, out_width)
    x_grid_flat = grid[0].flatten()
    y_grid_flat = grid[1].flatten()

    # clip coordinates to [-1, 1]
    x = T.clip(x, -1, 1)
    y = T.clip(y, -1, 1)
    # scale coordinates from [-1, 1] to [0, width/height - 1]
    x = (x + 1) / 2 * (width_f - 1)
    y = (y + 1) / 2 * (height_f - 1)

    x0_f = T.floor(x)
    y0_f = T.floor(y)
    x0 = T.cast(x0_f, 'int64')
    y0 = T.cast(y0_f, 'int64')
    #return T.concatenate(((x0-x).dimshuffle(0, 'x')**2, 0.0*dg2(x.dimshuffle(0, 'x')), 0.0*dg2(x0.dimshuffle(0, 'x'))), 1)

    offsets = np.arange(-1, 3).astype(int)
    dim2 = width
    dim1 = width*height
    base = T.repeat(
        T.arange(num_batch, dtype='int64')*dim1, out_height*out_width)
    # Need to convert (x, y) to linear
    def _flat_idx(xx, yy, dim2=dim2):
        return base + yy * dim2 + xx
    y_locs = [y0 + offset for offset in offsets]
    ys = [T.clip(loc, 0, height - 1) for loc in y_locs]

    def _cubic_interp_dim(im_flat, other_idx):
        """Cubic interpolation along a dimension
        """
        neighbor_locs = [x0 + offset for offset in offsets]
        neighbor_idx = [T.clip(nloc, 0, width - 1) for nloc in neighbor_locs]
        xidxs = neighbor_idx
        yidxs = [other_idx] * len(neighbor_idx)
        neighbor_idxs = [_flat_idx(xidx, yidx) for xidx, yidx in zip(xidxs, yidxs)]
        values = [im_flat[idx] for idx in neighbor_idxs]
        weights = [_cubic_conv_weights(dg2(nloc) - x).dimshuffle(0, 'x')  for nloc in neighbor_locs]
        # Interpolate along x direction
        out = T.sum([dg2(v) * w for w, v in zip(weights, values)], axis=0) / T.sum(weights, axis=0)
        return out
    im_flat = im.reshape((-1, channels))
    ims = [_cubic_interp_dim(im_flat, yidx) for yidx in ys]
    yweights =  [_cubic_conv_weights(dg2(yloc) - y).dimshuffle(0, 'x') for yloc in y_locs]
    out = T.sum([v *  _cubic_conv_weights(dg2(yloc) - y).dimshuffle(0, 'x') for v, yloc in zip(ims,  y_locs)], axis=0) / T.sum(yweights, axis=0)
    return out
Example #28
def _interpolate(input_, warpgrid_x, warpgrid_y):
    num_batch, height, width, channels = input_.shape
    height_f = T.cast(height, theano.config.floatX)
    width_f = T.cast(width, theano.config.floatX)
    x, y = warpgrid_x, warpgrid_y

    x0_f = T.floor(x)
    y0_f = T.floor(y)
    x1_f = x0_f + 1
    y1_f = y0_f + 1

    # 1 clip out of boundary points
    x0 = T.clip(x0_f, 0, width_f - 1)
    x1 = T.clip(x1_f, 0, width_f - 1)
    y0 = T.clip(y0_f, 0, height_f - 1)
    y1 = T.clip(y1_f, 0, height_f - 1)
    x0, x1, y0, y1 = (T.cast(v, 'int64') for v in (x0, x1, y0, y1))

    # 2 convert to indexing in flatten vector
    dim2 = width
    dim1 = width * height
    base = T.repeat(T.arange(num_batch, dtype='int64') * dim1, height * width)
    base_y0 = base + y0 * dim2
    base_y1 = base + y1 * dim2
    idx_a = base_y0 + x0
    idx_b = base_y1 + x0
    idx_c = base_y0 + x1
    idx_d = base_y1 + x1

    # 3 indexing and sum
    im_flat = T.reshape(input_, (-1, channels))
    Ia = im_flat[idx_a]  #[num_batch*height*width, channels]
    Ib = im_flat[idx_b]
    Ic = im_flat[idx_c]
    Id = im_flat[idx_d]

    wa = T.repeat(((x1_f - x) * (y1_f - y)).dimshuffle(0, 'x'),
                  channels,
                  axis=1)
    wb = T.repeat(((x1_f - x) * (y - y0_f)).dimshuffle(0, 'x'),
                  channels,
                  axis=1)
    wc = T.repeat(((x - x0_f) * (y1_f - y)).dimshuffle(0, 'x'),
                  channels,
                  axis=1)
    wd = T.repeat(((x - x0_f) * (y - y0_f)).dimshuffle(0, 'x'),
                  channels,
                  axis=1)
    res = wa * Ia + wb * Ib + wc * Ic + wd * Id

    return res
def binlinear_sampling(img, x, y, df):
    # constants

    num_batch, height, width, channels = img.shape
    f_height = T.cast(height, 'float32')
    f_width = T.cast(width, 'float32')
    o_height = T.cast(f_height // df, 'int64')  # df is the downsample factor
    o_width = T.cast(f_width // df, 'int64')
    zero = T.zeros([], dtype='int64')
    y_max = T.cast(img.shape[1] - 1, 'int64')
    x_max = T.cast(img.shape[2] - 1, 'int64')
    o_x = (x + 1.0) * (f_width) / 2.0
    o_y = (y + 1.0) * (f_height) / 2.0

    x0 = T.cast(T.floor(o_x), 'int64')
    x1 = x0 + 1
    y0 = T.cast(T.floor(o_y), 'int64')
    y1 = y0 + 1

    x_floor = T.clip(x0, zero, x_max)
    x_ceil = T.clip(x1, zero, x_max)
    y_floor = T.clip(y0, zero, y_max)
    y_ceil = T.clip(y1, zero, y_max)
    dim1 = width * height
    dim2 = width
    base = rept(T.arange(num_batch, dtype='int32') * dim1, o_height * o_width)
    base_y_floor = base + y_floor * dim2
    base_y_ceil = base + y_ceil * dim2
    idxa = base_y_floor + x_floor
    idxb = base_y_ceil + x_floor
    idxc = base_y_floor + x_ceil
    idxd = base_y_ceil + x_ceil

    img_flat = img.reshape((-1, channels))
    I_a = img_flat[idxa]
    I_b = img_flat[idxb]
    I_c = img_flat[idxc]
    I_d = img_flat[idxd]

    # and finally calculate interpolated values
    xf_f = T.cast(x_floor, 'float32')
    xc_f = T.cast(x_ceil, 'float32')
    yf_f = T.cast(y_floor, 'float32')
    yc_f = T.cast(y_ceil, 'float32')
    w_a = ((xc_f - x) * (yc_f - y)).dimshuffle(0, 'x')
    w_b = ((xc_f - x) * (y - yf_f)).dimshuffle(0, 'x')
    w_c = ((x - xf_f) * (yc_f - y)).dimshuffle(0, 'x')
    w_d = ((x - xf_f) * (y - yf_f)).dimshuffle(0, 'x')
    output = T.sum([w_a * I_a, w_b * I_b, w_c * I_c, w_d * I_d], axis=0)
    return output
Example #30
File: rand.py  Project: gburt/iaf
def discretized_gaussian(mean, logvar, binsize, sample=None):
    scale = T.exp(.5*logvar)
    if sample is None:
        _y = G.rng_curand.normal(size=mean.shape)
        sample = mean + scale * _y  # sample from the actual Gaussian
        sample = T.floor(sample/binsize)*binsize #discretize the sample
    _sample = (T.floor(sample/binsize)*binsize - mean)/scale
    def _erf(x):
        return T.erf(x/T.sqrt(2.))
    logp = T.log( _erf(_sample + binsize/scale) - _erf(_sample) + 1e-7) + T.log(.5)
    logp = logp.flatten(2).sum(axis=1)
    #raise Exception()
    entr = (.5 * (T.log(2 * math.pi) + 1 + logvar)).flatten(2).sum(axis=1)
    return RandomVariable(sample, logp, entr, mean=mean, logvar=logvar)
Example #31
File: rand.py  Project: gburt/iaf
def discretized_logistic(mean, logscale, binsize, sample=None):
    scale = T.exp(logscale)
    if sample is None:
        u = G.rng_curand.uniform(size=mean.shape)
        _y = T.log(-u/(u-1)) #inverse CDF of the logistic
        sample = mean + scale * _y #sample from the actual logistic
        sample = T.floor(sample/binsize)*binsize #discretize the sample
    _sample = (T.floor(sample/binsize)*binsize - mean)/scale
    logps = T.log( T.nnet.sigmoid(_sample + binsize/scale) - T.nnet.sigmoid(_sample) + 1e-7)
    logp = logps.flatten(2).sum(axis=1)
    #raise Exception()
    entr = logscale.flatten(2)
    entr = entr.sum(axis=1) + 2. * entr.shape[1].astype(G.floatX)
    return RandomVariable(sample, logp, entr, mean=mean, logscale=logscale, logps=logps)
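Both helpers above score a sample by the probability mass of the bin it falls into (CDF at the right edge minus CDF at the left edge). A NumPy sketch of the logistic case, written as a standalone function for illustration:

import numpy as np

def discretized_logistic_logp(x, mean, logscale, binsize):
    scale = np.exp(logscale)
    z = (np.floor(x / binsize) * binsize - mean) / scale   # standardised left bin edge
    sigmoid = lambda t: 1. / (1. + np.exp(-t))
    # mass of the bin [edge, edge + binsize), plus a small epsilon for stability
    return np.log(sigmoid(z + binsize / scale) - sigmoid(z) + 1e-7)

print(discretized_logistic_logp(x=0.1, mean=0.0, logscale=-1.0, binsize=1 / 127.5))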
Example #32
 def sample(self, X):
     mu = X[0]
     sig = X[1]
     coeff = X[2]
     n_noise = T.cast(T.floor(coeff.shape[-1] * self.p_noise), 'int32')
     mu = T.concatenate(
         [mu, T.zeros((mu.shape[0],
                       n_noise*sig.shape[1]/coeff.shape[-1]))],
         axis=1
     )
     mu = mu.reshape((mu.shape[0],
                      mu.shape[1]/coeff.shape[-1],
                      coeff.shape[-1]))
     sig = sig.reshape((sig.shape[0],
                        sig.shape[1]/coeff.shape[-1],
                        coeff.shape[-1]))
     idx = predict(
         self.theano_rng.multinomial(
             pvals=coeff,
             dtype=coeff.dtype
         ),
         axis=1
     )
     mu = mu[T.arange(mu.shape[0]), :, idx]
     sig = sig[T.arange(sig.shape[0]), :, idx]
     sample = self.theano_rng.normal(size=mu.shape,
                                     avg=mu, std=sig,
                                     dtype=mu.dtype)
     return sample
Example #33
    def compute_hard_windows(self, image_shape, location, scale):
        # find topleft(front) and bottomright(back) corners for each patch
        a = location - 0.5 * (T.cast(self.patch_shape, theano.config.floatX) / scale)
        b = location + 0.5 * (T.cast(self.patch_shape, theano.config.floatX) / scale)

        # grow by three patch pixels
        a -= self.kernel.k_sigma_radius(self.cutoff, scale)
        b += self.kernel.k_sigma_radius(self.cutoff, scale)

        # clip to fit inside image and have nonempty window
        a = T.clip(a, 0, image_shape - 1)
        b = T.clip(b, a + 1, image_shape)

        if self.batched_window:
            # take the bounding box of all windows; now the slices
            # will have the same length for each sample and scan can
            # be avoided.  comes at the cost of typically selecting
            # more of the input.
            a = a.min(axis=0, keepdims=True)
            b = b.max(axis=0, keepdims=True)

        # make integer
        a = T.cast(T.floor(a), 'int16')
        b = T.cast(T.ceil(b), 'int16')

        return a, b
Example #34
        def _histogram(x, nbin=20, xlim=(0, 1)):

            y = T.floor((x - xlim[0]) * nbin / (xlim[1] - xlim[0]))
            hist = T.stack([
                T.cast(T.eq(y, b), T.config.floatX).sum() for b in range(nbin)
            ])
            return hist / hist.sum()
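The same binning rule in plain NumPy (values equal to xlim[1] fall outside the last bin, as in the Theano version); a small sanity check:

import numpy as np

def histogram(x, nbin=20, xlim=(0, 1)):
    y = np.floor((x - xlim[0]) * nbin / (xlim[1] - xlim[0]))   # bin index per sample
    hist = np.array([(y == b).sum() for b in range(nbin)], dtype=float)
    return hist / hist.sum()

print(histogram(np.array([0.05, 0.5, 0.55, 0.9]), nbin=10))
# -> [0.25 0.   0.   0.   0.   0.5  0.   0.   0.   0.25]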
Example #35
 def compute_sub_all_scores(self, start_end):
     plu = softmax(
         T.dot(self.trained_users[start_end],
               self.trained_items.T))[:, :-1]  # (n_batch, n_item)
     length = T.max(T.sum(self.tes_masks[start_end], axis=1))  # 253
     cidx = T.arange(length).reshape(
         (1, length)) + self.tra_accum_lens[start_end][:, 0].reshape(
             (len(start_end), 1))
     cl = T.sum(self.trained_items[self.tra_context_masks[cidx]],
                axis=2)  # n_batch x seq_length x n_size
     cl = cl.dimshuffle(1, 2, 0)
     pb = self.trained_branch[
         self.routes]  # (n_item x 4 x tree_depth x n_size)
     shp0, shp1, shp2 = self.lrs.shape
     lrs = self.lrs.reshape((shp0, shp1, shp2, 1, 1))
     pr_bc = T.dot(pb, cl)
     br = sigmoid(pr_bc * lrs) * T.ceil(
         abs(pr_bc))  # (n_item x 4 x tree_depth x seq_length x n_batch)
     path = T.prod(br, axis=2) * self.probs.reshape((shp0, shp1, 1, 1))
     del cl, pb, br, lrs
     # paths = T.prod((T.floor(1 - path) + path), axis=1)  # (n_item x seq_length x n_batch)
     paths = T.sum(path, axis=1)
     paths = T.floor(1 - paths) + paths
     p = paths[:-1].T * plu.reshape(
         (plu.shape[0], 1, plu.shape[1]))  # (n_batch x n_item)
     # p = plu.reshape((plu.shape[0], 1, plu.shape[1])) * T.ones((plu.shape[0], length, plu.shape[1]))
     return T.reshape(p, (p.shape[0] * p.shape[1], p.shape[2])).eval()
Example #36
 def __theano_train__(self, n_size):
     """
     Pr(l|u, C(l)) = Pr(l|u) * Pr(l|C(l))
     Pr(u, l, t) = Pr(l|u, C(l))     if C(l) exists,
                   Pr(l|u)           otherwise.
     $Theta$ = argmax Pr(u, l, t)
     """
     tra_mask = T.ivector()
     seq_length = T.sum(tra_mask)  # effective length
     wl = T.concatenate((self.wl, self.wl_m))
     tidx, cidx, bidx, userid = T.ivector(), T.imatrix(), T.itensor3(
     ), T.iscalar()
     pb = self.pb[bidx]  # (seq_length x 4 x depth x n_size)
     lrs = self.lrs[tidx]  # (seq_length x 4 x depth)
     # user preference
     xu = self.xu[userid]
     plu = softmax(T.dot(xu, self.wl.T))
     # geographical influence
     cl = T.sum(wl[cidx], axis=1)  # (seq_length x n_size)
     cl = cl.reshape((cl.shape[0], 1, 1, cl.shape[1]))
     br = sigmoid(T.sum(pb[:seq_length] * cl, axis=3) *
                  lrs[:seq_length]) * T.ceil(abs(T.mean(cl, axis=3)))
     path = T.prod(br, axis=2) * self.probs[tidx][:seq_length]
     # paths = T.prod((T.floor(1-path) + path), axis=1)
     paths = T.sum(path, axis=1)
     paths = T.floor(1 - paths) + paths
     # ----------------------------------------------------------------------------
     # cost, gradients, learning rate, l2 regularization
     lr, l2 = self.alpha_lambda[0], self.alpha_lambda[1]
     seq_l2_sq = T.sum([T.sum(par**2) for par in [xu, self.wl]])
     upq = -1 * T.sum(T.log(plu[tidx[:seq_length]] * paths)) / seq_length
     seq_costs = (upq + 0.5 * l2 * seq_l2_sq)
     seq_grads = T.grad(seq_costs, self.params)
     seq_updates = [(par, par - lr * gra)
                    for par, gra in zip(self.params, seq_grads)]
     pars_subs = [(self.xu, xu), (self.pb, pb)]
     seq_updates.extend([
         (par, T.set_subtensor(sub, sub - lr * T.grad(seq_costs, sub)))
         for par, sub in pars_subs
     ])
     # ----------------------------------------------------------------------------
     uidx = T.iscalar()  # T.iscalar() has type TensorType(int32, )
     self.seq_train = theano.function(
         inputs=[uidx],
         outputs=upq,
         updates=seq_updates,
         givens={
             userid:
             uidx,
             tidx:
             self.tra_target_masks[uidx],
             cidx:
             self.tra_context_masks[T.arange(self.tra_accum_lens[uidx][0],
                                             self.tra_accum_lens[uidx][1])],
             bidx:
             self.routes[self.tra_target_masks[uidx]],
             tra_mask:
             self.tra_masks[uidx]
             # tra_mask_cot: self.tra_masks_cot[T.arange(self.tra_accum_lens[uidx][0], self.tra_accum_lens[uidx][1])]
         })
Example #37
File: permute.py  Project: zenna/ig
    def get_output_for(self, input, **kwargs):
        p = self.p
        k = self.k
        nbatches = input.shape[0]
        x_len = self.x_len
        # x_len = 30
        # x = input.reshape((nbatches, x_len))
        x = input.reshape((nbatches, x_len))

        p_floor = T.floor(p)
        p_ceil = T.ceil(p)
        
        # Deltas
        p_delta = p - p_floor
        ep_delta = T.exp(k*-p_delta)

        p2_delta = 1 - p_delta
        ep2_delta = T.exp(k*-p2_delta)

        p0_delta = 1 + p_delta
        ep0_delta = T.exp(k*-p0_delta)

        ep_sum = ep_delta + ep2_delta + ep0_delta

        perm1 = x[:, (T.cast(p_floor, 'int32'))%x_len]
        perm2 = x[:, (T.cast(p_ceil, 'int32')+1)%x_len]
        perm0 = x[:, (T.cast(p_floor, 'int32')-1)%x_len]

        perm1_factor = ep_delta * perm1
        perm2_factor = ep2_delta * perm2
        perm3_factor = ep0_delta * perm0
        res = (perm1_factor + perm2_factor + perm3_factor) / ep_sum
        return res.reshape(input.shape)
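A small NumPy sketch of the idea behind the layer above: a fractional index is resolved by weighting a few neighbouring positions with exp(-k * distance) and normalising. The helper name soft_pick and the exact neighbour choice are illustrative, not taken from permute.py.

import numpy as np

def soft_pick(x, p, k=10.0):
    # softly read x at fractional position p: weight the three nearest
    # integer positions by exp(-k * distance) and normalise the weights
    n = len(x)
    lo = int(np.floor(p))
    d = p - lo
    dists = np.array([1.0 + d, d, 1.0 - d])      # distances to lo-1, lo, lo+1
    weights = np.exp(-k * dists)
    weights /= weights.sum()
    idx = np.array([lo - 1, lo, lo + 1]) % n     # wrap around, as the layer does
    return float(np.dot(weights, x[idx]))

x = np.arange(8.0)
print(soft_pick(x, 2.0))    # ~2.0: almost all weight lands on x[2] for large k
print(soft_pick(x, 2.5))    # ~2.5: split between x[2] and x[3]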
예제 #38
0
def create_learning_rate_func(solver_params):
    base = tt.fscalar('base')
    gamma = tt.fscalar('gamma')
    power = tt.fscalar('power')
    itrvl = tt.fscalar('itrvl')
    iter = tt.scalar('iter')

    if solver_params['lr_type']=='inv':
        lr_ = base * tt.pow(1 + gamma * iter, -power)

        lr = t.function(
            inputs=[iter, t.Param(base, default=solver_params['base']), t.Param(gamma, default=solver_params['gamma']), t.Param(power, default=solver_params['power'])],
            outputs=lr_)

    elif solver_params['lr_type']=='fixed':
        lr_ = base

        lr = t.function(
            inputs=[iter, t.Param(base, default=solver_params['base'])],
            outputs=lr_,
            on_unused_input='ignore')

    elif solver_params['lr_type']=='episodic':
        lr_ = base / (tt.floor(iter/itrvl) + 1)

        lr = t.function(
            inputs=[iter, t.Param(base, default=solver_params['base']), t.Param(itrvl, default=solver_params['interval'])],
            outputs=lr_,
            on_unused_input='ignore')
    else:
        raise ValueError("unknown lr_type: %s" % solver_params['lr_type'])

    return lr
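For reference, a plain NumPy sketch of the 'inv' and 'episodic' schedules compiled above; the function names inv_lr and episodic_lr are made up for illustration.

import numpy as np

def inv_lr(base, gamma, power, it):
    # 'inv' schedule: base * (1 + gamma * iter) ** (-power)
    return base * np.power(1.0 + gamma * it, -power)

def episodic_lr(base, interval, it):
    # 'episodic' schedule: divide by (number of completed intervals + 1)
    return base / (np.floor(it / interval) + 1.0)

print(inv_lr(0.01, 1e-4, 0.75, 10000))      # smoothly decayed rate
print(episodic_lr(0.01, 5000, 12000))       # 0.01 / 3 after two full intervals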
예제 #39
0
 def inv(self, output):
     
     output = (output.dimshuffle(0,1,2,'x',3,'x',4,'x')
         .repeat(self.pool_shape[0], axis=3)
         .repeat(self.pool_shape[1], axis=5)
         .repeat(self.pool_shape[2], axis=7))
     
     if self.depooler == 'random':
         unpooled = (
             self.input_shape[0], self.input_shape[1], 
             self.input_shape[2]//self.pool_shape[0], self.pool_shape[0],
             self.input_shape[3]//self.pool_shape[1], self.pool_shape[1],
             self.input_shape[4]//self.pool_shape[2], self.pool_shape[2])
         
         pooled = (
             self.input_shape[0], self.input_shape[1], 
             self.input_shape[2]//self.pool_shape[0], 1,
             self.input_shape[3]//self.pool_shape[1], 1,
             self.input_shape[4]//self.pool_shape[2], 1)
         
         output_mask = self.theano_rng.uniform(size=unpooled, dtype=theano.config.floatX)
         output_mask = output_mask / output_mask.max(axis=7).max(axis=5).max(axis=3).dimshuffle(0,1,2,'x',3,'x',4,'x')
         output_mask = T.floor(output_mask)
         
         return (output_mask * output).reshape(self.input_shape)
     else:
         output = self.depooler(output, axis=3)
         return output
예제 #40
0
    def build_graph(self):
        # theano variables
        iw_b = T.lmatrix('iw_b')
        ic_b = T.ltensor3('ic_b')
        it_b = T.lmatrix('it_b')
        il_b = T.lmatrix('il_b')
        v_b = T.lmatrix('v_b')  # valid action mask
        y_b = T.lvector('y_b')  # index of the correct action from oracle

        steps = T.lscalar('steps')  # num_of steps
        lr = self.args.learn_rate * self.args.decay**T.cast(
            T.floor(steps / 2000.), 'float32')

        iw, ic, it, il, self.actor = self.get_actor(False)
        iw_avg, ic_avg, it_avg, il_avg, self.actor_avg = self.get_actor(True)

        actor_prob = L.get_output(self.actor_avg, {
            iw_avg: iw_b,
            ic_avg: ic_b,
            it_avg: it_b,
            il_avg: il_b
        },
                                  deterministic=True)
        actor_rest = actor_prob * T.cast(
            v_b, theano.config.floatX
        )  # mask the probabilities of invalid actions to 0
        actor_pred = T.argmax(actor_rest, 1)
        self.actor_predict = theano.function([v_b, iw_b, ic_b, it_b, il_b],
                                             actor_pred,
                                             on_unused_input='ignore')

        y_hat = L.get_output(self.actor, {
            iw: iw_b,
            ic: ic_b,
            it: it_b,
            il: il_b
        },
                             deterministic=False)
        xent = T.mean(lasagne.objectives.categorical_crossentropy(y_hat, y_b))
        reg = lasagne.regularization.regularize_network_params(
            L.get_all_layers(self.actor), lasagne.regularization.l2)
        cost = xent + self.args.reg_rate * reg
        correct = T.eq(T.argmax(y_hat, 1), y_b).sum()

        params = L.get_all_params(self.actor)
        avg_params = L.get_all_params(self.actor_avg)
        grads = T.grad(cost, params)
        if self.args.grad_norm:
            grads, norm = lasagne.updates.total_norm_constraint(
                grads, self.args.grad_norm, return_norm=True)

        updates = lasagne.updates.momentum(grads, params, lr,
                                           self.args.momentum)
        updates = apply_moving_average(params, avg_params, updates, steps,
                                       0.9999)

        inputs = [steps, y_b, v_b, iw_b, ic_b, it_b, il_b]
        self.train_actor_supervised = theano.function(inputs, [correct, cost],
                                                      updates=updates,
                                                      on_unused_input='ignore')
예제 #41
0
	def dog_output(input_image):

	    batch_size,channels,height,width=input_image.shape
	    conv_output = T.nnet.conv2d(input_image, dog_W, filter_flip=False,
		                        border_mode='half', subsample=(1, 1))
	    time_steps=32

	    conv_output2=conv_output[:,::-1,:,:]

	    dog_maps=conv_output - conv_output2
	    #
	    dog_maps=T.ge(dog_maps,0)*dog_maps
	    dog_maps = T.switch(dog_maps>0.0, dog_maps, 0.0)
	    dog_maps_neq_Zero=T.neq(T.reshape(dog_maps,(batch_size*height*width*channels*2,)),0)
	    #
	    dog_maps=T.floor(dog_maps*time_steps)
	    dog_maps=T.switch(dog_maps<time_steps-1,dog_maps,time_steps-1)
	    dog_maps=T.cast(dog_maps,'int32')
	    # dog_maps = time_steps-1-dog_maps
	    # dog_maps = T.reshape(dog_maps,(batch_size*height*width*channels*2,))

	    # output = T.zeros((time_steps,batch_size*height*width*channels*2))
	    # #
	    # output = T.set_subtensor(output[dog_maps,T.arange(output.shape[1])],dog_maps_neq_Zero)
	    # output = T.reshape(output,[time_steps,batch_size,channels*2,height,width])
	    # #print(dog_maps[:].eval())
	    return dog_maps
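A hedged NumPy sketch of the intensity-to-latency step at the end of dog_output: rectified DoG responses (assumed to lie roughly in [0, 1]) are mapped to one of time_steps discrete time bins. intensity_to_timestep is a made-up name.

import numpy as np

def intensity_to_timestep(act, time_steps=32):
    # rectify, scale to [0, time_steps), floor to an integer bin, and clamp
    # to the last bin, as in the tail of dog_output above
    act = np.maximum(act, 0.0)
    steps = np.floor(act * time_steps)
    return np.minimum(steps, time_steps - 1).astype(np.int32)

print(intensity_to_timestep(np.array([0.0, 0.4, 0.99, 1.5])))   # [ 0 12 31 31]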
예제 #42
0
    def get_stencil(self, t, r=None, texp=None):
        if r is None or texp is None:
            return tt.shape_padright(t)

        z = tt.zeros_like(self.a)
        r = tt.as_tensor_variable(r)
        R = self.r_star + z
        hp = 0.5 * self.period

        if self.ecc is None:
            # Equation 14 from Winn (2010)
            k = r / self.r_star
            arg1 = tt.square(1 + k) - tt.square(self.b)
            arg2 = tt.square(1 - k) - tt.square(self.b)
            factor = R / (self.a * self.sin_incl)
            hdur1 = hp * tt.arcsin(factor * tt.sqrt(arg1)) / np.pi
            hdur2 = hp * tt.arcsin(factor * tt.sqrt(arg2)) / np.pi
            ts = [-hdur1, -hdur2, hdur2, hdur1]
            flag = z

        else:
            M_contact1 = self.contact_points_op(self.a, self.ecc,
                                                self.cos_omega, self.sin_omega,
                                                self.cos_incl + z,
                                                self.sin_incl + z, R + r)
            M_contact2 = self.contact_points_op(self.a, self.ecc,
                                                self.cos_omega, self.sin_omega,
                                                self.cos_incl + z,
                                                self.sin_incl + z, R - r)

            flag = M_contact1[2] + M_contact2[2]

            ts = [
                tt.mod(
                    (M_contact1[0] - self.M0) / self.n + hp, self.period) - hp,
                tt.mod(
                    (M_contact2[0] - self.M0) / self.n + hp, self.period) - hp,
                tt.mod(
                    (M_contact2[1] - self.M0) / self.n + hp, self.period) - hp,
                tt.mod(
                    (M_contact1[1] - self.M0) / self.n + hp, self.period) - hp
            ]

        start = self.period * tt.floor((tt.min(t) - self.t0) / self.period)
        end = self.period * (tt.ceil((tt.max(t) - self.t0) / self.period) + 1)
        start += self.t0
        end += self.t0
        tout = []
        for i in range(4):
            if z.ndim < 1:
                tout.append(ts[i] + tt.arange(start, end, self.period))
            else:
                tout.append(
                    theano.scan(
                        fn=lambda t0, s0, e0, p0: t0 + tt.arange(s0, e0, p0),
                        sequences=[ts[i], start, end, self.period],
                    )[0].flatten())

        ts = tt.sort(tt.concatenate(tout))
        return ts, flag
예제 #43
0
    def generate_forward_diffusion_sample(self, X_noiseless):
        """
        Corrupt a training image with t steps worth of Gaussian noise, and
        return the corrupted image, as well as the mean and covariance of the
        posterior q(x^{t-1}|x^t, x^0).
        """

        X_noiseless = X_noiseless.reshape(
            (-1, self.n_colors, self.spatial_width, self.spatial_width))

        n_images = X_noiseless.shape[0].astype('int16')
        rng = Random().theano_rng
        # choose a timestep in [1, self.trajectory_length-1].
        # note the reverse process is fixed for the very
        # first timestep, so we skip it.
        # TODO for some reason random_integer is missing from the Blocks
        # theano random number generator.
        t = T.floor(rng.uniform(size=(1,1), low=1, high=self.trajectory_length,
            dtype=theano.config.floatX))
        t_weights = self.get_t_weights(t)
        N = rng.normal(size=(n_images, self.n_colors, self.spatial_width, self.spatial_width),
            dtype=theano.config.floatX)

        # noise added this time step
        beta_forward = self.get_beta_forward(t)
        # decay in noise variance due to original signal this step
        alpha_forward = 1. - beta_forward
        # compute total decay in the fraction of the variance due to X_noiseless
        alpha_arr = 1. - self.beta_arr
        alpha_cum_forward_arr = T.extra_ops.cumprod(alpha_arr).reshape((self.trajectory_length,1))
        alpha_cum_forward = T.dot(t_weights.T, alpha_cum_forward_arr)
        # total fraction of the variance due to noise being mixed in
        beta_cumulative = 1. - alpha_cum_forward
        # total fraction of the variance due to noise being mixed in one step ago
        beta_cumulative_prior_step = 1. - alpha_cum_forward/alpha_forward

        # generate the corrupted training data
        X_uniformnoise = X_noiseless + (rng.uniform(size=(n_images, self.n_colors, self.spatial_width, self.spatial_width),
            dtype=theano.config.floatX)-T.constant(0.5,dtype=theano.config.floatX))*T.constant(self.uniform_noise,dtype=theano.config.floatX)
        X_noisy = X_uniformnoise*T.sqrt(alpha_cum_forward) + N*T.sqrt(1. - alpha_cum_forward)

        # compute the mean and covariance of the posterior distribution
        mu1_scl = T.sqrt(alpha_cum_forward / alpha_forward)
        mu2_scl = 1. / T.sqrt(alpha_forward)
        cov1 = 1. - alpha_cum_forward/alpha_forward
        cov2 = beta_forward / alpha_forward
        lam = 1./cov1 + 1./cov2
        mu = (
                X_uniformnoise * mu1_scl / cov1 +
                X_noisy * mu2_scl / cov2
            ) / lam
        sigma = T.sqrt(1./lam)
        sigma = sigma.reshape((1,1,1,1))

        mu.name = 'mu q posterior'
        sigma.name = 'sigma q posterior'
        X_noisy.name = 'X_noisy'
        t.name = 't'

        return X_noisy, t, mu, sigma
예제 #44
0
파일: ttv.py 프로젝트: dfm/exoplanet
 def _warp_times(self, t):
     delta = tt.shape_padleft(t) / tt.shape_padright(self.period, t.ndim)
     delta += tt.shape_padright(self._base_time, t.ndim)
     ind = tt.cast(tt.floor(delta), "int64")
     dt = tt.stack([ttv[tt.clip(ind[i], 0, ttv.shape[0]-1)]
                    for i, ttv in enumerate(self.ttvs)], -1)
     return tt.shape_padright(t) + dt
예제 #45
0
 def inv(self, output):
     
     output = (output.dimshuffle(0,1,2,'x',3,'x')
         .repeat(self.pool_shape[1], axis=5)
         .repeat(self.pool_shape[0], axis=3))
     
     if self.depooler == 'random':
         unpooled = (
             self.input_shape[0], self.input_shape[1], 
             self.input_shape[2]//self.pool_shape[0], self.pool_shape[0],
             self.input_shape[3]//self.pool_shape[1], self.pool_shape[1])
         
         pooled = (
             self.input_shape[0], self.input_shape[1], 
             self.input_shape[2]//self.pool_shape[0], 1,
             self.input_shape[3]//self.pool_shape[1], 1)
         
         output_mask = self.theano_rng.uniform(size=unpooled, dtype=theano.config.floatX)
         output_mask = output_mask / output_mask.max(axis=5).max(axis=3).dimshuffle(0,1,2,'x',3,'x')
         output_mask = T.floor(output_mask)
         
         return (output_mask * output).reshape(self.input_shape)
     else:
         output = self.depooler(output, axis=5)
         output = self.depooler(output, axis=3)
         return output
예제 #46
0
파일: crop.py 프로젝트: mohammadpz/rna
    def compute_hard_windows(self, image_shape, location, scale):
        # find topleft(front) and bottomright(back) corners for each patch
        a = location - 0.5 * (T.cast(self.patch_shape, theano.config.floatX) / scale)
        b = location + 0.5 * (T.cast(self.patch_shape, theano.config.floatX) / scale)

        # grow by three patch pixels
        a -= self.kernel.k_sigma_radius(self.cutoff, scale)
        b += self.kernel.k_sigma_radius(self.cutoff, scale)

        # clip to fit inside image and have nonempty window
        a = T.clip(a, 0, image_shape - 1)
        b = T.clip(b, a + 1, image_shape)

        if self.batched_window:
            # take the bounding box of all windows; now the slices
            # will have the same length for each sample and scan can
            # be avoided.  comes at the cost of typically selecting
            # more of the input.
            a = a.min(axis=0, keepdims=True)
            b = b.max(axis=0, keepdims=True)

        # make integer
        a = T.cast(T.floor(a), 'int16')
        b = T.cast(T.ceil(b), 'int16')

        return a, b
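A numeric NumPy sketch of the window arithmetic above; hard_window and the scalar margin are illustrative stand-ins for the kernel-radius growth used in crop.py.

import numpy as np

def hard_window(image_shape, location, scale, patch_shape, margin):
    # corners of the attended patch, grown by `margin`, clipped to the image
    a = location - 0.5 * (np.asarray(patch_shape, float) / scale) - margin
    b = location + 0.5 * (np.asarray(patch_shape, float) / scale) + margin
    a = np.clip(a, 0, np.asarray(image_shape) - 1)
    b = np.clip(b, a + 1, image_shape)
    return np.floor(a).astype(int), np.ceil(b).astype(int)

print(hard_window((100, 100), np.array([10., 90.]), 1.0, (24, 24), 3))
# (array([ 0, 75]), array([ 25, 100]))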
예제 #47
0
def discrete_grads(loss,network,LR):
    global update_type,best_params,H,N,th # th is a parameter that controls the nonlinearity of state transfer probability

    W_params = lasagne.layers.get_all_params(network, discrete=True) #Get all the weight parameters
    layers = lasagne.layers.get_all_layers(network)

    W_grads = []
    for layer in layers:
        params = layer.get_params(discrete=True)
        if params:
            W_grads.append(theano.grad(loss, wrt=layer.W)) #Here layer.W = weight_tune(param)  
    updates = lasagne.updates.adam(loss_or_grads=W_grads,params=W_params,learning_rate=LR)  

    for param, parambest in izip(W_params, best_params) :

        L = 2*H/pow(2,N)  # state step length in Z_N

        a = random.random()  # c is a random variable with binary value
        if a<0.85:
           c = 1
        else:
           c = 0
        
        b=random.random()
        state_rand = T.round(b*pow(2,N))*L-H #state_rand is a random state in the discrete weight space Z_N
        
        delta_W1 =c*(state_rand-parambest)#parambest would transfer to state_rand with probability of a, or keep unmoved with probability of 1-a
        delta_W1_direction = T.cast(T.sgn(delta_W1), theano.config.floatX)
        dis1 = T.abs_(delta_W1)  # the absolute distance
        k1 = delta_W1_direction*T.floor(dis1/L)  # the integer part
        v1 = delta_W1-k1*L  # the decimal part
        Prob1 = T.abs_(v1/L)  # the transfer probability
        Prob1 = T.tanh(th*Prob1)  # the nonlinear tanh() accelerates the state transfer
예제 #48
0
def matrix_noise3d(input_vectors, perm, grad3, vertex_table):
    skew_factors = (input_vectors[:, 0] + input_vectors[:, 1] + input_vectors[:, 2]) * 1.0 / 3.0
    skewed_vectors = T.floor(input_vectors + skew_factors[:, np.newaxis])
    unskew_factors = (skewed_vectors[:, 0] + skewed_vectors[:, 1] + skewed_vectors[:, 2]) * 1.0 / 6.0
    offsets_0 = input_vectors - (skewed_vectors - unskew_factors[:, np.newaxis])
    vertex_table_x_index = T.ge(offsets_0[:, 0], offsets_0[:, 1])
    vertex_table_y_index = T.ge(offsets_0[:, 1], offsets_0[:, 2])
    vertex_table_z_index = T.ge(offsets_0[:, 0], offsets_0[:, 2])
    simplex_vertices = vertex_table[
        vertex_table_x_index,
        vertex_table_y_index,
        vertex_table_z_index].reshape((input_vectors.shape[0], 2, 3))
    offsets_1 = offsets_0 - simplex_vertices[:, 0] + 1.0 / 6.0
    offsets_2 = offsets_0 - simplex_vertices[:, 1] + 1.0 / 3.0
    offsets_3 = offsets_0 - 0.5
    masked_skewed_vectors = T.bitwise_and(skewed_vectors.astype('int32'), 255)
    gi0s = perm[masked_skewed_vectors[:, 0] + perm[
        masked_skewed_vectors[:, 1] + perm[
            masked_skewed_vectors[:, 2]].astype('int32')].astype('int32')] % 12
    gi1s = perm[masked_skewed_vectors[:, 0] + simplex_vertices[:, 0, 0] + perm[
        masked_skewed_vectors[:, 1] + simplex_vertices[:, 0, 1] + perm[
            masked_skewed_vectors[:, 2] + simplex_vertices[:, 0, 2]].astype('int32')].astype('int32')] % 12
    gi2s = perm[masked_skewed_vectors[:, 0] + simplex_vertices[:, 1, 0] + perm[
        masked_skewed_vectors[:, 1] + simplex_vertices[:, 1, 1] + perm[
            masked_skewed_vectors[:, 2] + simplex_vertices[:, 1, 2]].astype('int32')].astype('int32')] % 12
    gi3s = perm[masked_skewed_vectors[:, 0] + 1 + perm[
        masked_skewed_vectors[:, 1] + 1 + perm[
            masked_skewed_vectors[:, 2] + 1].astype('int32')].astype('int32')] % 12
    n0s = calculate_gradient_contribution(offsets_0, gi0s, grad3)
    n1s = calculate_gradient_contribution(offsets_1, gi1s, grad3)
    n2s = calculate_gradient_contribution(offsets_2, gi2s, grad3)
    n3s = calculate_gradient_contribution(offsets_3, gi3s, grad3)
    return 23.0 * (n0s + n1s + n2s + n3s)
예제 #49
0
def quantizeNormalizedWeightStochastic(w,B,scale,srng): #ref is -1 to 1
    promoted = (w/scale)*T.pow(2.0,B-1.0)
    floored = T.floor(promoted)
    diff = promoted-floored
    toAdd = T.switch(srng.binomial(size=w.shape,p=diff),1.0,0.0)
    stochasticed = floored+toAdd  # round up from the floored value with probability diff
    return scale*T.minimum(1.0-T.pow(2.0,1.0-B),stochasticed*T.pow(2.0,1.0-B))
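The same stochastic rounding in NumPy, as a quick sanity check that it is unbiased (the mean of many quantized copies stays close to the input). quantize_stochastic_np is a made-up name and this is only a sketch of the scheme above.

import numpy as np

def quantize_stochastic_np(w, B, scale, rng):
    # map w/scale onto the signed 2**(B-1) grid and round up with
    # probability equal to the fractional part, so the expected
    # quantized value equals the input
    promoted = (w / scale) * 2.0 ** (B - 1.0)
    floored = np.floor(promoted)
    rounded = floored + (rng.random(np.shape(w)) < (promoted - floored))
    return scale * np.minimum(1.0 - 2.0 ** (1.0 - B),
                              rounded * 2.0 ** (1.0 - B))

rng = np.random.default_rng(0)
w = np.full(100000, 0.37)
print(quantize_stochastic_np(w, 4, 1.0, rng).mean())   # close to 0.37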
예제 #50
0
    def get_output_for( self, inputs ,**kwargs ):
        # For each ROI R = [batch_index x1 y1 x2 y2]: max pool over R
        input = inputs[0]
        boxes = inputs[1]
        batch = T.shape (input)[0]
        channels = T.shape (input)[1]
        height = T.shape( input )[2]
        width = T.shape( input )[3]
        num_boxes = T.shape(boxes)[0]
        output = T.zeros((batch * num_boxes , channels, self.num_features))

        for idbb, bb in enumerate(boxes):  # bb = [batch_index, x1, y1, x2, y2]
            batch_ind = bb[0]

            pool_list = []
            #for pool_dim in self.pool_dims:
            start_w = T.clip(T.floor(bb[1] * self.sp_scale),0,width)
            start_h = T.clip(T.floor(bb[2] * self.sp_scale),0,height)
            end_w = T.clip(T.ceil(bb[3] * self.sp_scale),0,width)
            end_h = T.clip(T.ceil(bb[4] * self.sp_scale),0,height)

            w = T.maximum(end_w - start_w + 1, 1)
            h = T.maximum(end_h - start_h + 1, 1)

            start_samples_y,start_sample_x = T.floor(_meshgrid(start_h,end_h,pool_dims+1,start_w,end_w,pool_dims+1))
            end_samples_y,end_sample_x = T.ceil(_meshgrid(start_h,end_h,pool_dims+1,start_w,end_w,pool_dims+1))

            input[batch_ind,:,np.floor(py):np.ceil(samples_y[idy+1]),np.floor(px):np.ceil(samples_x[idx+1])]
            
            #T.max()

            #for idx,px in enumerate(samples_x[:-1]):
            #    for idy,py in enumerate(samples_y[:-1]):

             #       (pool.dnn_pool( input[batch_ind,:,np.floor(py):np.ceil(samples_y[idy+1]),np.floor(px):np.ceil(samples_x[idx+1])],(0,0),(None,None),'max', (0,0) )).flatten(2)

                #sz_w = ( w - 1 ) // pool_dim
                #sz_h = ( h - 1 ) // pool_dim

                #str_h = w // pool_dim
                #str_w = h // pool_dim

                #pool = dnn.dnn_pool( input[bb[0],:,start_h:end_h+1,start_w:end_w+1], (sz_h,sz_w),                 (str_h,str_w), 'max', (0,0) ).flatten(2)
        pool_list.append( pool )
        output[idbb] = T.transpose(T.concatenate( pool_list, axis=1 )) #not efficient but for the moment is ok!
        #if everything is correct this vector should be ordered as in fast RCNN    
        return output
예제 #51
0
파일: volumetric.py 프로젝트: zenna/ig
def raymarch(img, left_over, i, step_size, orig, rd, res, shape_params):
    pos = orig + rd*step_size*i
    voxel_indices = T.floor(pos*res)
    pruned = T.clip(voxel_indices,0,res-1)
    p_int =  T.cast(pruned, 'int32')
    indices = T.reshape(p_int, (width*height,3))
    value = shape_params[indices[:,0],indices[:,1],indices[:,2]] / nsteps
    return (img + value * left_over, {left_over : (1-value)*left_over, i : i+1})
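A minimal NumPy sketch of what one call to raymarch does for a batch of rays. march_step is a made-up name; width, height and nsteps are captured constants in the original, so they are passed explicitly here.

import numpy as np

def march_step(img, left_over, i, step_size, orig, rd, res, vol, nsteps):
    # advance every ray by one step, look up the voxel it lands in, and
    # transfer part of the remaining transmittance into the image
    pos = orig + rd * step_size * i                      # (n_rays, 3)
    idx = np.clip(np.floor(pos * res), 0, res - 1).astype(int)
    value = vol[idx[:, 0], idx[:, 1], idx[:, 2]] / nsteps
    return img + value * left_over, (1.0 - value) * left_over

vol = np.zeros((8, 8, 8)); vol[4, 4, 4] = 1.0
orig = np.array([[0.5, 0.5, 0.0]]); rd = np.array([[0.0, 0.0, 1.0]])
img, left = np.zeros(1), np.ones(1)
for i in range(16):
    img, left = march_step(img, left, i, 1.0 / 16, orig, rd, 8, vol, 16)
print(img)   # small positive value where the ray crossed the occupied voxel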
예제 #52
0
    def quantized_bprop(self, cost):
        index_low = T.switch(self.varin > 0.,
            T.floor(T.log2(self.varin)), T.floor(T.log2(-self.varin))
        )
        index_low = T.clip(index_low, -4, 3)
        sign = T.switch(self.varin > 0., 1., -1.)
        # the upper 2**(integer power) though not used explicitly.
        # index_up = index_low + 1
        # percentage of upper index.
        p_up = sign * self.varin / 2**(index_low) - 1
        index_random = index_low + self.srng.binomial(
            n=1, p=p_up, size=T.shape(self.varin), dtype=theano.config.floatX)
        quantized_rep = sign * 2**index_random

        error = T.grad(cost=cost, wrt=self.varfanin)

        self.dEdW = T.dot(quantized_rep.T, error)
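A hedged NumPy sketch of the power-of-two stochastic quantization applied to the stored activations above; quantize_pow2 is an illustrative name, and the exponent clip range [-4, 3] mirrors the code.

import numpy as np

def quantize_pow2(x, rng, low=-4, high=3):
    # replace each entry by a signed power of two near its magnitude,
    # picking the upper exponent with probability p_up so the result
    # is an unbiased estimate of x (exponent clipped to [low, high])
    sign = np.where(x > 0.0, 1.0, -1.0)
    index_low = np.clip(np.floor(np.log2(np.abs(x))), low, high)
    p_up = sign * x / 2.0 ** index_low - 1.0
    index = index_low + (rng.random(x.shape) < p_up)
    return sign * 2.0 ** index

x = np.array([0.3, -1.7, 0.04])
print(quantize_pow2(x, np.random.default_rng(1)))   # e.g. 0.3 -> 0.25 or 0.5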
예제 #53
0
파일: cold2.py 프로젝트: zenna/ig
def gen_img(shape_params, rotation_matrix, width, height, nsteps, res):
    raster_space = gen_fragcoords(width, height)
    rd, ro = make_ro(rotation_matrix, raster_space, width, height)
    a = 0 - ro # c = 0
    b = 1 - ro # c = 1
    nmatrices = rotation_matrix.shape[0]
    tn = T.reshape(a, (nmatrices, 1, 1, 3))/rd
    tf = T.reshape(b, (nmatrices, 1, 1, 3))/rd
    tn_true = T.minimum(tn,tf)
    tf_true = T.maximum(tn,tf)
    # do X
    tn_x = tn_true[:,:,:,0]
    tf_x = tf_true[:,:,:,0]
    tmin = 0.0
    tmax = 10.0
    t0 = tmin
    t1 = tmax
    t02 = T.switch(tn_x > t0, tn_x, t0)
    t12 = T.switch(tf_x < t1, tf_x, t1)
    # y
    tn_x = tn_true[:,:,:,1]
    tf_x = tf_true[:,:,:,1]
    t03 = T.switch(tn_x > t02, tn_x, t02)
    t13 = T.switch(tf_x < t12, tf_x, t12)
    #z
    tn_x = tn_true[:,:,:,2]
    tf_x = tf_true[:,:,:,2]
    t04 = T.switch(tn_x > t03, tn_x, t03)
    t14 = T.switch(tf_x < t13, tf_x, t13)

    # Shift a little bit to avoid numerical inaccuracies
    t04 = t04*1.001
    t14 = t14*0.999

    nvoxgrids = shape_params.shape[0]
    left_over = T.ones((nvoxgrids, nmatrices * width * height,))
    step_size = (t14 - t04)/nsteps
    orig = T.reshape(ro, (nmatrices, 1, 1, 3)) + rd * T.reshape(t04,(nmatrices, width, height, 1))
    xres = yres = zres = res

    orig = T.reshape(orig, (nmatrices * width * height, 3))
    rd = T.reshape(rd, (nmatrices * width * height, 3))
    step_sz = T.reshape(step_size, (nmatrices * width * height,1))

    for i in range(nsteps):
        # print "step", i
        pos = orig + rd*step_sz*i
        voxel_indices = T.floor(pos*res)
        pruned = T.clip(voxel_indices,0,res-1)
        p_int =  T.cast(pruned, 'int32')
        indices = T.reshape(p_int, (nmatrices*width*height,3))
        attenuation = shape_params[:, indices[:,0],indices[:,1],indices[:,2]]
        left_over = left_over*T.exp(-attenuation*T.flatten(step_sz))

    img = left_over
    pixels = T.reshape(img, (nvoxgrids, nmatrices, width, height))
    mask = t14>t04
    return T.switch(t14>t04, pixels, T.ones_like(pixels)), rd, ro, tn_x, T.ones((nvoxgrids, nmatrices * width * height,)), orig, shape_params
예제 #54
0
 def get_hidden_values(self, input, batch_size):
     self.indices_high = T.ceil(self.indices).astype('int8')
     self.indices_low = T.floor(self.indices).astype('int8')
     self.factors_high = self.W[self.indices_high]
     self.factors_low = self.W[self.indices_low]
     self.factors = (self.factors_high - self.factors_low) * (self.indices - self.indices_low) / \
                    (self.indices_high - self.indices_low + 1E-5) + self.factors_low
     self.output = T.sum(self.x * T.transpose(self.factors).dimshuffle(0, 'x', 1), axis=2) / \
                   (self.length + 1.0).dimshuffle(0, 'x')
예제 #55
0
def apply_sampling(source_grid,img,o_height,o_width):
    n_batch,n_channel,i_height,i_width = img.shape
#    # 0 0 0 1 1 1 2 2 2 0 0 0 1 1 1 2 2 2
#    source_grid_row = source_grid[:,0,:].reshape((n_batch,1,-1)).repeat(n_channel,axis=1).flatten()
#    # 0 1 2 0 1 2 0 1 2
#    source_grid_col = source_grid[:,1,:].reshape((n_batch,1,-1)).repeat(n_channel,axis=1).flatten()
    
    source_grid_row = source_grid[:,1,:].flatten()    
    source_grid_col = source_grid[:,0,:].flatten()
    
    
    # (T.dot(T.ones((2,2,3)),T.stack([T.ones((10,1)).flatten(),T.ones((10,1)).flatten(),T.ones((10,1)).flatten()]))[:,0,:]*2
    # + T.dot(T.ones((2,2,3)),T.stack([T.ones((10,1)).flatten(),T.ones((10,1)).flatten(),T.ones((10,1)).flatten()]))[:,1,:]).eval().shape
    # batch1 row1 row2 row3 ... batch2 row1 ...
    img = img.dimshuffle((0,2,3,1)).reshape((-1,n_channel)).astype(theano.config.floatX)
    source_grid_row=(source_grid_row+1.0)/2.0*(i_height-1.0)
    source_grid_col=(source_grid_col+1.0)/2.0*(i_width-1.0)
    source_grid_row_floor=T.floor(source_grid_row).astype('int64')
    source_grid_col_floor=T.floor(source_grid_col).astype('int64')
    source_grid_row_ceil=T.clip(source_grid_row_floor+1,0,i_height-1).astype('int64')
    source_grid_col_ceil=T.clip(source_grid_col_floor+1,0,i_width-1).astype('int64')
    # output = img[source_grid_row*i_width+source_grid_col].reshape((n_batch,n_channel,o_width,o_height))
    
    batch_base=(T.arange(n_batch)*(i_height*i_width)).repeat(o_height*o_width).astype('int64')
    
    # bilinear interpolation
    output_nw = img[source_grid_row_floor*i_width+source_grid_col_floor+batch_base,:]
    output_ne = img[source_grid_row_floor*i_width+source_grid_col_ceil+batch_base,:]
    output_sw = img[source_grid_row_ceil*i_width+source_grid_col_floor+batch_base,:]
    output_se = img[source_grid_row_ceil*i_width+source_grid_col_ceil+batch_base,:]
    
    weight_nw = ((source_grid_row_ceil-source_grid_row)*(source_grid_col_ceil-source_grid_col)).reshape((-1,1)).repeat(n_channel,axis=1)
    weight_ne = -((source_grid_row_ceil-source_grid_row)*(source_grid_col_floor-source_grid_col)).reshape((-1,1)).repeat(n_channel,axis=1)
    weight_sw = -((source_grid_row_floor-source_grid_row)*(source_grid_col_ceil-source_grid_col)).reshape((-1,1)).repeat(n_channel,axis=1)
    weight_se = ((source_grid_row_floor-source_grid_row)*(source_grid_col_floor-source_grid_col)).reshape((-1,1)).repeat(n_channel,axis=1)
    
    output =(output_nw*weight_nw+output_ne*weight_ne+output_sw*weight_sw+output_se*weight_se)
    output = T.reshape(output,(n_batch,o_height,o_width,n_channel)).dimshuffle((0,3,1,2))
        
        
#    source_grid_row=((source_grid_row+1.0)/2.0*(i_height-1.0)).astype('int16')
#    source_grid_col=((source_grid_col+1.0)/2.0*(i_width-1.0)).astype('int16')
#    output = img[source_grid_row*i_width+source_grid_col].reshape((n_batch,n_channel,o_height,o_width))
    return output.astype(theano.config.floatX)
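The bilinear weights used above can be checked with a scalar NumPy version; bilinear_sample is a made-up helper, and boundary clamping beyond a simple min() is ignored here.

import numpy as np

def bilinear_sample(img, row, col):
    # img: (H, W); row, col: float coordinates inside the image
    r0, c0 = int(np.floor(row)), int(np.floor(col))
    r1, c1 = min(r0 + 1, img.shape[0] - 1), min(c0 + 1, img.shape[1] - 1)
    w_nw = (r1 - row) * (c1 - col)
    w_ne = (r1 - row) * (col - c0)
    w_sw = (row - r0) * (c1 - col)
    w_se = (row - r0) * (col - c0)
    return (w_nw * img[r0, c0] + w_ne * img[r0, c1] +
            w_sw * img[r1, c0] + w_se * img[r1, c1])

img = np.arange(16.0).reshape(4, 4)          # img[r, c] = 4*r + c
print(bilinear_sample(img, 1.5, 2.25))       # 8.25 = 4*1.5 + 2.25 on this ramp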
예제 #56
0
def ShiftConv(w_t_g, s_t, N, num_shifts):
    # pad = (num_shifts//2, (num_shifts-1)//2)
    # w_t_g_pd_ = T.concatenate([w_t_g[(-pad[0]-1):-1], w_t_g, w_t_g[:(pad[1])]])
    # w_t_g_pd = w_t_g_pd_.dimshuffle('x','x','x', 0)
    # filter = s_t.dimshuffle('x', 'x', 'x', 0)
    # convolution = T.nnet.conv2d(w_t_g_pd, filter,
    # input_shape=(1, 1, 1, N + pad[0] + pad[1]),
    # filter_shape=(1, 1, 1, num_shifts),
    # subsample=(1, 1),
    # border_mode='valid')
    # w_t_s = convolution[0, 0, 0, :]
    shift = 2.*s_t-1.
    Z = T.mod(shift+N, N)
    simj = 1 - (Z - T.floor(Z))
    imj = T.mod(T.arange(N) + T.iround(T.floor(Z)),N)
    w_t_g_roll_1 = T.roll(w_t_g, -T.iround(T.floor(Z)))
    w_t_g_roll_2 = T.roll(w_t_g, -(T.iround(T.floor(Z))+1))
    w_t_s = w_t_g_roll_1*simj + w_t_g_roll_2*(1-simj)
    return w_t_s
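A NumPy sketch of the fractional circular shift implemented above; fractional_roll is a made-up name, and in the original the shift amount comes from shift = 2*s_t - 1.

import numpy as np

def fractional_roll(w, shift):
    # circularly shift weighting vector w by a possibly fractional amount,
    # splitting the mass between the two nearest integer shifts
    N = len(w)
    z = np.mod(shift + N, N)
    frac_hi = z - np.floor(z)          # weight on the further integer shift
    k = int(np.floor(z))
    return np.roll(w, -k) * (1.0 - frac_hi) + np.roll(w, -(k + 1)) * frac_hi

print(fractional_roll(np.array([0., 1., 0., 0.]), 0.5))   # [0.5 0.5 0.  0. ]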
예제 #57
0
    def rrf(self, val, scale):
        if(self.rr<=0):
            return val
        #r= T.cast(T.round(val*self.rr),dtype=config.floatX)/self.rr
        #r=T.clip(r,-1,1)
        
        if self.prob_round:
            # round 'val' probabilistically between -scale and scale with self.rr number of possible values
            q = T.abs_(1.0/scale * val * self.rr)
            p = q-T.floor(q)
            r = T.sgn(val) * (T.floor(q)+self.rng.binomial(size=val.shape, n=1, p=p)) / self.rr * scale
            r = T.clip(r, -scale, scale)
    #         print q, p, r
        else:
            # deterministic rounding of 'val' between -scale and scale with self.rr number of possible values
            r= T.round(val*self.rr)/self.rr
            r=T.clip(r, -scale, scale)

        return T.cast(r, dtype=config.floatX)
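And a NumPy version of the probabilistic branch of rrf, useful for checking that the rounding is unbiased; prob_round is an illustrative name.

import numpy as np

def prob_round(val, rr, scale, rng):
    # round val/scale onto a grid with rr levels per unit of scale, rounding
    # up with probability equal to the fractional remainder (unbiased on average)
    q = np.abs(val / scale * rr)
    p = q - np.floor(q)
    r = np.sign(val) * (np.floor(q) + (rng.random(np.shape(val)) < p)) / rr * scale
    return np.clip(r, -scale, scale)

rng = np.random.default_rng(0)
vals = np.full(100000, 0.123)
print(prob_round(vals, rr=8, scale=1.0, rng=rng).mean())   # ~0.123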