Code example #1
    def getRpRnTpTnForTrain0OrVal1(self, y, training0OrValidation1):
        # The returned list has (numberOfClasses)x4 integers: >numberOfRealPositives, numberOfRealNegatives, numberOfTruePredictedPositives, numberOfTruePredictedNegatives< for each class (incl background).
        # Order in the list is the natural order of the classes (ie class-0 RP,RN,TPP,TPN, class-1 RP,RN,TPP,TPN, class-2 RP,RN,TPP,TPN ...)
        # param y: y = T.itensor4('y'). Dimensions [batchSize, r, c, z]

        yPredToUse = self.y_pred_train if training0OrValidation1 == 0 else self.y_pred_val
        checkDimsOfYpredAndYEqual(
            y, yPredToUse,
            "training" if training0OrValidation1 == 0 else "validation")

        returnedListWithNumberOfRpRnTpTnForEachClass = []

        for class_i in xrange(0, self._numberOfOutputClasses):
            #Number of Real Positive, Real Negatives, True Predicted Positives and True Predicted Negatives are reported PER CLASS (first for WHOLE).
            tensorOneAtRealPos = T.eq(y, class_i)
            tensorOneAtRealNeg = T.neq(y, class_i)

            tensorOneAtPredictedPos = T.eq(yPredToUse, class_i)
            tensorOneAtPredictedNeg = T.neq(yPredToUse, class_i)
            tensorOneAtTruePos = T.and_(tensorOneAtRealPos,
                                        tensorOneAtPredictedPos)
            tensorOneAtTrueNeg = T.and_(tensorOneAtRealNeg,
                                        tensorOneAtPredictedNeg)

            returnedListWithNumberOfRpRnTpTnForEachClass.append(
                T.sum(tensorOneAtRealPos))
            returnedListWithNumberOfRpRnTpTnForEachClass.append(
                T.sum(tensorOneAtRealNeg))
            returnedListWithNumberOfRpRnTpTnForEachClass.append(
                T.sum(tensorOneAtTruePos))
            returnedListWithNumberOfRpRnTpTnForEachClass.append(
                T.sum(tensorOneAtTrueNeg))

        return returnedListWithNumberOfRpRnTpTnForEachClass
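
The counts returned above are symbolic Theano scalars, so they only become numbers once compiled into a function. A minimal self-contained sketch (not from the original project; the flat int vectors, n_classes = 3, and the toy labels are illustrative assumptions) of the same per-class counting pattern:

import numpy as np
import theano
import theano.tensor as T

y = T.ivector('y')            # ground-truth labels
y_pred = T.ivector('y_pred')  # predicted labels
n_classes = 3

counts = []
for class_i in range(n_classes):
    real_pos = T.eq(y, class_i)
    real_neg = T.neq(y, class_i)
    pred_pos = T.eq(y_pred, class_i)
    pred_neg = T.neq(y_pred, class_i)
    # RP, RN, TPP, TPN for this class, in the same order as above
    counts += [T.sum(real_pos), T.sum(real_neg),
               T.sum(T.and_(real_pos, pred_pos)),
               T.sum(T.and_(real_neg, pred_neg))]

f = theano.function([y, y_pred], counts)
print(f(np.array([0, 1, 2, 1], dtype='int32'),
        np.array([0, 2, 2, 1], dtype='int32')))
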
Code example #2
File: cnnLayerTypes.py Project: yochju/deepmedic
    def realPosAndNegAndTruePredPosNegTraining0OrValidation1(
            self, y, training0OrValidation1):
        #***Implemented only for binary***. For multiclass, it counts real-positives as everything not-background, and as true positives the true predicted lesion (independent of class).
        vectorOneAtRealPositives = T.gt(y, 0)
        vectorOneAtRealNegatives = T.eq(y, 0)
        if training0OrValidation1 == 0:  #training:
            yPredToUse = self.y_pred
        else:  #validation
            yPredToUse = self.y_pred_inference
        vectorOneAtPredictedPositives = T.gt(yPredToUse, 0)
        vectorOneAtPredictedNegatives = T.eq(yPredToUse, 0)
        vectorOneAtTruePredictedPositives = T.and_(
            vectorOneAtRealPositives, vectorOneAtPredictedPositives)
        vectorOneAtTruePredictedNegatives = T.and_(
            vectorOneAtRealNegatives, vectorOneAtPredictedNegatives)

        numberOfRealPositives = T.sum(vectorOneAtRealPositives)
        numberOfRealNegatives = T.sum(vectorOneAtRealNegatives)
        numberOfTruePredictedPositives = T.sum(
            vectorOneAtTruePredictedPositives)
        numberOfTruePredictedNegatives = T.sum(
            vectorOneAtTruePredictedNegatives)

        return [
            numberOfRealPositives, numberOfRealNegatives,
            numberOfTruePredictedPositives, numberOfTruePredictedNegatives
        ]
Code example #3
 def ber(self, y):
     tp = T.and_(T.eq(y, 1), T.eq(self.y_pred, 1)).sum()
     tn = T.and_(T.eq(y, 0), T.eq(self.y_pred, 0)).sum()
     fp = T.and_(T.eq(y, 0), T.eq(self.y_pred, 1)).sum()
     fn = T.and_(T.eq(y, 1), T.eq(self.y_pred, 0)).sum()
     ber = 0.5 * (T.true_div(fp, tp + fp) + T.true_div(fn, tn + fn))
     return ber
Code example #4
File: cnnLayerTypes.py Project: pliu007/deepmedic
    def multiclassRealPosAndNegAndTruePredPosNegTraining0OrValidation1(self, y, training0OrValidation1):
        """
        The returned list has (numberOfClasses)x4 integers: >numberOfRealPositives, numberOfRealNegatives, numberOfTruePredictedPositives, numberOfTruePredictedNegatives< for each class (incl background).
        Order in the list is the natural order of the classes (ie class-0 RP,RN,TPP,TPN, class-1 RP,RN,TPP,TPN, class-2 RP,RN,TPP,TPN ...)
        """
        returnedListWithNumberOfRpRnPpPnForEachClass = []

        for class_i in xrange(0, self.numberOfOutputClasses):
            # Number of Real Positives, Real Negatives, True Predicted Positives and True Predicted Negatives are reported PER CLASS (first for WHOLE).
            vectorOneAtRealPositives = T.eq(y, class_i)
            vectorOneAtRealNegatives = T.neq(y, class_i)

            if training0OrValidation1 == 0:  # training
                yPredToUse = self.y_pred
            else:  # validation
                yPredToUse = self.y_pred_inference

            vectorOneAtPredictedPositives = T.eq(yPredToUse, class_i)
            vectorOneAtPredictedNegatives = T.neq(yPredToUse, class_i)
            vectorOneAtTruePredictedPositives = T.and_(vectorOneAtRealPositives, vectorOneAtPredictedPositives)
            vectorOneAtTruePredictedNegatives = T.and_(vectorOneAtRealNegatives, vectorOneAtPredictedNegatives)

            returnedListWithNumberOfRpRnPpPnForEachClass.append(T.sum(vectorOneAtRealPositives))
            returnedListWithNumberOfRpRnPpPnForEachClass.append(T.sum(vectorOneAtRealNegatives))
            returnedListWithNumberOfRpRnPpPnForEachClass.append(T.sum(vectorOneAtTruePredictedPositives))
            returnedListWithNumberOfRpRnPpPnForEachClass.append(T.sum(vectorOneAtTruePredictedNegatives))

        return returnedListWithNumberOfRpRnPpPnForEachClass
Code example #5
File: starry.py Project: emilygilbert/exoplanet
def depth_grad(r, b):
    # depth = 1 - s0 / pi; where s0 is from Agol+
    b = tt.abs_(b)
    r = tt.abs_(r)
    b2 = b**2
    opr = 1 + r
    omr = 1 - r
    rmo = r - 1

    # Case 2
    a = kite_area(r, b)
    twor = 2 * r
    twoa = 2 * a
    k0 = tt.arctan2(twoa, rmo * opr + b2)
    dr = twor * k0 / np.pi
    db = -twoa / (b * np.pi)

    zero = tt.zeros_like(r)
    return (
        tt.switch(
            tt.le(opr, b),
            zero,
            tt.switch(
                tt.and_(tt.lt(tt.abs_(omr), b), tt.lt(b, opr)),
                dr,
                tt.switch(tt.le(b, omr), twor, zero),
            ),
        ),
        tt.switch(
            tt.le(opr, b),
            zero,
            tt.switch(tt.and_(tt.lt(tt.abs_(omr), b), tt.lt(b, opr)), db,
                      zero),
        ),
    )
Code example #6
def one_run(my_x, my_y, my_z,
            my_u, my_v, my_w,
            my_weight,
            my_heat, my_albedo, my_microns_per_shell):

    # move
    random = rng.uniform(low=0.00003, high=1.)
    t = -T.log(random)

    x_moved = my_x + my_u*t
    y_moved = my_y + my_v*t
    z_moved = my_z + my_w*t

    # absorb
    shell = T.cast(T.sqrt(T.sqr(x_moved) + T.sqr(y_moved) + T.sqr(z_moved))
                   * my_microns_per_shell, 'int32')
    shell = T.clip(shell, 0, SHELL_MAX-1)

    new_weight = my_weight * my_albedo

    # new direction
    xi1 = rng.uniform(low=-1., high=1.)
    xi2 = rng.uniform(low=-1., high=1.)
    xi_norm = T.sqrt(T.sqr(xi1) + T.sqr(xi2))

    t_xi = rng.uniform(low=0.000000001, high=1.)

    # rescale xi12 to fit t_xi as norm
    xi1 = xi1/xi_norm * T.sqr(t_xi)
    xi2 = xi2/xi_norm * T.sqr(t_xi)

    u_new_direction = 2. * t_xi - 1.
    v_new_direction = xi1 * T.sqrt((1. - T.sqr(u_new_direction)) / t_xi)
    w_new_direction = xi2 * T.sqrt((1. - T.sqr(u_new_direction)) / t_xi)

    # roulette
    weight_for_starting_roulette = 0.001
    CHANCE = 0.1
    partakes_roulette = T.switch(T.lt(new_weight, weight_for_starting_roulette),
                                 1,
                                 0)
    roulette = rng.uniform(low=0., high=1.)
    loses_roulette = T.gt(roulette, CHANCE)
    # if roulette decides to terminate the photon: set weight to 0
    weight_after_roulette = ifelse(T.and_(partakes_roulette, loses_roulette),
                                     0.,
                                     new_weight)
    # if partakes in roulette but does not get terminated
    weight_after_roulette = ifelse(T.and_(partakes_roulette, T.invert(loses_roulette)),
                                     weight_after_roulette / CHANCE,
                                     weight_after_roulette)

    new_heat = (1.0 - my_albedo) * my_weight
    heat_i = my_heat[shell]

    return (x_moved, y_moved, z_moved,\
           u_new_direction, v_new_direction, w_new_direction,\
           weight_after_roulette),\
           OrderedDict({my_heat: T.inc_subtensor(heat_i, new_heat)})
Code example #7
 def confusion_matrix(self, y):
     """
     Returns confusion matrix
     """
     tp = T.and_(T.eq(y, 1), T.eq(self.y_pred, 1)).sum()
     tn = T.and_(T.eq(y, 0), T.eq(self.y_pred, 0)).sum()
     fp = T.and_(T.eq(y, 0), T.eq(self.y_pred, 1)).sum()
     fn = T.and_(T.eq(y, 1), T.eq(self.y_pred, 0)).sum()
     return [tp, tn, fp, fn]
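
The four counts are symbolic as well; a hedged sketch (plain int vectors stand in for the class's self.y_pred, and the toy labels are assumptions) of compiling them into a single callable:

import numpy as np
import theano
import theano.tensor as T

y = T.ivector('y')
y_pred = T.ivector('y_pred')
tp = T.and_(T.eq(y, 1), T.eq(y_pred, 1)).sum()
tn = T.and_(T.eq(y, 0), T.eq(y_pred, 0)).sum()
fp = T.and_(T.eq(y, 0), T.eq(y_pred, 1)).sum()
fn = T.and_(T.eq(y, 1), T.eq(y_pred, 0)).sum()
confusion = theano.function([y, y_pred], [tp, tn, fp, fn])
print(confusion(np.array([1, 0, 1, 0], dtype='int32'),
                np.array([1, 1, 0, 0], dtype='int32')))  # tp, tn, fp, fn -> 1, 1, 1, 1
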
Code example #8
    def in_transit(self, t, r=0.0, texp=None):
        """Get a list of timestamps that are in transit

        Args:
            t (vector): A vector of timestamps to be evaluated.
            r (Optional): The radii of the planets.
            texp (Optional[float]): The exposure time.

        Returns:
            The indices of the timestamps that are in transit.

        """

        z = tt.zeros_like(self.a)
        r = tt.as_tensor_variable(r) + z
        R = self.r_star + z

        # Wrap the times into time since transit
        hp = 0.5 * self.period
        dt = tt.mod(self._warp_times(t) - self.t0 + hp, self.period) - hp

        if self.ecc is None:
            # Equation 14 from Winn (2010)
            k = r / self.r_star
            arg = tt.square(1 + k) - tt.square(self.b)
            hdur = hp * tt.arcsin(self.r_star / self.a *
                                  tt.sqrt(arg) / self.sin_incl) / np.pi
            t_start = -hdur
            t_end = hdur
            flag = z

        else:
            M_contact = self.contact_points_op(
                self.a, self.ecc, self.cos_omega, self.sin_omega,
                self.cos_incl + z, self.sin_incl + z, R + r)
            flag = M_contact[2]

            t_start = (M_contact[0] - self.M0) / self.n
            t_start = tt.mod(t_start + hp, self.period) - hp
            t_end = (M_contact[1] - self.M0) / self.n
            t_end = tt.mod(t_end + hp, self.period) - hp

        if texp is not None:
            t_start -= 0.5*texp
            t_end += 0.5*texp

        mask = tt.any(tt.and_(dt >= t_start, dt <= t_end), axis=-1)
        result = ifelse(tt.and_(tt.all(tt.eq(flag, 0)),
                                tt.all(tt.gt(t_end, t_start))),
                        tt.arange(t.size)[mask],
                        tt.arange(t.size))

        return result
Code example #9
File: logistic_sgd.py Project: chagge/DeepLearning
 def errorReport(self, y, n):
     # compute error rate by class
     # check if y has same dimension of y_pred
     if y.ndim != self.y_pred.ndim:
         raise TypeError('y should have the same shape as self.y_pred',
                         ('y', y.type, 'y_pred', self.y_pred.type))
     # check if y is of the correct datatype
     if y.dtype.startswith('int'):
         c = numpy.zeros((self.n_out, self.n_out + 1), dtype=numpy.int64)
         counts = T.as_tensor_variable(c)
         classVector = numpy.zeros(n)
         for i in xrange(self.n_out):
             othersVector = numpy.zeros(n)
             for j in xrange(self.n_out):
                 counts = theano.tensor.basic.set_subtensor(
                     counts[i, j],
                     T.sum(T.and_(T.eq(self.y_pred, othersVector),
                                  T.eq(y, classVector))))
                 othersVector = othersVector + 1
             counts = theano.tensor.basic.set_subtensor(
                 counts[i, self.n_out],
                 T.sum(T.eq(y, classVector)))
             classVector = classVector + 1
         return counts
     else:
         raise NotImplementedError()
Code example #10
    def FPR(self, y):
        """Return a float representing the number of errors in the minibatch
        over the total number of examples of the minibatch ; zero one
        loss over the size of the minibatch

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label
        """

        # check if y has same dimension of y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError(
                'y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type)
            )
        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # build indicator vectors: N marks the real negatives, P the
            # real positives, and FP the negatives predicted as positive
            zeros = T.zeros_like(y)
            ones = T.ones_like(y)
            N = T.eq(y, zeros)
            P = T.eq(y, ones)
            FP = T.and_(N, T.eq(ones, self.y_pred))
            return T.mean(FP)/T.mean(N)
        else:
            raise NotImplementedError()
Code example #11
def depth(r, b):
    # depth = 1 - s0 / pi; where s0 is from Agol+
    b = tt.abs_(b)
    r = tt.abs_(r)
    b2 = b ** 2
    r2 = r ** 2
    opr = 1 + r
    omr = 1 - r
    rmo = r - 1

    # Case 2
    a = kite_area(r, b)
    twoa = 2 * a
    k0 = tt.arctan2(twoa, rmo * opr + b2)
    k1 = tt.arctan2(twoa, omr * opr + b2)
    case2 = (k1 + r2 * k0 - a) / np.pi

    return tt.switch(
        tt.le(opr, b),
        tt.zeros_like(r),
        tt.switch(
            tt.and_(tt.lt(tt.abs_(omr), b), tt.lt(b, opr)),
            case2,
            tt.switch(tt.le(b, omr), r2, tt.ones_like(r)),
        ),
    )
Code example #12
File: utils.py Project: guozanhua/xnn
def theano_digitize(x, bins):
    """
    Equivalent to numpy digitize.

    Parameters
    ----------
    x : Theano tensor or array_like
        The array or matrix to be digitized
    bins : array_like
        The bins with which x should be digitized

    Returns
    -------
    A Theano tensor
        The indices of the bins to which each value in input array belongs.
    """
    binned = T.zeros_like(x) + len(bins)
    for i in range(len(bins)):
        bin = bins[i]
        if i == 0:
            binned = T.switch(T.lt(x, bin), i, binned)
        else:
            ineq = T.and_(T.ge(x, bins[i - 1]), T.lt(x, bin))
            binned = T.switch(ineq, i, binned)
    binned = T.switch(T.isnan(x), len(bins), binned)
    return binned
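
A quick sanity check (illustrative, not from the project; the bins and data are assumptions) that theano_digitize above agrees with numpy.digitize on in-range and out-of-range values:

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
bins = [0.0, 1.0, 2.0]
digitize = theano.function([x], theano_digitize(x, bins))
data = np.array([-0.5, 0.3, 1.7, 2.5], dtype=theano.config.floatX)
print(digitize(data))           # [0. 1. 2. 3.]
print(np.digitize(data, bins))  # [0 1 2 3]
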
Code example #13
File: utils.py Project: eglxiang/xnn
def theano_digitize(x, bins):
    """
    Equivalent to numpy digitize.

    Parameters
    ----------
    x : Theano tensor or array_like
        The array or matrix to be digitized
    bins : array_like
        The bins with which x should be digitized

    Returns
    -------
    A Theano tensor
        The indices of the bins to which each value in input array belongs.
    """
    binned = T.zeros_like(x) + len(bins)
    for i in range(len(bins)):
        bin = bins[i]
        if i == 0:
            binned = T.switch(T.lt(x, bin), i, binned)
        else:
            ineq = T.and_(T.ge(x, bins[i - 1]), T.lt(x, bin))
            binned = T.switch(ineq, i, binned)
    binned = T.switch(T.isnan(x), len(bins), binned)
    return binned
Code example #14
File: token_model.py Project: mswellhao/active_NER
	def logp_loss3(self, x, y, fake_label,neg_label, pos_ratio = 0.5): #adopt maxout  for  negative   
        # pos_ratio means positive examples' weight (0.5 means equal 1:1)


		print "adopt  positives  weight  ............. "+str(pos_ratio)
		y = y.dimshuffle((1,0))
		inx = x.dimshuffle((1,0))
		fake_mask = T.neq(y, fake_label)
		y = y*fake_mask

		pos_mask = T.and_(fake_mask, T.le(y, neg_label-1))*pos_ratio
		neg_mask = T.ge(y, neg_label)*(1- pos_ratio)


		pos_score, neg_score = self.structure2(inx,False)
		maxneg = T.max(neg_score, axis = -1)

		scores = T.concatenate((pos_score, maxneg.dimshuffle((0,1,'x'))), axis = 2)

		d3shape = scores.shape

		#seq*batch , label
		scores = scores.reshape((d3shape[0]*d3shape[1],  d3shape[2]))
		pro = T.nnet.softmax(scores)

		_logp = T.nnet.categorical_crossentropy(pro, y.flatten())

		_logp = _logp.reshape(fake_mask.shape)

		loss = (T.sum(_logp*pos_mask)+ T.sum(_logp*neg_mask))/ (T.sum(pos_mask)+T.sum(neg_mask))
		pos_loss = T.sum(_logp*pos_mask)
		neg_loss = T.sum(_logp*neg_mask)


		return loss, pos_loss, neg_loss
Code example #15
File: mlp.py Project: jannickep/HiggsChallenge
 def asimov_errors(self, y):
     # check if y has same dimension of y_pred
     if y.ndim != self.logRegressionLayer.y_pred.ndim:
         raise TypeError(
             'y should have the same shape as self.y_pred',
             ('y', y.type, 'y_pred', self.y_pred.type)
         )
     # check if y is of the correct datatype
     if y.dtype.startswith('int'):
         S = T.sum(T.eq(y,1))
         B = T.sum(T.eq(y,0))#*10000 # TODO: cross-section scaling
         s = T.sum(T.and_(T.eq(y,1),T.eq(self.logRegressionLayer.y_pred,1)))
         b = T.sum(T.and_(T.eq(y,0),T.eq(self.logRegressionLayer.y_pred,1)))#*10000 TODO: cross-section scaling
         return (S, B, s, b)
     else:
         raise NotImplementedError()
Code example #16
File: activations.py Project: dkaravaev/wavelet-ccnn
    def call(self, inputs):
        real = get_realpart(inputs)
        imag = get_imagpart(inputs)

        cond = T.and_(real >= 0, imag >= 0)
        x = T.where(cond, real, self.zeros)
        y = T.where(cond, imag, self.zeros)
        return K.concatenate((x, y), axis=-1)
Code example #17
File: dist_math.py Project: alexander-belikov/pymc3
def incomplete_beta(a, b, value):
    '''Incomplete beta implementation
    Power series and continued fraction expansions chosen for best numerical
    convergence across the board based on inputs.
    '''
    machep = tt.constant(np.MachAr().eps, dtype='float64')
    one = tt.constant(1, dtype='float64')
    w = one - value

    ps = incomplete_beta_ps(a, b, value)

    flip = tt.gt(value, (a / (a + b)))
    aa, bb = a, b
    a = tt.switch(flip, bb, aa)
    b = tt.switch(flip, aa, bb)
    xc = tt.switch(flip, value, w)
    x = tt.switch(flip, w, value)

    tps = incomplete_beta_ps(a, b, x)
    tps = tt.switch(tt.le(tps, machep), one - machep, one - tps)

    # Choose which continued fraction expansion for best convergence.
    small = tt.lt(x * (a + b - 2.0) - (a - one), 0.0)
    cfe = incomplete_beta_cfe(a, b, x, small)
    w = tt.switch(small, cfe, cfe / xc)

    # Direct incomplete beta accounting for flipped a, b.
    t = tt.exp(
        a * tt.log(x) + b * tt.log(xc) +
        gammaln(a + b) - gammaln(a) - gammaln(b) +
        tt.log(w / a)
    )

    t = tt.switch(
        flip,
        tt.switch(tt.le(t, machep), one - machep, one - t),
        t
    )
    return tt.switch(
        tt.and_(flip, tt.and_(tt.le((b * x), one), tt.le(x, 0.95))),
        tps,
        tt.switch(
            tt.and_(tt.le(b * value, one), tt.le(value, 0.95)),
            ps,
            t))
Code example #18
        def cohesion(X, inf=100.0):
            D = distance_tensor(X)
            E = direction_tensor(X)
            n, d = neighbourhood(X)

            F = T.zeros_like(E)
            D = T.stack([D, D, D], axis=2)
            d = T.stack([d, d, d], axis=2)

            c1 = T.lt(D, rb)
            c2 = T.and_(T.gt(D, rb), T.lt(D, ra))
            c3 = T.and_(T.gt(D, ra), T.lt(D, r0))

            F = T.set_subtensor(F[c1], -E[c1])
            F = T.set_subtensor(F[c2], 0.25 * (D[c2] - re) / (ra - re) * E[c2])
            F = T.set_subtensor(F[c3], E[c3])

            return T.sum(d * F, axis=0)
Code example #19
File: keplerian.py Project: dfm/exoplanet
    def in_transit(self, t, r=0.0, texp=None):
        """Get a list of timestamps that are in transit

        Args:
            t (vector): A vector of timestamps to be evaluated.
            r (Optional): The radii of the planets.
            texp (Optional[float]): The exposure time.

        Returns:
            The indices of the timestamps that are in transit.

        """

        z = tt.zeros_like(self.a)
        r = tt.as_tensor_variable(r) + z
        R = self.r_star + z

        # Wrap the times into time since transit
        hp = 0.5 * self.period
        dt = tt.mod(self._warp_times(t) - self.t0 + hp, self.period) - hp

        if self.ecc is None:
            # Equation 14 from Winn (2010)
            k = r / R
            arg = tt.square(1 + k) - tt.square(self.b)
            factor = R / (self.a * self.sin_incl)
            hdur = hp * tt.arcsin(factor * tt.sqrt(arg)) / np.pi
            t_start = -hdur
            t_end = hdur
            flag = z

        else:
            M_contact = self.contact_points_op(
                self.a, self.ecc, self.cos_omega, self.sin_omega,
                self.cos_incl + z, self.sin_incl + z, R + r)
            flag = M_contact[2]

            t_start = (M_contact[0] - self.M0) / self.n
            t_start = tt.mod(t_start + hp, self.period) - hp
            t_end = (M_contact[1] - self.M0) / self.n
            t_end = tt.mod(t_end + hp, self.period) - hp

            t_start = tt.switch(tt.gt(t_start, 0.0),
                                t_start - self.period, t_start)
            t_end = tt.switch(tt.lt(t_end, 0.0),
                              t_end + self.period, t_end)

        if texp is not None:
            t_start -= 0.5*texp
            t_end += 0.5*texp

        mask = tt.any(tt.and_(dt >= t_start, dt <= t_end), axis=-1)
        result = ifelse(tt.all(tt.eq(flag, 0)),
                        tt.arange(t.size)[mask],
                        tt.arange(t.size))

        return result
Code example #20
File: ladder.py Project: fulldecent/LRE
def objective(y_true, y_pred, P, Q, alpha=0., beta=0.15, dbeta=0., gamma=0.01, gamma1=-1., poos=0.23, eps=1e-6):
    '''Expects a binary class matrix instead of a vector of scalar classes.
    '''

    beta = np.float32(beta)
    dbeta = np.float32(dbeta)
    gamma = np.float32(gamma)
    poos = np.float32(poos)
    eps = np.float32(eps)

    # scale preds so that the class probas of each sample sum to 1
    y_pred += eps
    y_pred /= y_pred.sum(axis=-1, keepdims=True)

    y_true = T.cast(y_true.flatten(), 'int64')
    y1 = T.and_(T.gt(y_true, 0), T.le(y_true, Q))  # in-set
    y0 = T.or_(T.eq(y_true, 0), T.gt(y_true, Q))  # out-of-set or unlabeled
    y0sum = y0.sum() + eps  # number of oos
    y1sum = y1.sum() + eps  # number of in-set
    # we want to reduce the cross entropy of labeled data
    # convert all oos/unlabeled to label=0
    cost0 = T.nnet.categorical_crossentropy(y_pred, T.switch(y_true <= Q, y_true, 0))
    cost0 = T.dot(y1, cost0) / y1sum  # average cost per labeled example

    if alpha:
        cost1 = T.nnet.categorical_crossentropy(y_pred, y_pred)
        cost1 = T.dot(y0, cost1) / y0sum  # average cost per unlabeled example
        cost0 += alpha*cost1

    # we want to increase the average entropy in each batch
    # average over batch
    if beta:
        y_pred_avg0 = T.dot(y0, y_pred) / y0sum
        y_pred_avg0 = T.clip(y_pred_avg0, eps, np.float32(1) - eps)
        y_pred_avg0 /= y_pred_avg0.sum(axis=-1, keepdims=True)
        cost2 = T.nnet.categorical_crossentropy(y_pred_avg0.reshape((1,-1)), P-dbeta)[0] # [None,:]
        cost2 = T.switch(y0sum > 0.5, cost2, 0.)  # ignore cost2 if no samples
        cost0 += beta*cost2

    # binary classifier score
    if gamma:
        y_pred0 = T.clip(y_pred[:,0], eps, np.float32(1) - eps)
        if gamma1 < 0.:
            cost3 = - T.dot(poos*y0,T.log(y_pred0)) - T.dot(np.float32(1)-poos*y0.T,T.log(np.float32(1)-y_pred0))
            cost3 /= y_pred.shape[0]
            cost0 += gamma*cost3
        elif gamma1 > 0.:
            cost3 = - T.dot(poos*y0,T.log(y_pred0)) - T.dot((np.float32(1)-poos)*y0,T.log(np.float32(1)-y_pred0))
            cost3 /= y0sum
            cost31 = - T.dot(y1, T.log(np.float32(1) - y_pred0))
            cost31 /= y1sum
            cost0 += gamma*cost3 + gamma1*cost31
        else:  # gamma1 == 0.
            cost3 = - T.dot(poos*y0,T.log(y_pred0)) - T.dot((np.float32(1)-poos)*y0, T.log(np.float32(1)-y_pred0))
            cost3 /= y0sum
            cost0 += gamma*cost3
    return cost0
Code example #21
File: RecurrentTransform.py Project: atuxhe/returnn
 def dtw(i, q_p, b_p, Q, D, inf):
   i0 = T.eq(i, 0)
   # inf = T.cast(1e10,'float32') * T.cast(T.switch(T.eq(self.n,0), T.switch(T.eq(i,0), 0, 1), 1), 'float32')
   penalty = T.switch(T.and_(T.neg(n0), i0), big, T.constant(0.0, 'float32'))
   loop = T.constant(0.0, 'float32') + q_p
   forward = T.constant(0.0, 'float32') + T.switch(T.or_(n0, i0), 0, Q[i - 1])
   opt = T.stack([loop, forward])
   k_out = T.cast(T.argmin(opt, axis=0), 'int32')
   return opt[k_out, T.arange(opt.shape[1])] + D[i] + penalty, k_out
Code example #22
File: classification.py Project: fdoperezi/santander
def jaccard_similarity(y_true, y_predicted):
    """
    y_true: tensor ({1, 0})
    y_predicted: tensor ({1, 0})
    note - we round predicted because float probabilities would not work
    """
    y_predicted = T.round(y_predicted).astype(theano.config.floatX)
    either_nonzero = T.or_(T.neq(y_true, 0), T.neq(y_predicted, 0))
    return T.and_(T.neq(y_true, y_predicted), either_nonzero).sum(axis=-1, dtype=theano.config.floatX) / either_nonzero.sum(axis=-1, dtype=theano.config.floatX)
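
Note that the expression counts positions where the rounded predictions disagree with the targets, divided by the size of their union support, so on binary 0/1 data it evaluates to the Jaccard distance rather than the similarity the name suggests. A toy evaluation (assumed data, with jaccard_similarity from above in scope) makes that concrete:

import numpy as np
import theano
import theano.tensor as T

yt = T.matrix('y_true')
yp = T.matrix('y_pred')
f = theano.function([yt, yp], jaccard_similarity(yt, yp))
a = np.array([[1, 0, 1]], dtype=theano.config.floatX)
b = np.array([[1, 1, 0]], dtype=theano.config.floatX)
print(f(a, b))  # [0.6667]: 2 disagreements over a union of 3 positions
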
Code example #23
def masked_categorical_accuracy(y_true, y_pred, mask):

    y_true = K.argmax(y_true, axis=-1)
    y_pred = K.argmax(y_pred, axis=-1)

    error = K.equal(y_true, y_pred)

    mask_template = T.and_(T.neq(y_true,  mask), T.neq(y_true, 0)).nonzero()

    return K.mean(error[mask_template])
Code example #24
def build_model(shared_params, options, other_params):
    """
    Build the complete neural network model and return the symbolic variables
    """
    # symbolic variables
    x = tensor.matrix(name="x", dtype=floatX)
    y1 = tensor.iscalar(name="y1")
    y2 = tensor.iscalar(name="y2")

    # lstm cell
    (ht, ct) = lstm_cell(x, shared_params, options, other_params)  # gets the ht, ct
    # softmax 1 i.e. frame type prediction
    activation = tensor.dot(shared_params['softmax1_W'], ht).transpose() + shared_params['softmax1_b']
    frame_pred = tensor.nnet.softmax(activation) # .transpose()

    # softmax 2 i.e. gesture class prediction
    #

    # predicted probability for frame type
    f_pred_prob = theano.function([x], frame_pred, name="f_pred_prob")
    # predicted frame type
    f_pred = theano.function([x], frame_pred.argmax(), name="f_pred")

    # cost
    cost = ifelse(tensor.eq(y1, 1), 
                  -tensor.log(frame_pred[0, 0] + options['log_offset']) * other_params['begin_cost_factor'],
                  ifelse(tensor.eq(y1, 2), 
                         -tensor.log(frame_pred[0, 1] + options['log_offset']) * other_params['end_cost_factor'],
                         ifelse(tensor.and_(tensor.eq(y1, 3), tensor.eq(other_params['near_boundary'], 1)),
                                -tensor.log(frame_pred[0, 2] + frame_pred[0, 0] + options['log_offset']),
                                ifelse(tensor.and_(tensor.eq(y1, 3), tensor.eq(other_params['near_boundary'], 2)),
                                       -tensor.log(frame_pred[0, 2] + frame_pred[0, 1] + options['log_offset']),
                                       ifelse(tensor.eq(y1, 3), 
                                              -tensor.log(frame_pred[0, 2] + options['log_offset']),
                                              tensor.abs_(tensor.log(y1)))))), name='ifelse_cost')  
    # ^ last else is a dummy value above. y1 = 1/2/3 based on the frame type

    # function for output of the current lstm cell and softmax prediction
    f_model_cell_output = theano.function([x], (ht, ct, frame_pred), name="f_model_cell_output")
    # return the model symbolic variables and theano functions
    return x, y1, y2, f_pred_prob, f_pred, cost, f_model_cell_output
Code example #25
def spatial_gradient(prediction, target, l=0.1,m=2.):
    # Flatten input to make calc easier
    pred = prediction
    pred_v = pred.flatten(2)
    target_v = target.flatten(2)
    # Compute mask
    mask = T.gt(target_v,0.)
    # Compute n of valid pixels
    n_valid = T.sum(mask, axis=1)
    # Apply mask and log transform
    m_pred = pred_v * mask
    m_t = T.switch(mask, T.log(target_v),0.)
    d = m_pred - m_t

    # Define scale invariant cost
    scale_invariant_cost = (T.sum(n_valid * T.sum(d**2, axis=1)) - l*T.sum(T.sum(d, axis=1)**2))/ T.maximum(T.sum(n_valid**2), 1)

    # Add spatial gradient components from D. Eigen DNL

    # Squeeze in case
    if pred.ndim == 4:
        pred = pred[:,0,:,:]
    if target.ndim == 4:
        target = target[:,0,:,:]
    # Mask in tensor form
    mask_tensor = T.gt(target,0.)
    # Project into log space
    target = T.switch(mask_tensor, T.log(target),0.)
    # Stepsize
    h = 1
    # Compute spatial gradients symbolically
    p_di = (pred[:,h:,:] - pred[:,:-h,:]) * (1 / np.float32(h))
    p_dj = (pred[:,:,h:] - pred[:,:,:-h]) * (1 / np.float32(h))
    t_di = (target[:,h:,:] - target[:,:-h,:]) * (1 / np.float32(h))
    t_dj = (target[:,:,h:] - target[:,:,:-h]) * (1 / np.float32(h))
    m_di = T.and_(mask_tensor[:,h:,:], mask_tensor[:,:-h,:])
    m_dj = T.and_(mask_tensor[:,:,h:], mask_tensor[:,:,:-h])
    # Define spatial grad cost
    grad_cost = T.sum(m_di * (p_di - t_di)**2) / T.sum(m_di) + T.sum(m_dj * (p_dj - t_dj)**2) / T.sum(m_dj)
    # Compute final expression
    return scale_invariant_cost + grad_cost
Code example #26
File: nn_policy_updaters.py Project: ahefnycmu/rpsp
    def _t_ratio_limits(
        self,
        t_single_traj_info,
    ):
        r_max = globalconfig.vars.args.r_max
        r_min = 1 / float(r_max)

        prob_ratio = self._t_prob_ratio(t_single_traj_info)
        upper_bound_valid = T.lt(T.max(prob_ratio), r_max)
        lower_bound_valid = T.gt(T.min(prob_ratio), r_min)
        valid = T.switch(T.and_(lower_bound_valid, upper_bound_valid), 1, -1)
        return valid
Code example #27
File: cnnLayerTypes.py Project: yochju/deepmedic
    def multiclassRealPosAndNegAndTruePredPosNegTraining0OrValidation1(
            self, y, training0OrValidation1):
        """
	The returned list has (numberOfClasses)x4 integers: >numberOfRealPositives, numberOfRealNegatives, numberOfTruePredictedPositives, numberOfTruePredictedNegatives< for each class (incl background).
	For class_i == 0 (backgr), what is reported is the WHOLE rp,rn,tpp,tpn. ie, as calculated considering background VS all other classes.
	Order in the list is the natural order of the classes (ie class-0-WHOLE RP,RN,TPP,TPN, class-1 RP,RN,TPP,TPN, class-2 RP,RN,TPP,TPN ...)
	"""
        returnedListWithNumberOfRpRnPpPnForEachClass = []

        for class_i in xrange(0, self.numberOfOutputClasses):
            #Number of Real Positive, Real Negatives, True Predicted Positives and True Predicted Negatives are reported PER CLASS (first for WHOLE).
            vectorOneAtRealPositives = T.gt(y, 0) if class_i == 0 else T.eq(
                y, class_i)
            vectorOneAtRealNegatives = T.eq(y, 0) if class_i == 0 else T.neq(
                y, class_i)

            if training0OrValidation1 == 0:  #training:
                yPredToUse = self.y_pred
            else:  #validation
                yPredToUse = self.y_pred_inference

            vectorOneAtPredictedPositives = T.gt(
                yPredToUse, 0) if class_i == 0 else T.eq(yPredToUse, class_i)
            vectorOneAtPredictedNegatives = T.eq(
                yPredToUse, 0) if class_i == 0 else T.neq(yPredToUse, class_i)
            vectorOneAtTruePredictedPositives = T.and_(
                vectorOneAtRealPositives, vectorOneAtPredictedPositives)
            vectorOneAtTruePredictedNegatives = T.and_(
                vectorOneAtRealNegatives, vectorOneAtPredictedNegatives)

            returnedListWithNumberOfRpRnPpPnForEachClass.append(
                T.sum(vectorOneAtRealPositives))
            returnedListWithNumberOfRpRnPpPnForEachClass.append(
                T.sum(vectorOneAtRealNegatives))
            returnedListWithNumberOfRpRnPpPnForEachClass.append(
                T.sum(vectorOneAtTruePredictedPositives))
            returnedListWithNumberOfRpRnPpPnForEachClass.append(
                T.sum(vectorOneAtTruePredictedNegatives))

        return returnedListWithNumberOfRpRnPpPnForEachClass
Code example #28
File: wolfe.py Project: vamsijkrishna/neupy
def sequential_and(*conditions):
    """ Use ``and`` operator between all conditions. Function is just
    a syntax sugar that make long Theano logical conditions looks
    less ugly.

    Parameters
    ----------
    *conditions
        Conditions that returns ``True`` or ``False``
    """
    first_condition, other_conditions = conditions[0], conditions[1:]
    if not other_conditions:
        return first_condition
    return T.and_(first_condition, sequential_and(*other_conditions))
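
A minimal usage sketch (assumed, not from the repository) chaining three scalar conditions with sequential_and:

import theano
import theano.tensor as T

a = T.dscalar('a')
in_open_unit_interval = sequential_and(T.gt(a, 0), T.lt(a, 1), T.neq(a, 0.5))
check = theano.function([a], in_open_unit_interval)
print(check(0.25))  # 1: every condition holds
print(check(0.5))   # 0: the last condition fails
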
Code example #29
    def apply_border_conditions(self, cursors, stack_mask, mask, action):
        # Handle the cases where the agent has only one possible choice (the input is exhausted, or the stack holds fewer than two elements),
        # or where no choice needs to be made at all (the whole string has already been processed)
        sentence_length = K.sum(mask, axis=1)
        cursor_pos = K.sum(cursors, axis=1)
        stack_size = K.sum(stack_mask, axis=1)

        input_is_empty = TS.eq(sentence_length - cursor_pos, -1)
        stack_is_empty = TS.le(stack_size, 1)
        action = TS.switch(input_is_empty, 0, action)
        action = TS.switch(stack_is_empty, 1, action)
        no_action = TS.and_(input_is_empty, stack_is_empty)
        policy_calculated = 1 - TS.or_(input_is_empty, stack_is_empty)
        return action, no_action, policy_calculated
Code example #30
File: obstacle.py Project: azane/chomp
def th_distance_field_cost(sdf: tt.TensorVariable, eps: float):
    # Signed distance field cost function, as presented in paper.

    # Given a signed distance field, and an epsilon distance, compute the obstacle cost.
    sdneg = sdf < 0
    sdeps = tt.and_(0 <= sdf, sdf <= eps)
    # sdclr = eps < sdf

    sdneg_v = -sdf + .5*eps
    sdeps_v = .5*eps**-1.*(sdf - eps)**2.
    # sdclr_v = 0.

    # Again, not ideal to "index" this way (cz compute everything), but easy with theano.
    return sdneg_v * sdneg + sdeps_v * sdeps  # + sdclr_v * sdclr
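
A quick numeric check (toy signed distances; eps = 1.0 is an assumption) of the piecewise cost above: linear once penetrating, quadratic inside the epsilon band, zero when clear.

import numpy as np
import theano
import theano.tensor as tt

sdf = tt.vector('sdf')
cost = theano.function([sdf], th_distance_field_cost(sdf, eps=1.0))
print(cost(np.array([-0.5, 0.5, 2.0], dtype=theano.config.floatX)))
# [1.0, 0.125, 0.0]: penetrating, inside the band, clear
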
Code example #31
File: cnnLayerTypes.py Project: pliu007/deepmedic
    def realPosAndNegAndTruePredPosNegTraining0OrValidation1(self, y, training0OrValidation1):
        #***Implemented only for binary***. For multiclass, it counts real-positives as everything not-background, and as true positives the true predicted lesion (independent of class).
        vectorOneAtRealPositives = T.gt(y, 0)
        vectorOneAtRealNegatives = T.eq(y, 0)
        if training0OrValidation1 == 0:  # training
            yPredToUse = self.y_pred
        else:  # validation
            yPredToUse = self.y_pred_inference
        vectorOneAtPredictedPositives = T.gt(yPredToUse, 0)
        vectorOneAtPredictedNegatives = T.eq(yPredToUse, 0)
        vectorOneAtTruePredictedPositives = T.and_(vectorOneAtRealPositives, vectorOneAtPredictedPositives)
        vectorOneAtTruePredictedNegatives = T.and_(vectorOneAtRealNegatives, vectorOneAtPredictedNegatives)

        numberOfRealPositives = T.sum(vectorOneAtRealPositives)
        numberOfRealNegatives = T.sum(vectorOneAtRealNegatives)
        numberOfTruePredictedPositives = T.sum(vectorOneAtTruePredictedPositives)
        numberOfTruePredictedNegatives = T.sum(vectorOneAtTruePredictedNegatives)

        return [numberOfRealPositives,
                numberOfRealNegatives,
                numberOfTruePredictedPositives,
                numberOfTruePredictedNegatives]
Code example #32
def berhu_spatial(predictions, targets, s=0.2, l=0., m=10., gw=0.5):
    # Compute mask
    mask = T.gt(targets, l) * T.lt(targets,m)

    # Compute n of valid pixels
    n_valid = T.sum(mask)
    r = (predictions - targets) * mask
    c = s * T.max(T.abs_(r))
    a_r = T.abs_(r)
    b = T.switch(T.lt(a_r, c), a_r, ((r**2) + (c**2))/(2*c))
    
    pixel_cost = T.sum(b)/n_valid
    
    # Gradient cost
    h = 1
    pred = predictions
    target = targets
    
    if pred.ndim == 4:
        pred = pred[:,0,:,:]
    if target.ndim == 4:
        target = target[:,0,:,:]
    
    # Recompute mask
    mask = T.gt(target, l) * T.lt(target,m)
        
    p_di = (pred[:,h:,:] - pred[:,:-h,:]) * (1 / np.float32(h))
    p_dj = (pred[:,:,h:] - pred[:,:,:-h]) * (1 / np.float32(h))
    t_di = (target[:,h:,:] - target[:,:-h,:]) * (1 / np.float32(h))
    t_dj = (target[:,:,h:] - target[:,:,:-h]) * (1 / np.float32(h))
    m_di = T.and_(mask[:,h:,:], mask[:,:-h,:])
    m_dj = T.and_(mask[:,:,h:], mask[:,:,:-h])
    # Define spatial grad cost
    grad_cost = T.sum(m_di * T.abs_(p_di - t_di)) / T.sum(m_di) + T.sum(m_dj * T.abs_(p_dj - t_dj)) / T.sum(m_dj)
    
    return gw * grad_cost + pixel_cost
Code example #33
    def compute_test_VOC_loss(self):
        # works for 0-1 loss
        all_y_pred = numpy.empty([])
        for i in xrange(self.n_test_batches):
            [y_pred, test_loss] = self.test_model(i)
            if i == 0:
                all_y_pred = y_pred
            else:
                all_y_pred = numpy.concatenate((all_y_pred, y_pred))
        print all_y_pred
        print all_y_pred.shape
        F = T.sum(T.neq(self.test_set_y, all_y_pred))
        TP = T.sum(T.and_(T.eq(self.test_set_y, 1), T.eq(all_y_pred, 1)))
        result = TP / T.cast(TP + F, theano.config.floatX)
        print 'Print result is ', result.eval()
        return result.eval()
Code example #34
File: distribution_samples.py Project: Seb-Leb/Tars
    def _rejection_sampling(self, output_z, alpha, idx):
        eps = self.srng.normal(idx.shape, dtype=alpha.dtype)
        U = self.srng.uniform(idx.shape,
                              low=epsilon(),
                              high=1 - epsilon(),
                              dtype=alpha.dtype)
        z, judge1, judge2 = self._h(alpha[idx], eps)

        _idx_binary = T.and_(T.lt(U, judge1), T.gt(eps, judge2))
        output_z = T.set_subtensor(output_z[idx[_idx_binary.nonzero()]],
                                   z[_idx_binary.nonzero()])

        # update idx
        idx = idx[T.eq(0, _idx_binary).nonzero()]

        return output_z, idx
Code example #35
File: wolfe.py Project: LiuFang816/SALSTM_py_data
    def search_iteration_step(x_previous, x_current, y_previous, y_current,
                              y_deriv_previous, is_first_iteration, x_star):

        y_deriv_current = f_deriv(x_current)

        x_new = x_current * asfloat(2)
        y_new = f(x_new)

        condition1 = T.or_(
            y_current > (y0 + c1 * x_current * y_deriv_0),
            T.and_(
                y_current >= y_previous,
                bitwise_not(is_first_iteration),
            ))
        condition2 = T.abs_(y_deriv_current) <= -c2 * y_deriv_0
        condition3 = y_deriv_current >= zero

        x_star = ifelse(
            condition1,
            zoom(x_previous, x_current, y_previous, y_current,
                 y_deriv_previous, f, f_deriv, y0, y_deriv_0, c1, c2),
            ifelse(
                condition2,
                x_current,
                ifelse(
                    condition3,
                    zoom(x_current, x_previous, y_current, y_previous,
                         y_deriv_current, f, f_deriv, y0, y_deriv_0, c1, c2),
                    x_new,
                ),
            ),
        )
        y_deriv_previous_new = ifelse(condition1, y_deriv_previous,
                                      y_deriv_current)

        is_any_condition_satisfied = sequential_or(condition1, condition2,
                                                   condition3)
        y_current_new = ifelse(is_any_condition_satisfied, y_current, y_new)
        return ([
            x_current, x_new, y_current, y_current_new, y_deriv_previous_new,
            theano_false, x_star
        ],
                theano.scan_module.scan_utils.until(
                    sequential_or(
                        T.eq(x_new, zero),
                        is_any_condition_satisfied,
                    )))
Code example #36
File: train_voc_cnn.py Project: vpetrescu/proto_cnn
    def compute_validation_VOC_loss(self):
        """Added validation loss"""
        # works for 0-1 loss
        all_y_pred = numpy.empty([])
        for i in xrange(self.n_valid_batches):
            y_pred = self.validate_model(i)
            if i == 0:
                all_y_pred = y_pred
            else:
                all_y_pred = numpy.concatenate((all_y_pred, y_pred))
        print all_y_pred

        F = T.sum(T.neq(self.valid_set_y, all_y_pred))
        TP = T.sum(T.and_(T.eq(self.valid_set_y, 1), T.eq(all_y_pred, 1)))
        result = TP/T.cast(TP+F, theano.config.floatX)
        print 'Print result is ', result.eval()
        return result.eval()
Code example #37
File: token_model.py Project: coder35/active_NER
    def logp_loss2(self,
                   x,
                   y,
                   fake_label,
                   neg_label,
                   ismax=True):  # neg_label is the maximum label id
        y = y.dimshuffle((1, 0))
        inx = x.dimshuffle((1, 0))
        fake_mask = T.neq(y, fake_label)
        y = y * fake_mask

        pos_mask = T.and_(fake_mask, T.le(y, neg_label - 1))
        neg_mask = T.ge(y, neg_label)
        iny = y * pos_mask

        pos_pro, neg_pro = self.structure2(inx)

        if ismax:
            neg_pro = T.max(neg_pro, axis=-1)
            #pos_pro : sequence * batch * pos label
            #neg_pro :sequence *batch
            pos_logp = T.nnet.categorical_crossentropy(
                pos_pro.reshape(
                    (pos_pro.shape[0] * pos_pro.shape[1], pos_pro.shape[2])),
                iny.flatten())
            # sequence * batch
            pos_logp = pos_logp.reshape(y.shape) * pos_mask
            pos_loss = T.sum(pos_logp)
            neg_loss = 0 - T.sum(T.log(neg_pro) * neg_mask)
            loss = (pos_loss + neg_loss) / (T.sum(pos_mask) + T.sum(neg_mask))

        else:
            pro = T.concatenate((pos_pro, neg_pro), axis=2)
            pro = pro.reshape((pro.shape[0] * pro.shape[1], pro.shape[2]))

            y = y.flatten()
            losslist = T.nnet.categorical_crossentropy(pro, y)
            losslist = losslist.reshape(fake_mask.shape)
            losslist = losslist * fake_mask

            loss = T.sum(losslist) / T.sum(fake_mask)

        return loss
Code example #38
    def compute_test_VOC_loss(self):
        # works for 0-1 loss
        all_y_pred = numpy.empty([])
        for i in xrange(self.n_test_batches):
            [y_pred, test_loss] = self.test_model_result(i)
            if i == 0:
                all_y_pred = y_pred
            else:
                all_y_pred = numpy.concatenate((all_y_pred, y_pred))
        print all_y_pred
        print all_y_pred.shape
        F = T.sum(T.neq(self.test_set_y, all_y_pred))
        TP = T.sum(T.and_(T.eq(self.test_set_y, 1), T.eq(all_y_pred, 1)))
        result = TP / T.cast(TP + F, theano.config.floatX)
        print 'Print result is ', result.eval()
        # open file and write array to file
        f = open(self.cached_weights_file + "_results.txt", "a")
        numpy.savetxt(f, all_y_pred)
        f.close()
        return result.eval()
Code example #39
File: nn.py Project: CollinM/asap-sas
def quadratic_weighted_kappa_loss(y_true, y_pred):
    min_rating = T.minimum(T.min(y_true), T.min(y_pred))
    max_rating = T.maximum(T.max(y_true), T.max(y_pred))

    hist_true = T.bincount(y_true, minlength=max_rating)
    hist_pred = T.bincount(y_pred, minlength=max_rating)
    num_ratings = (max_rating - min_rating) + 1
    num_scored = float(len(y_true))

    numerator = T.zeros(1)
    denominator = T.zeros(1)
    z = T.zeros(len(y_true))
    for i_true in range(min_rating, max_rating + 1):
        for j_pred in range(min_rating, max_rating + 1):
            expected = T.true_div(T.mul(hist_true[i_true], hist_pred[j_pred]), num_scored)
            d = T.true_div(T.sqr(i_true - j_pred), T.sqr(num_ratings - 1.))
            conf_mat_cell = T.sum(T.and_(T.eq(T.sub(y_true, i_true), z), T.eq(T.sub(y_pred, j_pred), z)))
            numerator = T.add(numerator, T.true_div(T.mul(d, conf_mat_cell), num_scored))
            denominator = T.add(denominator, T.true_div(T.mul(d, expected), num_scored))

    return T.true_div(numerator, denominator)
Code example #40
File: token_model.py Project: coder35/active_NER
    def logp_loss3(self,
                   x,
                   y,
                   fake_label,
                   neg_label,
                   pos_ratio=0.5):  #adopt maxout  for  negative
        # pos_ratio means positive examples' weight (0.5 means equal 1:1)

        print "adopt  positives  weight  ............. " + str(pos_ratio)
        y = y.dimshuffle((1, 0))
        inx = x.dimshuffle((1, 0))
        fake_mask = T.neq(y, fake_label)
        y = y * fake_mask

        pos_mask = T.and_(fake_mask, T.le(y, neg_label - 1)) * pos_ratio
        neg_mask = T.ge(y, neg_label) * (1 - pos_ratio)

        pos_score, neg_score = self.structure2(inx, False)
        maxneg = T.max(neg_score, axis=-1)

        scores = T.concatenate((pos_score, maxneg.dimshuffle((0, 1, 'x'))),
                               axis=2)

        d3shape = scores.shape

        #seq*batch , label
        scores = scores.reshape((d3shape[0] * d3shape[1], d3shape[2]))
        pro = T.nnet.softmax(scores)

        _logp = T.nnet.categorical_crossentropy(pro, y.flatten())

        _logp = _logp.reshape(fake_mask.shape)

        loss = (T.sum(_logp * pos_mask) +
                T.sum(_logp * neg_mask)) / (T.sum(pos_mask) + T.sum(neg_mask))
        pos_loss = T.sum(_logp * pos_mask)
        neg_loss = T.sum(_logp * neg_mask)

        return loss, pos_loss, neg_loss
Code example #41
File: token_model.py Project: mswellhao/active_NER
	def logp_loss2(self,x,y, fake_label, neg_label, ismax = True):# neg_label is the maximum label id
		y = y.dimshuffle((1,0))
		inx = x.dimshuffle((1,0))
		fake_mask = T.neq(y, fake_label)
		y = y*fake_mask


		pos_mask = T.and_(fake_mask, T.le(y, neg_label-1))
		neg_mask = T.ge(y, neg_label)
		iny = y*pos_mask

		pos_pro, neg_pro = self.structure2(inx)

		if ismax:
			neg_pro = T.max(neg_pro, axis = -1)
			#pos_pro : sequence * batch * pos label
			#neg_pro :sequence *batch
			pos_logp = T.nnet.categorical_crossentropy(pos_pro.reshape((pos_pro.shape[0]*pos_pro.shape[1], pos_pro.shape[2])), iny.flatten())
			# sequence * batch 
			pos_logp = pos_logp.reshape(y.shape)*pos_mask  
			pos_loss = T.sum(pos_logp)
			neg_loss = 0 -  T.sum(T.log(neg_pro)*neg_mask)
			loss = (pos_loss + neg_loss)/ (T.sum(pos_mask)+T.sum(neg_mask))

		else:
			pro = T.concatenate((pos_pro, neg_pro), axis = 2)
			pro = pro.reshape((pro.shape[0]*pro.shape[1], pro.shape[2]))

			y = y.flatten()
			losslist = T.nnet.categorical_crossentropy(pro, y)
			losslist = losslist.reshape(fake_mask.shape)
			losslist = losslist*fake_mask

			loss = T.sum(losslist) / T.sum(fake_mask)



		return loss
Code example #42
File: dnn.py Project: wolfhu/StockPredictor
def predict(model_path):
    with open(model_path, 'r') as f:
        network = cPickle.load(f)

    target_var = T.imatrix('y')
    predict_prediction = get_output(network, deterministic=True)
    predict_acc = binary_accuracy(predict_prediction, target_var).mean()

    # calculate win rate
    win_rate_result1 = []
    win_rate_result2 = []
    for win_rate_threhold in [0.5, 0.6, 0.7, 0.8, 0.9]:
        tmp1 = T.sum(T.switch(T.and_(T.gt(predict_prediction, win_rate_threhold), T.eq(target_var, 1)), 1, 0),
                     dtype=theano.config.floatX)
        tmp2 = T.sum(T.switch(T.gt(predict_prediction, win_rate_threhold), 1, 0), dtype=theano.config.floatX)
        test_win_rate = (tmp1 + 0.00001) / (tmp2 + 0.00001)
        win_rate_result1.append(test_win_rate)
        win_rate_result2.append(tmp1)

    input_layer = get_all_layers(network)[0]
    predict = theano.function(inputs=[input_layer.input_var, target_var],
                              outputs=[predict_prediction, predict_acc, T.as_tensor_variable(win_rate_result1), T.as_tensor_variable(win_rate_result2)],
                              on_unused_input='warn')
    X, y, labels, values, _, _, _, _, _, _ = load_dataset('../../data/predict.txt')
    predict_prediction, predict_acc, win_rate_result1, win_rate_result2 = predict(X, y)

    for ix in range(len([0.5, 0.6, 0.7, 0.8, 0.9])):
        sys.stdout.write("  predict win rate loss:\t\t\t{}\n".format(win_rate_result1[ix]))
        sys.stdout.write("  predict possitive num:\t\t\t{}\n".format(win_rate_result2[ix]))
    sys.stdout.write("  predict accuracy:\t\t\t{} %\n".format(predict_acc * 100))

    #output predict result
    with open('../../data/prediction', 'w') as f:
        for ix in xrange(len(labels)):
            line = str(labels[ix]) + '\t' + str(values[ix]) + '\t' + str(predict_prediction[ix][0]) + '\n'
            f.write(line)
    sys.stdout.flush()
Code example #43
    def in_transit(self, t, r=0.0, texp=None):
        """Get a list of timestamps that are in transit

        Args:
            t (vector): A vector of timestamps to be evaluated.
            r (Optional): The radii of the planets.
            texp (Optional[float]): The exposure time.

        Returns:
            The indices of the timestamps that are in transit.

        """
        z = tt.zeros_like(self.a)
        r = tt.as_tensor_variable(r) + z
        R = self.r_star + z

        if self.ecc is None:
            M_contact = self.contact_points_op(self.a, self.incl + z, r, R)
        else:
            M_contact = self.contact_points_op(self.a, self.ecc, self.omega,
                                               self.incl + z, r, R)

        # Wrap the times into time since transit
        hp = 0.5 * self.period
        t_start = (M_contact[0] - self.M0) / self.n
        t_start = tt.mod(t_start + hp, self.period) - hp
        t_end = (M_contact[3] - self.M0) / self.n
        t_end = tt.mod(t_end + hp, self.period) - hp
        dt = tt.mod(self._warp_times(t) - self.t0 + hp, self.period) - hp
        if texp is not None:
            t_start -= 0.5 * texp
            t_end += 0.5 * texp

        mask = tt.any(tt.and_(dt >= t_start, dt <= t_end), axis=-1)

        return tt.arange(t.size)[mask]
Code example #44
File: cnn.py Project: vishalbelsare/cnn-ecg-paf
 def calculate_specificity(self, x, y):
     true_negatives = T.sum(T.and_(T.eq(x, 0), T.eq(y, 0)))
     specificity = true_negatives / T.sum(T.eq(y, 0))
     return specificity
Code example #45
File: cnn.py Project: vishalbelsare/cnn-ecg-paf
 def calculate_sensitivity(self, x, y):
     true_positives = T.sum(T.and_(T.eq(x, 1), T.eq(y, 1)))
     sensitivity = true_positives / T.sum(T.eq(y, 1))
     return sensitivity
Code example #46
File: theano_backend.py Project: kundajelab/keras
def and_(x, y):
    return T.and_(x, y)
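
For reference, T.and_ itself is an elementwise bitwise AND, which behaves as a logical AND on 0/1 integer masks; a short sketch with assumed toy vectors:

import numpy as np
import theano
import theano.tensor as T

x = T.ivector('x')
y = T.ivector('y')
logical_and = theano.function([x, y], T.and_(x, y))
print(logical_and(np.array([1, 1, 0, 0], dtype='int32'),
                  np.array([1, 0, 1, 0], dtype='int32')))  # [1 0 0 0]
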
Code example #47
 def __call__(self, g_loss, d_loss):
     d_loss = ifelse(
         T.and_(g_loss > self.high, d_loss < 1.5 * self.high),
         0. * d_loss, d_loss)
     return g_loss, d_loss
Code example #48
File: network.py Project: kenjyoung/Neurohex
    def __init__(self, batch_size=None, rng=None, load_file=None, params=None):
        if not rng:
            rng = np.random.RandomState(None)
        self.input = T.tensor4("input")  # position matrix
        self.batch_size = batch_size
        layer0_D3 = 48
        layer0_D5 = 80

        layer0 = HexConvLayer(
            rng,
            self.input,
            (batch_size, num_channels, input_size, input_size),
            layer0_D5,
            layer0_D3,
            params=params[0:3] if params else None,
        )

        layer1_D3 = 64
        layer1_D5 = 64

        layer1 = HexConvLayer(
            rng,
            layer0.output,
            (batch_size, layer0_D3 + layer0_D5, input_size, input_size),
            layer1_D5,
            layer1_D3,
            params[3:6] if params else None,
        )

        layer2_D3 = 80
        layer2_D5 = 48

        layer2 = HexConvLayer(
            rng,
            layer1.output,
            (batch_size, layer1_D3 + layer1_D5, input_size, input_size),
            layer2_D5,
            layer2_D3,
            params[6:9] if params else None,
        )

        layer3_D3 = 96
        layer3_D5 = 32

        layer3 = HexConvLayer(
            rng,
            layer2.output,
            (batch_size, layer2_D3 + layer2_D5, input_size, input_size),
            layer3_D5,
            layer3_D3,
            params[9:12] if params else None,
        )

        layer4_D3 = 112
        layer4_D5 = 16

        layer4 = HexConvLayer(
            rng,
            layer3.output,
            (batch_size, layer3_D3 + layer3_D5, input_size, input_size),
            layer4_D5,
            layer4_D3,
            params[12:15] if params else None,
        )

        layer5_D3 = 128
        layer5_D5 = 0

        layer5 = HexConvLayer(
            rng,
            layer4.output,
            (batch_size, layer4_D3 + layer4_D5, input_size, input_size),
            layer5_D5,
            layer5_D3,
            params[15:18] if params else None,
        )

        layer6_D3 = 128
        layer6_D5 = 0

        layer6 = HexConvLayer(
            rng,
            layer5.output,
            (batch_size, layer5_D3 + layer5_D5, input_size, input_size),
            layer6_D5,
            layer6_D3,
            params[18:21] if params else None,
        )

        layer7_D3 = 128
        layer7_D5 = 0

        layer7 = HexConvLayer(
            rng,
            layer6.output,
            (batch_size, layer6_D3 + layer6_D5, input_size, input_size),
            layer7_D5,
            layer7_D3,
            params[21:24] if params else None,
        )

        layer8_D3 = 128
        layer8_D5 = 0

        layer8 = HexConvLayer(
            rng,
            layer7.output,
            (batch_size, layer7_D3 + layer7_D5, input_size, input_size),
            layer8_D5,
            layer8_D3,
            params[24:27] if params else None,
        )

        layer9_D3 = 128
        layer9_D5 = 0

        layer9 = HexConvLayer(
            rng,
            layer8.output,
            (batch_size, layer8_D3 + layer8_D5, input_size, input_size),
            layer9_D5,
            layer9_D3,
            params[27:30] if params else None,
        )

        layer10 = FullyConnectedLayer(
            rng,
            layer9.output.flatten(2),
            (layer9_D3 + layer9_D5) * input_size * input_size,
            boardsize * boardsize,
            params[30:32] if params else None,
        )

        not_played = T.and_(
            T.eq(self.input[:, white, padding : boardsize + padding, padding : boardsize + padding].flatten(2), 0),
            T.eq(self.input[:, black, padding : boardsize + padding, padding : boardsize + padding].flatten(2), 0),
        )

        playable_output = T.nnet.softmax(layer10.output[not_played.nonzero()])

        output = T.switch(not_played, layer10.output, -1 * np.inf)

        self.output = T.nnet.softmax(output)

        self.params = (
            layer0.params
            + layer1.params
            + layer2.params
            + layer3.params
            + layer4.params
            + layer5.params
            + layer6.params
            + layer7.params
            + layer8.params
            + layer9.params
            + layer10.params
        )

        self.mem_size = (
            layer1.mem_size
            + layer2.mem_size
            + layer3.mem_size
            + layer4.mem_size
            + layer5.mem_size
            + layer6.mem_size
            + layer7.mem_size
            + layer8.mem_size
            + layer9.mem_size
            + layer10.mem_size
        )
Code example #49
File: spielewiese.py Project: 151706061/MITK
vw_new_direction = xi12 * T.sqrt((1. - T.sqr(u_new_direction)) / t_xi)
uvw_new_direction = T.concatenate([u_new_direction, vw_new_direction], axis=1)

#theano.printing.Print('t_xi')(t_xi)
#theano.printing.Print('vw')(vw_new_direction)
#theano.printing.Print('uvw')(uvw_new_direction)
# roulette
weight_for_starting_roulette = 0.001
CHANCE = 0.1
partakes_roulette = T.switch(T.lt(new_weight, weight_for_starting_roulette),
                             1,
                             0)
roulette = rng.uniform((photons,1), low=0., high=1.)
loses_roulette = T.gt(roulette, CHANCE)
# if roulette decides to terminate the photon: set weight to 0
weight_after_roulette = T.switch(T.and_(partakes_roulette, loses_roulette),
                                 0.,
                                 new_weight)
# if partakes in roulette but does not get terminated
weight_after_roulette = T.switch(T.and_(partakes_roulette, T.invert(loses_roulette)),
                                 weight_after_roulette / CHANCE,
                                 weight_after_roulette)
#theano.printing.Print('new weight')(new_weight)
#theano.printing.Print('partakes_roulette')(partakes_roulette)
#theano.printing.Print('loses_roulette')(loses_roulette)
#theano.printing.Print('weight_after_roulette')(weight_after_roulette)


one_cycle = theano.function(inputs=[mu_a, mu_s, microns_per_shell],
                            outputs=[shells, new_heats],
                            updates=OrderedDict({xyz: xyz_moved, uvw: uvw_new_direction,