Example #1
	def play(self):
		u_pref = self.strategy()
		orca_self = Agent(self.state.x, self.state.v, .1, u_pref)
		orca_other = []

		# u = u_pref

		for other, state in self.state_team_neigh.items():
			orca_other.append(Agent(state.x, state.v, .1, (0, 0)))

		if orca_other:
			u, _ = orca(orca_self, orca_other, 2., 1e-2)
			if norm(u)>0:
				u = self.vmax*u/norm(u)				
		else:
			u = u_pref
		
		# velocity
		cmdV_msg = Twist()
		cmdV_msg.linear.x = u[0]
		cmdV_msg.linear.y = u[1]

		# location
		cmdX_msg = self.get_cmdX_msg(self.x0, self.state.z)

		# publish
		self.cmdV_pubs.publish(cmdV_msg)
		self.cmdX_pubs.publish(cmdX_msg)
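
Note: a hedged, self-contained sketch of the speed clamp used above, assuming `norm` is `numpy.linalg.norm` and `vmax` is a scalar speed limit (names mirror the example, but this is an illustration, not the project's code):

import numpy as np
from numpy.linalg import norm

def clamp_to_vmax(u, vmax):
    # Rescale a command velocity to magnitude vmax; leave zero vectors untouched.
    u = np.asarray(u, dtype=float)
    n = norm(u)
    return vmax * u / n if n > 0 else u

print(clamp_to_vmax([3.0, 4.0], 1.0))  # -> [0.6 0.8]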
Example #2
    def __init__(self,
                 base_dir,
                 batch_size,
                 rst,
                 max_size=500,
                 multi_batch=False,
                 normalize=True,
                 preprocessing=True):
        BATCH_FILES = 4
        self.base_dir = base_dir
        self.batch_size = batch_size
        self.id = 1
        self.rst = rst
        self.multi_batch = multi_batch
        self.normalize = normalize
        self.max_size = max_size
        self.preprocessing = preprocessing
        self.x = self.get_content_images()

        if multi_batch:
            self.y = self.get_style_images(self.id)
        else:
            self.y = self.get_style_images()

        if self.preprocessing:
            self.x = utils.preprocess(self.x)
            self.y = utils.preprocess(self.y)

        if normalize:
            self.x = utils.norm(self.x)
            self.y = utils.norm(self.y)
Example #3
    def _prepare_data(self, X, Y, n_train, normalize=True):

        X_train, Y_train = X[:n_train], Y[:n_train]
        X_test, Y_test = X[n_train:], Y[n_train:]

        if normalize:
            X_mu, X_std = mu_std(X_train)
            Y_mu, Y_std = mu_std(Y_train)

            X_train = norm(X_train, X_mu, X_std)
            X_test = norm(X_test, X_mu, X_std)
            Y_train = norm(Y_train, Y_mu, Y_std)
            Y_test = norm(Y_test, Y_mu, Y_std)
        else:
            # identity statistics, so self.X_mu etc. below are always defined
            X_mu = Y_mu = 0.0
            X_std = Y_std = 1.0

        #ids = np.int32(np.arange(self.n_tasks))
        #np.random.shuffle(ids)
        #train_ids = ids[:n_train]
        #test_ids = ids[n_train:]

        train_ids = np.int32(list(range(n_train)))
        test_ids = np.int32(list(range(n_train, X.shape[0])))

        self.data["training"]["X"] = X_train
        self.data["training"]["Y"] = Y_train
        self.data["training"]["ids"] = train_ids
        self.data["test"]["X"] = X_test
        self.data["test"]["Y"] = Y_test
        self.data["test"]["ids"] = test_ids

        self.X_mu = X_mu
        self.X_std = X_std
        self.Y_mu = Y_mu
        self.Y_std = Y_std
Example #4
def train_pretrained(epoch):
    epoch_loss = 0
    model.train()
    for iteration, batch in enumerate(training_data_loader, 1):
        input, target = batch[0], batch[1]
        minibatch = input.size()[0]
        for j in range(minibatch):
            input[j] = utils.norm(input[j], vgg=True)
            target[j] = utils.norm(target[j], vgg=True)

        if cuda:
            input = Variable(input).cuda(gpus_list[0])
            target = Variable(target).cuda(gpus_list[0])

        optimizer.zero_grad()
        sr = model(input)
        loss = MSE_loss(sr, target)
        epoch_loss += loss.data
        loss.backward()
        optimizer.step()

        print("Epoch: [%2d] [%4d/%4d] G_loss_pretrain: %.8f" %
              ((epoch), (iteration), len(training_data_loader), loss.data))

    print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(
        epoch, epoch_loss / len(training_data_loader)))
Example #5
    def get_influence_estimate(self):
        """Generates one random Kron.-product estimate of the influence matrix.

        Samples a random vector nu of iid samples with 0 mean from a
        distribution given by nu_dist, and returns an updated estimate
        of A and B from Eqs. (1)-(4).

        Returns:
            Updated A (numpy array of shape (m)) and B (numpy array of shape
                (n_h, n_h))."""

        #Sample random vector (shape (2) in KF-RTRL)
        self.nu = self.sample_nu()

        #Calculate p0, p1 or override with fixed P0, P1 if given
        if self.P0 is None:
            self.p0 = np.sqrt(norm(self.B_forwards) / norm(self.A))
        else:
            self.p0 = np.copy(self.P0)
        if self.P1 is None:
            self.p1 = np.sqrt(norm(self.D) / norm(self.a_hat))
        else:
            self.p1 = np.copy(self.P1)

        #Update Kronecker product approximation
        A = self.nu[0] * self.p0 * self.A + self.nu[1] * self.p1 * self.a_hat
        B = (self.nu[0] * (1 / self.p0) * self.B_forwards + self.nu[1] *
             (1 / self.p1) * self.D)

        return A, B
Example #6
def mergebands(a, b, wa = 0, wb = 0, linear = True, yfixed = False):
    ''' Combine systematics bands (optionally as weighted average). Optionally keep central value fixed.  '''
    sumwt = (wa + wb) * 0.5  # mean weight, so equal weights reduce to a plain sum
    if sumwt == 0: sumwt = 1
    if wa == 0: wa = 1
    if wb == 0: wb = 1
    assert a.GetN() == b.GetN()
    tg = ROOT.TGraphAsymmErrors()
    for ib in xrange(a.GetN()):
        if yfixed:
            tg.SetPoint(ib, a.GetX()[ib], a.GetY()[ib])
        else:
            tg.SetPoint(ib, a.GetX()[ib], a.GetY()[ib] + b.GetY()[ib])
        if linear: 
            yh = (wa * a.GetErrorYhigh(ib) + wb * b.GetErrorYhigh(ib))/(sumwt)
            yl = (wa * a.GetErrorYlow(ib) + wb * b.GetErrorYlow(ib))/(sumwt)
        else: 
            yh = utils.norm((a.GetErrorYhigh(ib), b.GetErrorYhigh(ib)))
            yl = utils.norm((a.GetErrorYlow(ib), b.GetErrorYlow(ib)))
        tg.SetPointError(ib, a.GetErrorXlow(ib), a.GetErrorXhigh(ib), yl, yh)
    tg.SetFillStyle(a.GetFillStyle())
    tg.SetFillColor(a.GetFillColor())
    tg.SetMarkerStyle(a.GetMarkerStyle())
    tg.SetMarkerSize(a.GetMarkerSize())
    tg.SetLineWidth(a.GetLineWidth())
    tg.SetLineColor(a.GetLineColor())
    return tg
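
Note: with `linear=False` the errors are combined in quadrature through `utils.norm` on a tuple. A minimal sketch of the helper this example (and the counter arithmetic in Examples #50-#53) appears to assume; an assumption, not the project's implementation:

import math

def norm(components):
    # Euclidean (quadrature) sum of an iterable of error components (assumed helper).
    return math.sqrt(sum(c * c for c in components))

print(norm((3.0, 4.0)))  # -> 5.0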
Example #7
def test_constrain_on_polymer_end():    
    end = Point(index=19,position=[0.2,0.2,0.2])
    #end = Point(index=199,position=[0.2,0.2,0.2])
    box = Box([0,0,0],[20,20,20])
    coords,bonds,type_beads,ids = one_polymer(N=20,type_bead=1,ptolerance=0,type_polymer="linear",start_id=0,lconstrain=[end],gconstrain=[box],max_trial=1000)   
    print norm(coords[-2]-np.array([0.2,0.2,0.2]))
    assert(norm(coords[-2]-np.array([0.2,0.2,0.2])) < 3)
Example #8
def gradient_it(m, eps):
    cnt = 0

    (A, B) = ut.split_at(m)
    x0 = [0] * len(B)
    r0 = ut.minus(B, ut.subst(A, x0))
    p0 = r0
    z0 = r0
    s0 = r0

    At = ut.transpose_matrix(A)  # A never changes, so transpose once

    for i in range(0, 2 * len(B)):
        sub = ut.subst(A, z0)
        prev_p = p0
        prev_r = r0

        a = ut.scalar_product(p0, r0) / ut.scalar_product(s0, sub)
        x0 = ut.plus(x0, ut.mul(a, z0))
        r0 = ut.minus(r0, ut.mul(a, sub))
        p0 = ut.minus(p0, ut.mul(a, ut.subst(At, s0)))
        b = ut.scalar_product(p0, r0) / ut.scalar_product(prev_p, prev_r)
        z0 = ut.plus(r0, ut.mul(b, z0))
        s0 = ut.plus(p0, ut.mul(b, s0))

        if abs(ut.norm(r0) / ut.norm(B)) < eps:
            break
        cnt += 1

    return (x0, cnt)
Example #9
def palm_position_histogram(frames, bins=8, range=(-1., 1.)):
    """
        Feature based on a 3d histogram of the normalized positions of each palm
    """
    length = range[1] - range[0]
    bin_size = length / bins

    def hround(v):
        return min(bins - 1, int((v - range[0]) / bin_size))

    l = list_3d(bins)
    positions = []
    for frame in frames:
        for hand in frame.hands():
            palm = hand.palm()
            if palm:
                p = palm.position
                positions.append(p)
    average_p = utils.ave_v(positions)
    for index, position in enumerate(positions):
        positions[index] = utils.subtract(position, average_p)
        v = positions[index]
        x = v.x / utils.norm(v)
        y = v.y / utils.norm(v)
        z = v.z / utils.norm(v)
        l[hround(x)][hround(y)][hround(z)] += 1
    return l
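
Note: this extractor relies on two small helpers from its module. A hedged sketch of plausible definitions (assumptions, not the project's code):

import math

def list_3d(bins):
    # bins x bins x bins nested list of zero counts (assumed helper).
    return [[[0] * bins for _ in range(bins)] for _ in range(bins)]

def norm(v):
    # Euclidean length of a Leap-style vector with .x/.y/.z fields (assumed helper).
    return math.sqrt(v.x ** 2 + v.y ** 2 + v.z ** 2)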
Example #10
    def forward(self, x):
        """
        Params:
            x:    {tensor(N, n_features(128))}
        Returns:
            loss: {tensor(1)}
        """
        ## Intra-class: entropy of each sample's class-membership probabilities, to be minimized
        p = torch.cat(list(
            map(lambda x: self._p(x, self.m, self.s1).unsqueeze(0), x)),
                      dim=0)  # P_{N × n_classes} = [p_{ik}]
        n = torch.cat(list(map(lambda x: norm(x, self.m).unsqueeze(0), x)),
                      dim=0)  # N_{N × n_classes} = [n_{ik}]
        intra = torch.mean(torch.sum(p * n, dim=1))

        ## Inter-class
        m = torch.mean(self.m, dim=0)
        p = self._p(m, self.m, self.s2)
        n = norm(m, self.m)
        inter = torch.sum(p * n)

        ## Optimization objective, to be minimized
        # total = intra / inter
        total = intra - self.lamb * inter
        # total = intra + 1. / inter

        return total, intra, inter
Example #11
def get_features(embed, mask_encoder, device, imgs):
    xs = embed(imgs.to(device))  # x_representation:[b, 2048, h, w]
    masks = mask_encoder(xs)  # [b, 1, h, w]
    if embed.model_name.startswith('efficientnet'):
        valid_layer = [2, 3]
        z_ATT = np.concatenate([
            norm(
                mask_encoder.attention_pooling(
                    x, mask).squeeze(3).squeeze(2).detach().cpu().numpy())
            for i, (x, mask) in enumerate(zip(xs, masks)) if i in valid_layer
        ],
                               axis=1)
        z_ATTMAX = np.concatenate([
            norm(
                F.adaptive_max_pool2d(x * mask, output_size=1).squeeze(
                    3).squeeze(2).detach().cpu().numpy())
            for i, (x, mask) in enumerate(zip(xs, masks)) if i in valid_layer
        ],
                                  axis=1)
        z_MAX = norm(
            F.adaptive_max_pool2d(
                xs[-1],
                output_size=1).squeeze(3).squeeze(2).detach().cpu().numpy())
        z = np.concatenate([z_ATT, z_ATTMAX, z_MAX], axis=1)
    elif embed.model_name.startswith('resnet'):
        valid_layer = [3]
        z_ATT = np.concatenate([
            norm(
                mask_encoder.attention_pooling(
                    x, mask).squeeze(3).squeeze(2).detach().cpu().numpy())
            for i, (x, mask) in enumerate(zip(xs, masks)) if i in valid_layer
        ],
                               axis=1)
        z = z_ATT
    else:
        raise ValueError('unsupported embed.model_name: %s' % embed.model_name)
    return z
Example #12
 def borders(self):
     steering = np.zeros(2)
     distance = np.array([self.width / 2, self.height / 2]) - self.position
     mag = norm(distance, 4)
     if mag > self.width / 2 - 20:
         steering = (distance / norm(distance, 2)) * self.max_force_bor
     return steering
Example #13
def palm_position_histogram(frames,bins = 8,range=(-1.,1.)):
    """
        Feature based on a 3d histogram of the normalized positions of each palm
    """
    length = range[1]-range[0]
    bin_size = length/bins

    def hround(v):
        return min(bins-1,int((v-range[0])/bin_size))

    l = list_3d(bins)
    positions = []
    for frame in frames:
        for hand in frame.hands():
            palm = hand.palm()
            if palm:
                p = palm.position
                positions.append(p)
    average_p = utils.ave_v(positions)
    for index,position in enumerate(positions):
        positions[index] = utils.subtract(position,average_p)
        v = positions[index]
        x = v.x/utils.norm(v)
        y = v.y/utils.norm(v)
        z = v.z/utils.norm(v)
        l[hround(x)][hround(y)][hround(z)] += 1
    return l
Example #14
    def get_influence_estimate(self):
        """Generates one random Kron.-product estimate of the influence matrix.

        Samples a random vector nu of iid samples with 0 mean from a
        distribution given by nu_dist, and returns an updated estimate
        of A and B from Eqs. (1)-(4).

        Returns:
            Updated A (numpy array of shape (n_h)) and B (numpy array of shape
                (n_h, n_m))."""

        #Sample random vector
        self.nu = self.sample_nu()

        # Get random projection of M_immediate onto \nu
        M_projection = (self.papw.T * self.nu).T

        #Calculate scaling factors
        B_norm = norm(self.B_forwards)
        A_norm = norm(self.A)
        M_norm = norm(M_projection)
        self.p0 = np.sqrt(B_norm / A_norm)
        self.p1 = np.sqrt(M_norm / np.sqrt(self.n_h))

        #Override with fixed P0 and P1 if given
        if self.P0 is not None:
            self.p0 = np.copy(self.P0)
        if self.P1 is not None:
            self.p1 = np.copy(self.P1)

        #Update "inverse" Kronecker product approximation
        A = self.p0 * self.A + self.p1 * self.nu
        B = (1 / self.p0) * self.B_forwards + (1 / self.p1) * M_projection

        return A, B
Example #15
    def strategy(self):

        # copy to prevent change during iteration
        oppo_dict = {k: v for k, v in self.state_oppo_neigh.items()}

        xds, vds = [], []
        for d, state in oppo_dict.items():
            xds.append(np.array([x for x in state.x]))
            vds.append(state.speed)
        # print(str(self), vs, vd)

        if xds:  # TODO: to add velocity estimation
            # dr = DominantRegion(self.env.target.size, vd/norm(self.state.v), self.state.x, xds, offset=0)
            dr = DominantRegion([self.r] * len(xds),
                                self.vmax,
                                vds,
                                self.state.x,
                                xds,
                                offset=0)
            xw = self.target.deepest_point_in_dr(dr)
            dx = xw - self.state.x
            dist = norm(dx)
            if dist > 1e-6:
                return self.vmax * dx / dist

        dx = np.array([self.target.x0, self.target.y0]) - self.state.x
        return self.vmax * dx / norm(dx)
Example #16
def system_generator(x0, z0, dt):
    t = 0
    x = x0
    z = z0

    while True:
        xx = delta_matrix(x)
        xx_norm = expand(norm(xx))

        zz = delta_matrix(z)
        zz_norm = expand(norm(zz))

        xz = delta_matrix(x, z)
        zx = -np.moveaxis(xz, 0, 1)

        xz_norm = norm(xz)
        zx_norm = xz_norm.T
        xz_norm = expand(xz_norm)
        zx_norm = expand(zx_norm)

        with np.errstate(divide='ignore', invalid='ignore'):
            vx = np.nansum(prey_social(xx_norm) / xx_norm * xx, 1) / N + \
                np.nansum(prey_predator(xz_norm) / xz_norm * xz, 1) / N2
            vz = np.nansum(predator_social(zz_norm) / zz_norm * zz, 1) / N2 + \
                np.nansum(predator_prey(zx_norm) / zx_norm * zx, 1) / N

        yield t, x, z, vx, vz

        x = x + vx * dt
        z = z + vz * dt
        t += dt
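
Note: this generator and the next rely on `delta_matrix` and `expand` from their module. A hedged sketch of the assumed semantics (pairwise-difference tensor plus a trailing broadcast axis), not the project's actual code:

import numpy as np

def delta_matrix(x, y=None):
    # Pairwise differences d[i, j] = x[i] - y[j], shape (len(x), len(y), 2) (assumed).
    y = x if y is None else y
    return x[:, None, :] - y[None, :, :]

def expand(a):
    # Append a trailing axis so per-pair norms broadcast against coordinate pairs (assumed).
    return a[..., None]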
Example #17
def system_generator(x0,  dt):
    x = x0
    t = 0
    dB = 0
    while True:
        xx = delta_matrix(x)
        r_xx = norm(xx, keepdims=True)

        # Interaction among individuals
        with np.errstate(divide='ignore', invalid='ignore'):
            v = np.nansum(F(r_xx, a) / r_xx * xx, axis=1) / N

        # Environment influence
        if barr_type != NO_BARR:
            # Add env
            if gate_len != 0:
                # Point repulsion
                c = (0, 1.5)
                xc = x - c
                v += .5 / norm(xc, keepdims=True) ** 2 * xc
            else:
                # Gravity
                v += (0, - .1)

        # dx without barrier consideration
        # Plus random diffusion
        dx = v * dt + mu * dB

        # Now consider different barrier
        if barr_type != NO_BARR:
            x_next = x + dx

            # barrier with EPS (vague judgement, not so strict)
            pr = x[:, 1]
            nx = x_next[:, 1]
            invalid = (pr > BARR) & (nx < BARR + EPS)  # Upper to lower
            invalid |= (pr < BARR) & (nx > BARR - EPS)  # Lower to upper

            if barr_type == ABSORB:
                dx *= ~ expand(invalid)
            else:
                # Reflecting barrier
                if gate_len != 0:
                    # Exclude those via gate: if before or after are within gate
                    exclude = (np.abs(x_next[:, 0] - 0) < gate_len /
                               2) | (np.abs(x[:, 0] - 0) < gate_len / 2)
                    invalid &= ~ exclude

                bdd = np.hstack((np.tile(False, (N, 1)), expand(invalid)))
                dx = np.where(bdd, - dx, dx)

        v = dx / dt
        yield t, x, v
        t += dt
        # Do not use +=, as it modifies the mutable variable x (by `__iadd__` method)
        x = x + dx
        if mu != 0:
            dB = np.random.randn(N, 2)
Example #18
def sparse_correlation(ind1, data1, ind2, data2, n_features):

    mu_x = 0.0
    mu_y = 0.0
    dot_product = 0.0

    if ind1.shape[0] == 0 and ind2.shape[0] == 0:
        return 0.0
    elif ind1.shape[0] == 0 or ind2.shape[0] == 0:
        return 1.0

    for i in range(data1.shape[0]):
        mu_x += data1[i]
    for i in range(data2.shape[0]):
        mu_y += data2[i]

    mu_x /= n_features
    mu_y /= n_features

    shifted_data1 = np.empty(data1.shape[0], dtype=np.float32)
    shifted_data2 = np.empty(data2.shape[0], dtype=np.float32)

    for i in range(data1.shape[0]):
        shifted_data1[i] = data1[i] - mu_x
    for i in range(data2.shape[0]):
        shifted_data2[i] = data2[i] - mu_y

    norm1 = np.sqrt(
        (norm(shifted_data1) ** 2) + (n_features - ind1.shape[0]) * (mu_x ** 2)
    )
    norm2 = np.sqrt(
        (norm(shifted_data2) ** 2) + (n_features - ind2.shape[0]) * (mu_y ** 2)
    )

    dot_prod_inds, dot_prod_data = sparse_mul(ind1, shifted_data1, ind2, shifted_data2)

    common_indices = set(dot_prod_inds)

    for i in range(dot_prod_data.shape[0]):
        dot_product += dot_prod_data[i]

    for i in range(ind1.shape[0]):
        if ind1[i] not in common_indices:
            dot_product -= shifted_data1[i] * (mu_y)

    for i in range(ind2.shape[0]):
        if ind2[i] not in common_indices:
            dot_product -= shifted_data2[i] * (mu_x)

    all_indices = arr_union(ind1, ind2)
    dot_product += mu_x * mu_y * (n_features - all_indices.shape[0])

    if norm1 == 0.0 and norm2 == 0.0:
        return 0.0
    elif dot_product == 0.0:
        return 1.0
    else:
        return 1.0 - (dot_product / (norm1 * norm2))
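
Note: a hedged dense reference for what `sparse_correlation` computes once the sparse vectors are densified to length n_features (pure NumPy, mirroring the same edge cases):

import numpy as np

def dense_correlation_distance(a, b):
    # 1 - Pearson correlation of two dense vectors (reference sketch).
    a = a - a.mean()
    b = b - b.mean()
    norm1, norm2 = np.linalg.norm(a), np.linalg.norm(b)
    dot_product = np.dot(a, b)
    if norm1 == 0.0 and norm2 == 0.0:
        return 0.0
    if dot_product == 0.0:
        return 1.0
    return 1.0 - dot_product / (norm1 * norm2)

print(dense_correlation_distance(np.array([1., 2., 3.]), np.array([3., 2., 1.])))  # -> 2.0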
Example #19
 def normalize_vec(X, n = 2):
     """Normalizes two parts (:m and m:) of the vector"""
     X_m = X[:m]
     X_n = X[m:]
     norm_X_m = norm(X_m, n)
     Y_m = [x/norm_X_m for x in X_m]
     norm_X_n = norm(X_n, n)
     Y_n = [x/norm_X_n for x in X_n]
     return Y_m + Y_n
Example #21
def truncated_svd(X, num_val=2, max_iter=1000):
    """Computes the first component of SVD"""

    def normalize_vec(X, n = 2):
        """Normalizes two parts (:m and m:) of the vector"""
        X_m = X[:m]
        X_n = X[m:]
        norm_X_m = norm(X_m, n)
        Y_m = [x/norm_X_m for x in X_m]
        norm_X_n = norm(X_n, n)
        Y_n = [x/norm_X_n for x in X_n]
        return Y_m + Y_n

    def remove_component(X):
        """Removes components of already obtained eigen vectors from X"""
        X_m = X[:m]
        X_n = X[m:]
        for eivec in eivec_m:
            coeff = dotproduct(X_m, eivec)
            X_m = [x1 - coeff*x2 for x1, x2 in zip(X_m, eivec)]
        for eivec in eivec_n:
            coeff = dotproduct(X_n, eivec)
            X_n = [x1 - coeff*x2 for x1, x2 in zip(X_n, eivec)]
        return X_m + X_n

    m, n = len(X), len(X[0])
    A = [[0 for _ in range(n + m)] for _ in range(n + m)]
    for i in range(m):
        for j in range(n):
            A[i][m + j] = A[m + j][i] = X[i][j]

    eivec_m = []
    eivec_n = []
    eivals = []

    for _ in range(num_val):
        X = [random.random() for _ in range(m + n)]
        X = remove_component(X)
        X = normalize_vec(X)

        for _ in range(max_iter):
            old_X = X
            X = matrix_multiplication(A, [[x] for x in X])
            X = [x[0] for x in X]
            X = remove_component(X)
            X = normalize_vec(X)
            # check for convergence
            if norm([x1 - x2 for x1, x2 in zip(old_X, X)]) <= 1e-10:
                break

        projected_X = matrix_multiplication(A, [[x] for x in X])
        projected_X = [x[0] for x in projected_X]
        eivals.append(norm(projected_X, 1)/norm(X, 1))
        eivec_m.append(X[:m])
        eivec_n.append(X[m:])
    return (eivec_m, eivec_n, eivals)
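
Note: a hedged usage sketch. The eigenvalues of the symmetric block matrix built above are plus/minus the singular values of X, so `eivals` should approximate them; this assumes the module's list-based `norm`, `dotproduct`, `matrix_multiplication` helpers and `random` are in scope alongside truncated_svd:

X = [[3.0, 0.0],
     [0.0, 1.0]]
eivec_m, eivec_n, eivals = truncated_svd(X, num_val=2)
print(eivals)  # expected to approximate the singular values [3.0, 1.0]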
Example #23
 def sample(self, N=1, rsf=10):
 
     '''
     Args:
         N = number of samples to generate
         rsf = multiplicative factor for extra backup samples in rejection sampling 
     
     Returns:
         samples; N samples generated
         
     Notes:
         no autodiff
     '''
     
     d = self.x_dim
     
     with torch.no_grad():
         
         mu, kappa = self.get_params()
     
         # Step-1: Sample uniform unit vectors in R^{d-1} 
         v = torch.randn(N, d-1).to(mu)
         v = v / utils.norm(v, dim=1)
         
         # Step-2: Sample v0
         kmr = np.sqrt( 4*kappa.item()**2 + (d-1)**2 )
         bb = (kmr - 2*kappa) / (d-1)
         aa = (kmr + 2*kappa + d - 1) / 4
         dd = (4*aa*bb)/(1+bb) - (d-1)*np.log(d-1)
         beta = torch.distributions.Beta( torch.tensor(0.5*(d-1)), torch.tensor(0.5*(d-1)) )
         uniform = torch.distributions.Uniform(0.0, 1.0)
         v0 = torch.tensor([]).to(mu)
         while len(v0) < N:
             eps = beta.sample([1, rsf*(N-len(v0))]).squeeze().to(mu)
             uns = uniform.sample([1, rsf*(N-len(v0))]).squeeze().to(mu)
             w0 = (1 - (1+bb)*eps) / (1 - (1-bb)*eps)
             t0 = (2*aa*bb) / (1 - (1-bb)*eps)
             det = (d-1)*t0.log() - t0 + dd - uns.log()
             v0 = torch.cat([v0, torch.tensor(w0[det>=0]).to(mu)])
             if len(v0) > N:
                 v0 = v0[:N]
                 break
         v0 = v0.reshape([N,1])
         
         # Step-3: Form x = [v0; sqrt(1-v0^2)*v]
         samples = torch.cat([v0, (1-v0**2).sqrt()*v], 1)
 
         # Setup-4: Householder transformation
         e1mu = torch.zeros(d,1).to(mu);  e1mu[0,0] = 1.0
         e1mu = e1mu - mu if len(mu.shape)==2 else e1mu - mu.unsqueeze(1)
         e1mu = e1mu / utils.norm(e1mu, dim=0)
         samples = samples - 2 * (samples @ e1mu) @ e1mu.t()
 
     return samples
Example #24
def householder_vector(x):
    """
    This function creates a householder_reflector that can be used to form H as I - 2*v*v' to zero first entry
    :param x: a column vector
    :return: a column vector useful to form H
    """
    s = -np.sign(x[0]) * norm(x)
    v = np.copy(x)
    v[0] = v[0] - s
    v = np.true_divide(v, norm(v))
    return v, s
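
Note: a quick hedged check of the reflector, assuming NumPy and that `norm` above is `numpy.linalg.norm`. Forming H = I - 2*v*v' from the returned v should map x onto (s, 0, ..., 0):

import numpy as np

x = np.array([3.0, 1.0, 2.0])
v, s = householder_vector(x)
H = np.eye(len(x)) - 2.0 * np.outer(v, v)
print(H @ x)  # approximately [s, 0, 0]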
Example #25
def test_create_mixture():
    coords, bonds, type_beads, ids = one_polymer(N=3,
                                                 type_bead=[0, 1, 0],
                                                 liaison={
                                                     "0-0": [1.0, 0],
                                                     "0-1": [2.0, 1]
                                                 },
                                                 ptolerance=0,
                                                 type_polymer="linear",
                                                 start_id=0)
    assert (bonds[0] == [[0, 1, 0, 1], [1, 1, 1, 2]])
    assert (round(norm(coords[0] - coords[1]), 3) == 2.00)
    assert (round(norm(coords[1] - coords[2]), 3) == 2.00)
Example #26
    def test_single(self, img_fn):
        # networks
        self.G = Generator(num_channels=self.num_channels,
                           base_filter=64,
                           num_residuals=16)

        if self.gpu_mode:
            self.G.cuda()

        # load model
        self.load_model()

        # load data
        img = Image.open(img_fn).convert('RGB')

        if self.num_channels == 1:
            img = img.convert('YCbCr')
            img_y, img_cb, img_cr = img.split()

            input = ToTensor()(img_y)
            y_ = Variable(utils.norm(input.unsqueeze(1), vgg=True))
        else:
            input = ToTensor()(img).view(1, -1, img.height, img.width)
            y_ = Variable(utils.norm(input, vgg=True))

        if self.gpu_mode:
            y_ = y_.cuda()

        # prediction
        self.G.eval()
        recon_img = self.G(y_)
        recon_img = utils.denorm(recon_img.cpu().data[0].clamp(0, 1), vgg=True)
        recon_img = ToPILImage()(recon_img)

        if self.num_channels == 1:
            # merge color channels with super-resolved Y-channel
            recon_y = recon_img
            recon_cb = img_cb.resize(recon_y.size, Image.BICUBIC)
            recon_cr = img_cr.resize(recon_y.size, Image.BICUBIC)
            recon_img = Image.merge(
                'YCbCr', [recon_y, recon_cb, recon_cr]).convert('RGB')

        # save img
        result_dir = os.path.join(self.save_dir, 'test_result')
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
        save_fn = result_dir + '/SR_result.png'
        recon_img.save(save_fn)

        print('Single test result image is saved.')
        return recon_img
Example #27
 def draw_link(self, link):
     pos = link.child.localPos3
     r = utils.norm(pos)
     if r < 1e-8:
         return
     n = np.cross((0, 0, 1), pos / r)
     a = np.arccos(np.dot((0, 0, 1), pos / r))
     with utils.glPreserveMatrix():
         if utils.norm(n) > 1e-8:
             glRotated(a * 180 / np.pi, *n/utils.norm(n))
         quad = gluNewQuadric()
         G.glMaterialfv(GL_FRONT, GL_DIFFUSE, self.LINK_COLOR)
         gluCylinder(quad, self.LINK_RADIUS, self.LINK_RADIUS, r, 16, 1)
         gluDeleteQuadric(quad)
Example #28
def test_constrain_on_polymer_end():
    end = Point(index=19, position=[0.2, 0.2, 0.2])
    #end = Point(index=199,position=[0.2,0.2,0.2])
    box = Box([0, 0, 0], [20, 20, 20])
    coords, bonds, type_beads, ids = one_polymer(N=20,
                                                 type_bead=1,
                                                 ptolerance=0,
                                                 type_polymer="linear",
                                                 start_id=0,
                                                 lconstrain=[end],
                                                 gconstrain=[box],
                                                 max_trial=1000)
    print norm(coords[-2] - np.array([0.2, 0.2, 0.2]))
    assert (norm(coords[-2] - np.array([0.2, 0.2, 0.2])) < 3)
Example #29
    def cohesion(self, boids, center_of_mass):

        steering = np.zeros(2)
        vec_to_com = center_of_mass - self.position
        mag = norm(vec_to_com, 2)
        if mag > 0:
            vec_to_com = (vec_to_com / mag) * self.max_speed

        steering = vec_to_com - self.velocity

        mag = norm(steering, 2)
        if mag > self.max_force_coe:
            steering = (steering / mag) * self.max_force_coe

        return steering
Example #30
def sparse_cosine(ind1, data1, ind2, data2):
    aux_inds, aux_data = sparse_mul(ind1, data1, ind2, data2)
    result = 0.0
    norm1 = norm(data1)
    norm2 = norm(data2)

    for i in range(aux_data.shape[0]):
        result += aux_data[i]

    if norm1 == 0.0 and norm2 == 0.0:
        return 0.0
    elif norm1 == 0.0 or norm2 == 0.0:
        return 1.0
    else:
        return 1.0 - (result / (norm1 * norm2))
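
Note: a hedged dense cross-check of `sparse_cosine` (NumPy only, independent of the module's `sparse_mul` helper):

import numpy as np

def dense_cosine_distance(a, b):
    # Reference cosine distance with the same zero-norm edge cases as sparse_cosine.
    norm1, norm2 = np.linalg.norm(a), np.linalg.norm(b)
    if norm1 == 0.0 and norm2 == 0.0:
        return 0.0
    if norm1 == 0.0 or norm2 == 0.0:
        return 1.0
    return 1.0 - np.dot(a, b) / (norm1 * norm2)

print(dense_cosine_distance(np.array([1.0, 0.0]), np.array([1.0, 1.0])))  # ~0.2929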
Example #31
def dist_triangle_probs_to_sampled_surface(surf_pos, surf_normals, gen_verts, gen_faces, gen_face_probs, n_sample_pts=5000):
    if gen_faces.shape[0] == 0:
        return torch.tensor(0., device=surf_pos.device)

    # get a characteristic length
    char_len = utils.norm(surf_pos - torch.mean(surf_pos, dim=0, keepdim=True)).mean()

    # Sample points on the generated triangulation
    samples, face_inds, _ = mesh_utils.sample_points_on_surface(
        gen_verts, gen_faces, n_sample_pts, return_inds_and_bary=True)

    # Likelihoods associated with each point
    point_probs = gen_face_probs[face_inds]

    # Measure the distance to the surface
    knn_dist, neigh = knn.find_knn(samples, surf_pos, k=1)
    neigh_pos = surf_pos[neigh.squeeze(1), :]

    if len(surf_normals) == 0 :
        dists = knn_dist
    else:
        neigh_normal = surf_normals[neigh.squeeze(1), :]
        vecs = neigh_pos - samples
        dists = torch.abs(utils.dot(vecs, neigh_normal))
    
    # Expected distance integral
    exp_dist = torch.mean(dists * point_probs)

    return exp_dist / char_len
Example #32
    def side(self, v0, v1, v2, origin, direction):
        v0v1 = sub(v1, v0)
        v0v2 = sub(v2, v0)

        N = cross(v0v1, v0v2)
        
        raydirection = dot(N, direction)

        if abs(raydirection) < 0.0001:
            return None
        
        d = dot(N, v0)

        # plane equation dot(N, P) = d, so solve dot(N, origin) + t*raydirection = d
        t = (d - dot(N, origin)) / raydirection

        if t < 0:
            return None

        P = sum(origin, mul(direction, t))
        U, V, W = barycentric(v0, v1, v2, P)
        
        if U < 0 or V < 0 or W < 0:
            return None
        else: 
            return Intersect(distance = t,
                         point = P,
                         normal = norm(N))
Example #33
    def update(engine, batch):
        E.train()
        D.train()

        image = norm(batch['image'])

        if config.training.use_cuda:
            image = image.cuda(non_blocking=True).float()
        else:
            image = image.float()

        e_optim.zero_grad()
        d_optim.zero_grad()

        z, z_mu, z_logvar = E(image)
        x_r = D(z)

        l_vae_reg = l_reg(z_mu, z_logvar)
        l_vae_recon = l_recon(x_r, image)
        l_vae_total = l_vae_reg + l_vae_recon

        l_vae_total.backward()

        e_optim.step()
        d_optim.step()

        if config.training.use_cuda:
            torch.cuda.synchronize()

        return {
            'TotalLoss': l_vae_total.item(),
            'EncodeLoss': l_vae_reg.item(),
            'ReconLoss': l_vae_recon.item(),
        }
Example #34
def eval():
    model.eval()
    for batch in testing_data_loader:
        input, name = batch[0], batch[2]
        input[0] = utils.norm(input[0], vgg=True)

        with torch.no_grad():
            input = Variable(input)

        if cuda:
            input = input.cuda(gpus_list[0])

        t0 = time.time()
        if opt.chop_forward:
            with torch.no_grad():
                prediction = chop_forward(input, model, opt.upscale_factor)
        else:
            if opt.self_ensemble:
                with torch.no_grad():
                    prediction = x8_forward(input, model)
            else:
                with torch.no_grad():
                    prediction = model(input)
        t1 = time.time()
        print("===> Processing: %s || Timer: %.4f sec." % (name[0], (t1 - t0)))
        prediction = utils.denorm(prediction.data[0], vgg=True)
        save_img(prediction.cpu(), name[0])
Example #35
def encode_points_and_triangles(query_triangles_pos, nearby_points_pos,
                                nearby_triangles_pos=None, nearby_triangle_probs=None):

    B = query_triangles_pos.shape[0]
    Q = query_triangles_pos.shape[1]
    K = nearby_points_pos.shape[2]

    have_triangles = (nearby_triangles_pos is not None)
    if have_triangles:
        K_T = nearby_triangles_pos.shape[2]

    # Normalize neighborhood (translation won't matter, but unit scale is nice)
    # note that we normalize vs. the triangle, not vs. the points
    neigh_centers = torch.mean(query_triangles_pos, dim=2) # (B, Q, 3)
    neigh_scales = torch.mean(utils.norm(query_triangles_pos - neigh_centers.unsqueeze(2)), dim=-1) + 1e-5 # (B, Q)
    nearby_points_pos = nearby_points_pos.clone() / neigh_scales.unsqueeze(-1).unsqueeze(-1)
    query_triangles_pos = query_triangles_pos.clone() / neigh_scales.unsqueeze(-1).unsqueeze(-1)
    if have_triangles:
        nearby_triangles_pos = nearby_triangles_pos.clone() / neigh_scales.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)

    # Encode the nearby points
    point_coords = generate_coords(nearby_points_pos, query_triangles_pos)

    # Encode the nearby triangles
    if have_triangles:
        tri_coords = generate_coords(nearby_triangles_pos.view(B, Q, K_T*3, 3), query_triangles_pos).view(B, Q, K_T, 3, 6)
        max_vals = torch.max(tri_coords, dim=3).values  # (B, Q, K_T, 6)
        min_vals = torch.min(tri_coords, dim=3).values  # (B, Q, K_T, 6)
        triangle_coords = torch.cat((min_vals, max_vals, nearby_triangle_probs.unsqueeze(-1)), dim=-1)

    if have_triangles:
        return point_coords, triangle_coords
    else:
        return point_coords
Example #36
def check_cost(asset: str,
               total: Decimal,
               account: BaseAccount,
               convert_to='BTC') -> bool:
    cost_limit = Decimal(settings.BALANCE_SHOW_LIMIT_BTC)
    exchange = account.trade_exchange

    if norm(total) == '0':
        return False

    if asset == convert_to:
        return total >= cost_limit

    price = get_price(asset, convert_to, exchange)
    if price is None:
        return True

    cost = price * total

    if cost < cost_limit:
        logger.info(
            '%s %s balance is %s, too small. Price %s, cost %s. Skipping...',
            account.owner, asset, total, price, cost)
        return False

    return True
Example #37
def hand_velocity_histogram(frames,bins = 4,range=(-1.,1.)):
    length = range[1]-range[0]
    bin_size = length/bins

    def hround(v):
        return min(bins-1,int((v-range[0])/bin_size))

    l = list_3d(bins)
    for frame in frames:
        for hand in frame.hands():
            v = hand.velocity()
            if v:
                x = v.x/utils.norm(v)
                y = v.y/utils.norm(v)
                z = v.z/utils.norm(v)
                l[hround(x)][hround(y)][hround(z)] += 1
    return l
Example #38
def palm_normal_histogram(frames,bins = 4,range=(-1.,1.)):
    length = range[1]-range[0]
    bin_size = length/bins

    def hround(v):
        return min(bins-1,int((v-range[0])/bin_size))

    l = list_3d(bins)
    for frame in frames:
        for hand in frame.hands():
            palm = hand.palm()
            if palm:
                v = palm.direction
                x = v.x/utils.norm(v)
                y = v.y/utils.norm(v)
                z = v.z/utils.norm(v)
                l[hround(x)][hround(y)][hround(z)] += 1
    return l
Example #39
def velocity_histogram(frames,bins = 4,range=(-1.,1.)):
    length = range[1]-range[0]
    bin_size = length/bins

    def hround(v):
        return min(bins-1,int((v-range[0])/bin_size))

    l = list_3d(bins)
    for frame in frames:
        for hand in frame.hands():
            for finger in hand.fingers():
                v = finger.velocity()
                vector = Leap.Vector(v.x,v.y,v.z)
                x = v.x/utils.norm(vector)
                y = v.y/utils.norm(vector)
                z = v.z/utils.norm(vector)
                l[hround(x)][hround(y)][hround(z)] += 1
    return l
Example #40
def hand_velocity_histogram(frames,bins = 8,range=(-1.,1.)):
    """
        Feature based on a 3d histogram of the normalized velocity vectors of each hand
    """
    length = range[1]-range[0]
    bin_size = length/bins

    def hround(v):
        return min(bins-1,int((v-range[0])/bin_size))

    l = list_3d(bins)
    for frame in frames:
        for hand in frame.hands():
            v = hand.velocity()
            if v:
                x = v.x/utils.norm(v)
                y = v.y/utils.norm(v)
                z = v.z/utils.norm(v)
                l[hround(x)][hround(y)][hround(z)] += 1
    return l
Example #41
def position_histogram(frames,bins = 4,range=(-1.,1.)):
    length = range[1]-range[0]
    bin_size = length/bins

    def hround(v):
        return min(bins-1,int((v-range[0])/bin_size))

    l = list_3d(bins)
    positions = []
    for frame in frames:
        for hand in frame.hands():
            for finger in hand.fingers():
                p = finger.tip().position
                positions.append(p)
    average_p = utils.ave_v(positions)
    for index,position in enumerate(positions):
        positions[index] = utils.subtract(position,average_p)
        v = positions[index]
        x = v.x/utils.norm(v)
        y = v.y/utils.norm(v)
        z = v.z/utils.norm(v)
        l[hround(x)][hround(y)][hround(z)] += 1
    return l
Example #42
def length(frames,amount_used=.1):
    """
        Returns the average distance each finger moves in the gesture
    """

    num_frames = len(frames)
    num_used = int(amount_used*num_frames)

    firsts = [[] for i in range(num_used)]
    lasts = [[] for i in range(num_used)]

    for f,first in zip(frames[0:num_used],firsts):
        positions = get_positions(f)
        for position in positions:
            first.append(position)
    for f,last in zip(frames[-num_used:],lasts):
        positions = get_positions(f)
        for position in positions:
            last.append(position)

    lengths = [utils.norm(utils.subtract(utils.average_position(first),utils.average_position(last))) for first,last in zip(firsts,lasts)]

    return lengths
Example #43
 def Consultar(self, nro_doc):
     "Llama a la API pública de AFIP para obtener los datos de una persona"
     n = 0
     while n <= 4:
         n += 1                          # retry 3 times
         try:
             if not self.client:
                 if DEBUG:
                     warnings.warn("reconectando intento [%d]..." % n)
                 self.Conectar()
             self.response = self.client("sr-padron", "v2", "persona", str(nro_doc))
         except Exception as e:
             self.client = None
             ex = exception_info()
             self.Traceback = ex.get("tb", "")
             try:
                 self.Excepcion = norm(ex.get("msg", "").replace("\n", ""))
             except:
                 self.Excepcion = "<no disponible>"
             if DEBUG:
                 warnings.warn("Error %s [%d]" % (self.Excepcion, n))
         else:
             break
     else:
         return False
     result = json.loads(self.response)
     if result['success']:
         data = result['data']
         # extract the taxpayer's general data:
         self.cuit = data["idPersona"]
         self.tipo_persona = data["tipoPersona"]
         self.tipo_doc = TIPO_CLAVE.get(data["tipoClave"])
         self.dni = data.get("numeroDocumento")
         self.estado = data.get("estadoClave")
         self.denominacion = data.get("nombre")
         # parse the fiscal address
         domicilio = data.get("domicilioFiscal")
         if domicilio:
             self.direccion = domicilio.get("direccion", "")
             self.localidad = domicilio.get("localidad", "")  # not used in CABA
             self.provincia = PROVINCIAS.get(domicilio.get("idProvincia"), "")
             self.cod_postal = domicilio.get("codPostal")
         else:
             self.direccion = self.localidad = self.provincia = ""
             self.cod_postal = ""
         # backwards compatibility:
         self.domicilios = ["%s - %s (%s) - %s" % (
                                 self.direccion, self.localidad, 
                                 self.cod_postal, self.provincia,) ]
         # parse tax registrations:
         self.impuestos = data.get("impuestos", [])
         self.actividades = data.get("actividades", [])
         if 32 in self.impuestos:
             self.imp_iva = "EX"
         elif 33 in self.impuestos:
             self.imp_iva = "NI"
         elif 34 in self.impuestos:
             self.imp_iva = "NA"
         else:
             self.imp_iva = "S" if 30 in self.impuestos else "N"
         mt = data.get("categoriasMonotributo", {})
         self.monotributo = "S" if mt else "N"
         self.actividad_monotributo = "" # TODO: mt[0].get("idCategoria")
         self.integrante_soc = ""
         self.empleador = "S" if 301 in self.impuestos else "N"
         self.cat_iva = ""
         self.data = data
     else:
         error = result['error']
         self.Excepcion = error['mensaje']
     return True
Example #44
import matplotlib.pyplot as plt
import numpy as np
from pylab import *
import random
import utils

plt.clf()
hit = 0
candidate = 0
data = [] 
where_we_at = 0
acceptance_ratio = 0
mixture = lambda x: .2*utils.norm(x,1,25) + .3*utils.norm(x,-2,1) + .5*utils.norm(x,3,4)

#for i in range(1000):
for i in range(500):
    candidate = random.gauss(where_we_at,3)
    acceptance_ratio = mixture(candidate)/mixture(where_we_at) 
    
    if (acceptance_ratio >= 1):
        #auto accept
        #if (i >= 500):
        hit += 1
        data.append(candidate)
        where_we_at = candidate

    else:
        if (random.uniform(0,1) <= acceptance_ratio):
            #accept
            #if (i >= 500):
            hit += 1
            data.append(candidate)
            where_we_at = candidate
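
Note: the snippet stops before inspecting the chain. A hedged follow-up, assuming `utils.norm(x, mu, var)` is a Gaussian pdf as the mixture weights suggest:

xs = np.linspace(-10, 10, 400)
plt.hist(data, bins=40, density=True, alpha=0.5, label="samples")
plt.plot(xs, [mixture(x) for x in xs], label="target mixture")
plt.legend()
plt.show()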
Example #45
def test_constrain_on_polymer_start():    
    start = Point(index=0,position=[0.2,0.2,0.2])
    #end = Point(index=199,position=[0.2,0.2,0.2])
    box = Box([0,0,0],[10,10,10])
    coords,bonds,type_beads,ids = one_polymer(N=20,type_bead=1,ptolerance=0,type_polymer="linear",start_id=0,lconstrain=[start],gconstrain=[box])   
    assert(norm(coords[0]-np.array([0.2,0.2,0.2])) < 0.01)
Example #46
def test_create():
    
    coords,bonds,type_beads,ids = one_polymer(N=2,type_bead=0,liaison={"0-0":[1.0,0]},ptolerance=0,type_polymer="linear",start_id=0)
    assert (bonds[0] == [[0,0,0,1]])
    assert (round(norm(coords[0]-coords[1]),3) == 1.00)
    assert(type_beads == [0,0])
Example #47
def test_create_mixture():
    coords,bonds,type_beads,ids = one_polymer(N=3,type_bead=[0,1,0],liaison={"0-0":[1.0,0],"0-1":[2.0,1]},ptolerance=0,type_polymer="linear",start_id=0)
    assert (bonds[0] == [[0,1,0,1],[1,1,1,2]])
    assert (round(norm(coords[0]-coords[1]),3) == 2.00)
    assert (round(norm(coords[1]-coords[2]),3)== 2.00)
Example #48
                 "empleador", "imp_ganancias", "integrante_soc"]
     csv_writer.writerow(columnas)
     
     for fila in csv_reader:
         cuit = (fila[0] if fila else "").replace("-", "")
         if cuit.isdigit():
             if '--online' in sys.argv:
                 padron.Conectar(trace="--trace" in sys.argv)
                 print "Consultando AFIP online...", cuit,
                 ok = padron.Consultar(cuit)
             else:
                 print "Consultando AFIP local...", cuit,
                 ok = padron.Buscar(cuit)
             print 'ok' if ok else "error", padron.Excepcion
             # the address may be Latin-1 encoded, normalize it
             csv_writer.writerow([norm(getattr(padron, campo, ""))
                                  for campo in columnas])
 else:
     cuit = len(sys.argv)>1 and sys.argv[1] or "20267565393"
     # query a single CUIT:
     if '--online' in sys.argv:
         padron.Conectar(trace="--trace" in sys.argv)
         print "Consultando AFIP online...",
         ok = padron.Consultar(cuit)
         print 'ok' if ok else "error", padron.Excepcion
         print "Denominacion:", padron.denominacion
         print "CUIT:", padron.cuit 
         print "Tipo:", padron.tipo_persona, padron.tipo_doc, padron.dni
         print "Estado:", padron.estado
         print "Direccion:", padron.direccion
         print "Localidad:", padron.localidad
Example #49
 def is_quest_item(self):
     return norm(self.type).startswith(constants.QUEST_ITEMS)
Example #50
 def __add__(self, other):
     if other.cutdef != self.cutdef:
         self.log.verbose('addition of counters with different cuts')
     y = self.count + other.count
     dy = utils.norm((self.error, other.error)) 
     return counter('(%s)+(%s)'%(self.symbol, other.symbol), y, dy, self.cutdef)
Example #51
 def __sub__(self, other):
     if other.cutdef != self.cutdef:
         self.log.verbose('difference of counters with different cuts')
     y = self.count - other.count
     dy = utils.norm((self.error, other.error)) 
     return counter('(%s)-(%s)'%(self.symbol, other.symbol), y, dy, self.cutdef)
Example #52
 def __mul__(self, other):
     if other.cutdef != self.cutdef:
         self.log.verbose('product of counters with different cuts')
     y = self.count * other.count
     dy = utils.norm((other.count*self.error, self.count*other.error)) 
     return counter('(%s)*(%s)'%(self.symbol, other.symbol), y, dy, self.cutdef)
Example #53
 def __div__(self, other):
     if other.cutdef != self.cutdef:
         self.log.verbose('ratio of counters with different cuts')
     y = self.count / other.count
     dy = 1/pow(other.count,2) * utils.norm((other.count*self.error, self.count*other.error)) 
     return counter('(%s)/(%s)'%(self.symbol, other.symbol), y, dy, self.cutdef)
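
Note: these four operators propagate statistical errors in quadrature via `utils.norm` (see the sketch after Example #6). A hedged numeric illustration of the ratio rule:

import math

# y = a/b with a = 10 +/- 1 and b = 5 +/- 0.5:
a, da = 10.0, 1.0
b, db = 5.0, 0.5
dy = 1.0 / b**2 * math.sqrt((b * da)**2 + (a * db)**2)
print(a / b, dy)  # -> 2.0 0.2828...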
Example #54
def main():
    "Función principal de pruebas (obtener CAE)"
    import os, time
    global CONFIG_FILE

    DEBUG = '--debug' in sys.argv

    if '--constancia' in sys.argv:
        padron = WSSrPadronA5()
        SECTION = 'WS-SR-PADRON-A5'
        service = "ws_sr_constancia_inscripcion"
    else:
        padron = WSSrPadronA4()
        SECTION = 'WS-SR-PADRON-A4'
        service = "ws_sr_padron_a4"

    config = abrir_conf(CONFIG_FILE, DEBUG)
    if config.has_section('WSAA'):
        crt = config.get('WSAA', 'CERT')
        key = config.get('WSAA', 'PRIVATEKEY')
        cuit = config.get(SECTION, 'CUIT')
    else:
        crt, key = "reingart.crt", "reingart.key"
        cuit = "20267565393"
    url_wsaa = url_ws = None
    if config.has_option('WSAA','URL'):
        url_wsaa = config.get('WSAA', 'URL')
    if config.has_option(SECTION,'URL') and not HOMO:
        url_ws = config.get(SECTION, 'URL')

    # obtain the TA (access ticket) for testing
    from wsaa import WSAA

    cache = ""
    ta = WSAA().Autenticar(service, crt, key, url_wsaa)

    padron.SetTicketAcceso(ta)
    padron.Cuit = cuit
    padron.Conectar(cache, url_ws, cacert="conf/afip_ca_info.crt")

    if "--dummy" in sys.argv:
        print padron.client.help("dummy")
        wssrpadron4.Dummy()
        print "AppServerStatus", wssrpadron4.AppServerStatus
        print "DbServerStatus", wssrpadron4.DbServerStatus
        print "AuthServerStatus", wssrpadron4.AuthServerStatus

    if '--csv' in sys.argv:
        csv_reader = csv.reader(open("entrada.csv", "rU"), 
                                dialect='excel', delimiter=",")
        csv_writer = csv.writer(open("salida.csv", "w"), 
                                dialect='excel', delimiter=",")
        encabezado = next(csv_reader)
        columnas = ["cuit", "denominacion", "estado", "direccion",
                    "localidad", "provincia", "cod_postal",
                    "impuestos", "actividades", "imp_iva", 
                    "monotributo", "actividad_monotributo", 
                    "empleador", "imp_ganancias", "integrante_soc"]
        csv_writer.writerow(columnas)
        
        for fila in csv_reader:
            cuit = (fila[0] if fila else "").replace("-", "")
            if cuit.isdigit():
                print "Consultando AFIP online...", cuit,
                try:
                    ok = padron.Consultar(cuit)
                except SoapFault as e:
                    ok = None
                    if e.faultstring != "No existe persona con ese Id":
                        raise
                print 'ok' if ok else "error", padron.Excepcion
                # the address may be Latin-1 encoded, normalize it
                csv_writer.writerow([norm(getattr(padron, campo, ""))
                                     for campo in columnas])
        sys.exit(0)

    try:

        if "--prueba" in sys.argv:
            id_persona = "20000000516"
        else:
            id_persona = len(sys.argv)>1 and sys.argv[1] or "20267565393"

        if "--testing" in sys.argv:
            padron.LoadTestXML("tests/xml/%s_resp.xml" % service)
        print "Consultando AFIP online via webservice...",
        ok = padron.Consultar(id_persona)

        if DEBUG:
            print "Persona", padron.Persona
            print padron.Excepcion

        print 'ok' if ok else "error", padron.Excepcion
        print "Denominacion:", padron.denominacion
        print "Tipo:", padron.tipo_persona, padron.tipo_doc, padron.nro_doc
        print "Estado:", padron.estado
        print "Direccion:", padron.direccion
        print "Localidad:", padron.localidad
        print "Provincia:", padron.provincia
        print "Codigo Postal:", padron.cod_postal
        print "Impuestos:", padron.impuestos
        print "Actividades:", padron.actividades
        print "IVA", padron.imp_iva
        print "MT", padron.monotributo, padron.actividad_monotributo
        print "Empleador", padron.empleador

        if padron.Excepcion:
            print "Excepcion:", padron.Excepcion
            # see padron.errores for the details

    except:
        raise
        # unreachable unless the raise above is commented out for debugging:
        print padron.XmlRequest
        print padron.XmlResponse
Example #55
import matplotlib.pyplot as plt
import numpy as np
from pylab import *
import random
import utils
import math

plt.clf()
hit = 0
miss = 0
val = 0
yval = 0
data = [] 

mixture = lambda x: .2*utils.norm(x,1,25) + .3*utils.norm(x,-2,1) + .5*utils.norm(x,3,4)
upper_bound = lambda x: 2*utils.norm(x,0,24)

while (hit < 500):
    val = random.gauss(0,math.sqrt(24))
    yval = random.uniform(0,upper_bound(val))
    
    if (yval < mixture(val)):
        hit += 1
        data.append(val)

    else:
        miss += 1

print "HITS: {}".format(hit)
print "MISSES: {}".format(miss)