def average(self):
    """Average the expression ratios of all experiments into a single experiment."""
    import math
    avgexp = Experiment("Average", "gene_average.csv", "average")
    for gene in self.gene_list:
        gene_vals = []
        for e in self.experiments:
            gene_vals.append(e.ratios[gene])
        # math.fsum gives an accurately rounded floating-point sum
        avgexp.ratios[gene] = math.fsum(gene_vals) / len(gene_vals)
    self.experiments = [avgexp]
Example No. 2
import math
import operator
from collections import defaultdict


def k_nearest_get_class_color(point, points, points_labels, k=5):
    # Euclidean distance from `point` to every reference point, paired with its label
    dists = []
    for pin in range(len(points)):
        dist = math.sqrt(sum(math.pow(points[pin][i] - point[i], 2)
                             for i in range(len(point))))
        dists.append((dist, points_labels[pin]))
    dists.sort()
    # find the class based on the closest k neighbors
    classcount = defaultdict(int)
    for dist, p_label in dists[:k]:
        classcount[p_label] += 1

    return max(classcount.items(), key=operator.itemgetter(1))[0]
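A quick illustrative call, with made-up points and labels (the data here is purely hypothetical):

points = [(0, 0), (1, 1), (5, 5), (6, 5)]
labels = ['red', 'red', 'blue', 'blue']
print(k_nearest_get_class_color((0.5, 0.5), points, labels, k=3))  # -> 'red'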
Example No. 3
import numpy as np


def J(theta, X_b, y):
    """
    Loss function over multiple samples.
    :param theta: parameter vector
    :param X_b: design matrix (with a bias column)
    :param y: target labels
    :return: mean cross-entropy loss, or inf if the log overflows
    """
    y_hat = self._sigmoid(X_b.dot(theta))
    try:
        # y_hat is a numpy array, so the vectorized numpy log/sum are needed here
        return -np.sum(y * np.log(y_hat) +
                       (1 - y) * np.log(1 - y_hat)) / len(y)
    except Exception:
        return float('inf')
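The same cross-entropy formula can be tried standalone; the numbers below are made up, and the sigmoid is written inline instead of calling self._sigmoid:

import numpy as np

theta = np.array([0.5, -0.25])
X_b = np.array([[1.0, 2.0], [1.0, -1.0]])
y = np.array([1, 0])
y_hat = 1 / (1 + np.exp(-X_b.dot(theta)))  # sigmoid of the linear scores
loss = -np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)) / len(y)
print(loss)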
Example No. 4
'''
We can import a module in many ways,
and each way works differently.

'''

# import the module
import math
'''
In this type of import
we always have to access
resources inside the math module
using the dot ( . ) operator:

math.fsum([3, 4])

'''

# import specific functions or classes from a module
from math import fsum, floor, sqrt
'''
We can directly call these functions without any prefix:

fsum([2, 3])
floor(8.7)
sqrt(9)

'''
# import all names from the module
from math import *
fsum([2, 3])
Example No. 5
'''
Basic arithmetic on two numbers read from the user.
The math module has no sum/sub/divide/multiply helpers,
so the plain arithmetic operators are used instead.
'''

x = int(input("Enter first number: "))
y = int(input("Enter second number: "))

total = x + y
difference = x - y
quotient = x / y
product = x * y

print("\nSum: ", total, "\nSubtract: ", difference, "\nDivide: ", quotient,
      "\nMultiply: ", product)
Example No. 6

import math

pow(3, 2)
for i in range(1, 11):
    print(pow(i, 2))  # prints the squares of the first 10 numbers
    print(pow(i, 3))  # prints the cubes of the first 10 numbers
    print(pow(i, 0.5))  # prints the square roots of the first 10 numbers

lst = [11, 32, 34, 54, 67, 87, 24, 55, 36, 87]
max(lst)  # returns the maximum value
min(lst)  # returns the minimum value
math.ceil(5.3)  # rounds up to the nearest integer (math.floor rounds down)
math.copysign(5, -1)  # returns 5.0 with the sign of -1
math.factorial(5)  # returns the factorial of the number
math.fmod(40, 6)  # returns the remainder of 40 / 6
math.frexp(5)  # returns the mantissa and exponent of 5 as the pair (m, e)
math.fsum(lst)  # returns an accurate floating-point sum of the values
math.exp(5)  # returns e**5
math.expm1(6)  # returns e**6 - 1
math.sqrt(5)  # returns the square root of 5
math.gcd(46, 84)  # returns the greatest common divisor
math.lcm(25, 20)  # returns the least common multiple (Python 3.9+)
math.nextafter(
    34, 36
)  # returns the next representable float after 34 in the direction of 36 (Python 3.9+)
math.prod(lst)  # returns the product of all the values in the iterable
math.remainder(6, 5)  # returns the IEEE 754-style remainder
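Note that the builtin sum and math.fsum can give different results on floats, because fsum returns a correctly rounded sum:

vals = [0.1] * 10
print(sum(vals))        # 0.9999999999999999
print(math.fsum(vals))  # 1.0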
Example No. 7

def tot_num_characters(self):
    """A property method to return the total number of characters in the current sentence."""
    return sum(len(word) for word in self._stanza_doc.sentences[0].words)
Example No. 8
def sumTwoNumbers(first, second):
    number = first + second
    return number
Example No. 9
for i in range(training_nums):
    start_time = time.time()
    images_batch, labels_batch = sess.run([images_ph, labels_ph])
    _, loss_value = sess.run([train_step, loss],
                             feed_dict={
                                 images_ph: images_batch,
                                 labels_ph: labels_batch
                             })

    duration = time.time() - start_time
    if i % 10 == 0:
        exa_per_sec = batch_size / duration
        sec_pre_bat = float(duration)
        format_str = 'step: %d, losses= %.2f (%.1f examples/second & %.3f seconds/batch)'
        print(format_str % (i, loss_value, exa_per_sec, sec_pre_bat))
"""14.开始测试"""
import math
num_examples = 10000
num_iter = int(math.ceil(num_examples / batch_size))
num_total = num_iter * batch_size
true_count = 0
for i in range(num_iter):
    images_batch, labels_batch = sess.run([images_test, labels_test])
    predictions = sess.run([topKAc],
                           feed_dict={
                               images_ph: images_batch,
                               labels_ph: labels_batch
                           })
    true_count += np.sum(predictions)
print('Accuracy @Top 1 is :%.3f' % (true_count / num_total))
Example No. 10
import math

print(math.fsum([1, 5]))
Example No. 11
def pool(dim,
         tensor,
         kernel_size=3,
         stride=None,
         dilation=1,
         padding=0,
         bound='zero',
         reduction='mean',
         return_indices=False,
         affine=None):
    """Perform a pooling

    Parameters
    ----------
    dim : {1, 2, 3}
        Number of spatial dimensions
    tensor : (*batch, *spatial_in) tensor
        Input tensor
    kernel_size : int or sequence[int], default=3
        Size of the pooling window
    stride : int or sequence[int], default=`kernel_size`
        Strides between output elements.
    dilation : int or sequence[int], default=1
        Strides between elements of the kernel.
    padding : 'auto' or int or sequence[int], default=0
        Padding performed before the convolution.
        If 'auto', the padding is chosen such that the shape of the
        output tensor is `spatial_in // stride`.
    bound : str, default='zero'
        Boundary conditions used in the padding.
    reduction : {'mean', 'max', 'min', 'median', 'sum'} or callable, default='mean'
        Function to apply to the elements in a window.
    return_indices : bool, default=False
        Return input index of the min/max/median element.
        For other types of reduction, return None.
    affine : (..., D+1, D+1) tensor, optional
        Input orientation matrix

    Returns
    -------
    pooled : (*batch, *spatial_out) tensor
    indices : (*batch, *spatial_out, dim) tensor, if `return_indices`
    affine : (..., D+1, D+1) tensor, if `affine`

    """
    # move everything to the same dtype/device
    tensor = torch.as_tensor(tensor)

    # sanity checks + reshape for torch's conv
    batch = tensor.shape[:-dim]
    spatial_in = tensor.shape[-dim:]
    tensor = tensor.reshape([-1, *spatial_in])

    # Perform padding
    kernel_size = make_list(kernel_size, dim)
    stride = make_list(stride or None, dim)
    stride = [st or ks for st, ks in zip(stride, kernel_size)]
    dilation = make_list(dilation or 1, dim)
    padding = make_list(padding, dim)
    padding0 = padding  # save it to update the affine
    for i in range(dim):
        if isinstance(padding[i], str) and padding[i].lower() == 'auto':
            if kernel_size[i] % 2 == 0:
                raise ValueError('Cannot compute automatic padding '
                                 'for even-sized kernels.')
            padding[i] = ((kernel_size[i] - 1) * dilation[i] + 1) // 2

    use_torch = reduction in ('mean', 'avg', 'max') and dim in (1, 2, 3)

    if (not use_torch) or bound != 'zero' and sum(padding) > 0:
        # torch implementation -> handles zero-padding
        # our implementation -> needs explicit padding
        tensor = utils.pad(tensor, padding, bound, side='both')
        padding = [0] * dim

    return_indices0 = False
    pool_fn = reduction if callable(reduction) else None

    if reduction in ('mean', 'avg'):
        return_indices0 = True
        return_indices = False
        pool_fn = (F.avg_pool1d if dim == 1 else F.avg_pool2d
                   if dim == 2 else F.avg_pool3d if dim == 3 else None)
        if pool_fn:
            pool_fn0 = pool_fn
            pool_fn = lambda x, *a, **k: pool_fn0(
                x[:, None], *a, **k, padding=padding, dilation=dilation)[:, 0]
    elif reduction == 'max':
        pool_fn = (F.max_pool1d if dim == 1 else F.max_pool2d
                   if dim == 2 else F.max_pool3d if dim == 3 else None)
        if pool_fn:
            pool_fn0 = pool_fn
            pool_fn = lambda x, *a, **k: pool_fn0(
                x[:, None], *a, **k, padding=padding, dilation=dilation)[:, 0]

    if not pool_fn:
        if reduction not in ('min', 'max', 'median'):
            return_indices0 = True
            return_indices = False
        if reduction == 'mean':
            reduction = lambda x: math.mean(x, dim=-1)
        elif reduction == 'sum':
            reduction = lambda x: math.sum(x, dim=-1)
        elif reduction == 'min':
            reduction = lambda x: math.min(x, dim=-1)
        elif reduction == 'max':
            reduction = lambda x: math.max(x, dim=-1)
        elif reduction == 'median':
            reduction = lambda x: math.median(x, dim=-1)
        elif not callable(reduction):
            raise ValueError(f'Unknown reduction {reduction}')
        pool_fn = lambda *a, **k: _pool(*a, **k, reduction=reduction)

    outputs = []
    if return_indices:
        tensor, ind = pool_fn(tensor, kernel_size, stride=stride)
        ind = utils.ind2sub(ind, stride)
        ind = utils.movedim(ind, 0, -1)
        outputs.append(ind)
    else:
        tensor = pool_fn(tensor, kernel_size, stride=stride)
        if return_indices0:
            outputs.append(None)

    spatial_out = tensor.shape[-dim:]
    tensor = tensor.reshape([*batch, *spatial_out])
    outputs = [tensor, *outputs]

    if affine is not None:
        affine, _ = affine_conv(affine,
                                spatial_in,
                                kernel_size=kernel_size,
                                stride=stride,
                                padding=padding0,
                                dilation=dilation)
        outputs.append(affine)

    return outputs[0] if len(outputs) == 1 else tuple(outputs)
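A minimal usage sketch, assuming torch is installed and that `pool` together with the helpers it references (`math`, `utils`, `make_list`, `affine_conv`, `_pool`) comes from the surrounding library; the tensor shape and values below are illustrative only:

import torch

x = torch.rand(2, 32, 32)      # (*batch, *spatial_in) with dim=2
pooled = pool(2, x, kernel_size=2, stride=2, reduction='max')
print(pooled.shape)            # expected: torch.Size([2, 16, 16])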
Example No. 12
import math

# Number of training samples and boosting rounds
# (placeholder values; the original snippet left most of these empty)
m = 30
T = 10
# Labels y[i] and weak-hypothesis outputs h[t][i], both in {-1, +1} (illustrative stubs)
y = [1] * m
h = [[1] * m for _ in range(T)]
# Weighted error of each weak hypothesis (illustrative stub)
epsilon = [0.3] * T

# Hypothesis weights and per-round normalization factors
alpha = []
z = []
# Distribution over samples: starts uniform
# REFER TO SLIDES ON BOOSTING
D = [[1 / m] * m]

for t in range(T):
    # alpha_t = 1/2 * ln((1 - eps_t) / eps_t)
    alpha.append(0.5 * math.log((1 - epsilon[t]) / epsilon[t]))
    # Z_t = sum_i D_t(i) * exp(-alpha_t * y_i * h_t(x_i))
    z.append(sum(D[t][i] * math.exp(-alpha[t] * y[i] * h[t][i])
                 for i in range(m)))
    # D_{t+1}(i) = D_t(i) * exp(-alpha_t * y_i * h_t(x_i)) / Z_t
    D.append([D[t][i] * math.exp(-alpha[t] * y[i] * h[t][i]) / z[t]
              for i in range(m)])
Example No. 13
def next_triangle_number(n):
    return sum(range(n))
Example No. 14
def statistics(a):
    mean = sum(a) / len(a)
    # population standard deviation, computed without external modules
    std = (sum((x - mean) ** 2 for x in a) / len(a)) ** 0.5
    high_score = max(a)
    low_score = min(a)
    return mean, std, high_score, low_score
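For example, with a small made-up list of scores:

print(statistics([70, 80, 90, 100]))  # (85.0, 11.180339887498949, 100, 70)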
Example No. 15
def average(a: float, b: float, c: float) -> float:
    return sum([a, b, c]) / 3
Example No. 16
import math

print(math.fsum([1, 5]))
Example No. 17
def next_triangle_number(n):
    return sum(range(n))
Example No. 18
)  # the inverse Hessian is computed directly, without the optimal algorithm (losing time and memory)
diag = t.diagonal(hess_inv, 0)
cnt = 0
for g in net1.parameters():
    net_squeezed = g.contiguous().view(-1) if cnt == 0 else t.cat(
        [net_squeezed, g.contiguous().view(-1)])
    cnt = 1
L_arr = t.abs(t.div(t.square(net_squeezed), diag))
# print(L_arr)
# masks = make_masks(L_arr, net1, loss_val.item(),  # for OBS the threshold values are the same for all layers
#                    5 * (10 ** 10),
#                    5 * (10 ** 10),
#                    5 * (10 ** 10),
#                    5 * (10 ** 10))
masks = make_masks(
    L_arr,
    net1,
    loss_val.item(),  # for L-OBS each layer has its own threshold value
    5 * (10**3),
    5 * (10**10),
    1 * (10**10),
    5 * (10**2))
for m in masks:
    print("Number of ones in the mask: " + str(m.sum()) + " ; Mask shape: " +
          str(m.shape))
print()
# l_print(L_arr, net1.k, net1.l)
prune_by_masks(net1, masks)
net_print(net1)
Example No. 19
def test_fsum_method(self):
    self.assertAlmostEqual(math.fsum([1.1, 2.2]), 3.3)
Example No. 20
import math

print(math.factorial(2))
ls = [1, 3, 4, 5, 6, 2, 9]
print(math.fsum([2, 3]))  # accurate float sum; the math module has no sum/div/min/max
print(10 / 5)
print(min(ls))
print(max(ls))

# etc
Example No. 21
x = int(input("Enter the first number: "))
y = int(input("Enter the second number: "))
z = x + y
print("The addition is: ", z)