Example #1
def sum_of_prime_factors(number: int) -> int:
    try:
        return memoized_sum_of_prime_factors[number]
    except KeyError:
        result = sum(filter(prime, factors(number)))
        memoized_sum_of_prime_factors[number] = result
        return result
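The snippet above relies on a module-level cache (memoized_sum_of_prime_factors) and two helpers, prime and factors, that are defined elsewhere in the project. Purely as an assumption about that surrounding module, a minimal self-contained sketch of compatible definitions:

# Hypothetical stand-ins; the project's real helpers may differ.
memoized_sum_of_prime_factors = {}

def prime(n):
    # naive primality test
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n ** 0.5) + 1))

def factors(n):
    # every divisor of n greater than 1 (one plausible contract for factors)
    return [d for d in range(2, n + 1) if n % d == 0]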
Example #2
def _clf_mlp(trX,teX,trY,teY):
	print "MLP"
	print trX.shape,"trX shape"
	print "Enter Layer for MLP"
	layer=input()
	# print "enter delIdx"
	# delIdx=input()
	# while(delIdx):
	# 	trX=np.delete(trX,-1,axis=0)
	# 	trY=np.delete(trY,-1,axis=0)
	# 	delIdx=delIdx-1
	print "factors",factors(trX.shape[0])	
	teY=teY.astype(np.int32)
	trY=trY.astype(np.int32)
	print trX.shape,"trX shape"
	print "enter no of mini batch"
	mini_batch=int(input())
	mlp = TfMultiLayerPerceptron(eta=0.01, 
                             epochs=100, 
                             hidden_layers=layer,
                             activations=['relu' for i in range(len(layer))],
                             print_progress=3, 
                             minibatches=mini_batch, 
                             optimizer='adam',
                             random_seed=1)
	mlp.fit(trX,trY)
	pred=mlp.predict(teX)
	print _f_count(teY),"test f count"
	pred=pred.astype(np.int32)
	print _f_count(pred),"pred f count"
	conf_mat=confusion_matrix(teY, pred)
	process_cm(conf_mat, to_print=True)
	print precision_score(teY,pred),"Precision Score"
	print recall_score(teY,pred),"Recall Score"
	print roc_auc_score(teY,pred), "ROC_AUC"
Example #3
 def problem(self):
     l = []
     n = 0
     while len(l) <= 500:
         n += 1
         s = n * (n + 1) / 2
         l = factors(s)
     return s
Example #4
File: p108.py Project: icot/euler
def gennum(ndivs):
    exps = utils.factors(ndivs)
    exps.reverse()
    exps = [exp - 1 for exp in exps]
    primes = utils.prime_sieve(100)
    g = izip(primes, exps)
    n = [pow(item[0], item[1]) for item in g]
    return reduce(lambda x, y: x * y, n)
Example #5
 def problem(self):
     l = []
     n = 0
     while len(l) <= 500:
         n += 1
         s = n * (n + 1) / 2
         l = factors(s)
     return s
Example #6
 def full_factorization(self):
     self.factors = []
     First = True
     if self.rfactors:
         for item in self.rfactors:
             buf = None
             elem = None
             if First and (item % (self.base + self.add) == 0):
                 elem = item//(self.base + self.add)
                 First = False
             else:
                 elem = item
             buf = factors(elem)
             if buf:
                 self.factors.extend(buf)
             else:
                 self.factors.append(elem)
     else:
         self.factors = factors(self.toFrac())
     self.factors.sort()
Example #7
File: p122.py Project: icot/euler
def M(k):
    if k <= 1:
        return 0
    else:
        if isprime(k):
            return (M(k-1) + 1)
        else:
            ds = factors(k)
            ks = map(lambda x: m[x], ds)
            # print k, ds, ks
            return sum(ks)
Example #8
def highlyDivisibleTrangularNumber(n):
    ''' return the first triangular number with more than n divisors '''
    if n < 1:
        return 1

    start = 1
    while True:
        tri = start * (start + 1) / 2
        if len(factors(tri)) > n:
            return tri
        start += 1
Example #9
def main():
	pandigital_numbers = set()
	for p in xrange(1, 10000):
		f = factors(p)
		l = len(f) - 1 if len(f) & 1 else len(f)
		for a, b in [(f[i], f[i+1]) for i in xrange(0, l, 2)]:
			n = concat_numbers(a, b, p)
			if is_pandigital(n):
				pandigital_numbers.add(p)
				print "{0} is pandigital since {1}*{2}={3}".format(n, a, b, p)

	print "The answer is", sum(pandigital_numbers)
Example #10
File: p88.py Project: icot/euler
def reductions_full(n):
    fs = factors(n)
    acc = [fs]
    yss = [fs]
    zss = yss
    minl = min(map(len, zss))
    while minl > 2:
        for ys in yss:
            zss = reduct_step(ys)
        acc.append(zss)
        yss = zss
        minl = min(map(len, zss))
    return list(itertools.chain.from_iterable(acc[1:])) + [fs]
Example #11
def main():
    n = int(sys.argv[1])
    must_be_divisible = set(range(2, n + 1))

    p = 1
    for prime in [x for x in must_be_divisible if is_prime(x)]:
        p *= prime

    x = p
    while True:
        x_factors = set(factors(x))
        if must_be_divisible.issubset(x_factors):
            print(x)
            break
        x += p
Example #12
def _clf_softmax(trX, teX, trY, teY):
    print "factors", factors(trX.shape[0])
    print "enter no of mini batch"
    trY = trY.astype(int)
    teY = teY.astype(int)
    mini_batch = int(input())
    clf = TfSoftmaxRegression(eta=0.75,
                              epochs=100,
                              print_progress=True,
                              minibatches=mini_batch,
                              random_seed=1)
    clf.fit(trX, trY)
    pred = clf.predict(teX)
    print _f_count(teY), "test f count"
    pred = pred.astype(np.int32)
    print _f_count(pred), "pred f count"
    conf_mat = confusion_matrix(teY, pred)
    process_cm(conf_mat, to_print=True)
    print precision_score(teY, pred), "Precision Score"
    print recall_score(teY, pred), "Recall Score"
    print roc_auc_score(teY, pred), "ROC_AUC"
Example #13
def _clf_softmax(trX,teX,trY,teY):
	print "factors",factors(trX.shape[0])
	print "enter no of mini batch"
	trY=trY.astype(int)
	teY=teY.astype(int)
	mini_batch=int(input())
	clf = TfSoftmaxRegression(eta=0.75, 
                         epochs=100, 
                         print_progress=True, 
                         minibatches=mini_batch, 
                         random_seed=1)
	clf.fit(trX, trY)
	pred=clf.predict(teX)
	print _f_count(teY),"test f count"
	pred=pred.astype(np.int32)
	print _f_count(pred),"pred f count"
	conf_mat=confusion_matrix(teY, pred)
	process_cm(conf_mat, to_print=True)
	print precision_score(teY,pred),"Precision Score"
	print recall_score(teY,pred),"Recall Score"
	print roc_auc_score(teY,pred), "ROC_AUC"
Example #14
def distinct_primes_factors(*,
                            consecutive_numbers_count: int = 4,
                            distinct_prime_factors_count: int = 4
                            ) -> List[int]:
    numbers_count = 0
    consecutive_numbers = []
    for number in count(1):
        if numbers_count == consecutive_numbers_count:
            break

        prime_factors_count = capacity(filter(prime, factors(number)))

        if prime_factors_count != distinct_prime_factors_count:
            numbers_count = 0
            consecutive_numbers[:] = []
            continue

        consecutive_numbers.append(number)
        numbers_count += 1

    return consecutive_numbers
Example #15
def emulate(program, ip_reg, start=0, cheat=False):
    """
    Emulate the program to load in the value we're solving for.

    Instruction 0 immediately jumps to 17 to begin the loading
    procedure; when we get back to instruction 1, register 1
    already contains the target number.
    """
    ip = 0
    regs = [start, 0, 0, 0, 0, 0]

    while 0 <= ip < len(program):
        if ip == 1 and cheat:
            return sum(factors(regs[1]))

        cmd, a, b, c = program[ip]
        regs[ip_reg] = ip

        if cmd == 'addr': regs[c] = regs[a] + regs[b]
        elif cmd == 'addi': regs[c] = regs[a] + b
        elif cmd == 'mulr': regs[c] = regs[a] * regs[b]
        elif cmd == 'muli': regs[c] = regs[a] * b
        elif cmd == 'banr': regs[c] = regs[a] & regs[b]
        elif cmd == 'bani': regs[c] = regs[a] & b
        elif cmd == 'borr': regs[c] = regs[a] | regs[b]
        elif cmd == 'bori': regs[c] = regs[a] | b
        elif cmd == 'setr': regs[c] = regs[a]
        elif cmd == 'seti': regs[c] = a
        elif cmd == 'gtir': regs[c] = int(a > regs[b])
        elif cmd == 'gtri': regs[c] = int(regs[a] > b)
        elif cmd == 'gtrr': regs[c] = int(regs[a] > regs[b])
        elif cmd == 'eqir': regs[c] = int(a == regs[b])
        elif cmd == 'eqri': regs[c] = int(regs[a] == b)
        elif cmd == 'eqrr': regs[c] = int(regs[a] == regs[b])

        ip = regs[ip_reg]
        ip += 1

    return regs[0]
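The cheat branch returns sum(factors(regs[1])), which only makes sense if factors(n) yields every divisor of n, including 1 and n itself. In case the repo's utils is unavailable, a stand-in with that assumed contract might look like:

# Assumed contract: all divisors of n (1 and n included).
def factors(n):
    divs = set()
    d = 1
    while d * d <= n:
        if n % d == 0:
            divs.add(d)
            divs.add(n // d)
        d += 1
    return divs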
Example #16
def _clf_mlp(trX, teX, trY, teY):
    print "MLP"
    print trX.shape, "trX shape"
    print "Enter Layer for MLP"
    layer = input()
    # print "enter delIdx"
    # delIdx=input()
    # while(delIdx):
    # 	trX=np.delete(trX,-1,axis=0)
    # 	trY=np.delete(trY,-1,axis=0)
    # 	delIdx=delIdx-1
    print "factors", factors(trX.shape[0])
    teY = teY.astype(np.int32)
    trY = trY.astype(np.int32)
    print trX.shape, "trX shape"
    print "enter no of mini batch"
    mini_batch = int(input())
    mlp = TfMultiLayerPerceptron(
        eta=0.01,
        epochs=100,
        hidden_layers=layer,
        activations=['relu' for i in range(len(layer))],
        print_progress=3,
        minibatches=mini_batch,
        optimizer='adam',
        random_seed=1)
    mlp.fit(trX, trY)
    pred = mlp.predict(teX)
    print _f_count(teY), "test f count"
    pred = pred.astype(np.int32)
    print _f_count(pred), "pred f count"
    conf_mat = confusion_matrix(teY, pred)
    process_cm(conf_mat, to_print=True)
    print precision_score(teY, pred), "Precision Score"
    print recall_score(teY, pred), "Recall Score"
    print roc_auc_score(teY, pred), "ROC_AUC"
Example #17
from utils import factors
import math

n = 600851475143
p = []
for i in range(3, int(math.sqrt(n))):
    if n % i == 0:
        p.append(i)
f = []
for j in p:
    if factors(j) == []:
        f.append(j)
print(max(f))
Example #18
def sumOfProperDivisors(n):
    return sum(factors(n)) - n
Example #19
from utils import factors, product

facMap = {}
for i in range(2, 21):
    facList = list(factors(i))
    facMapNew = {j: facList.count(j) for j in set(facList)}
    for j in set(list(facMapNew.keys()) + facList):
        if j in facMap:
            if j in facMapNew:
                facMap[j] = max(facMapNew[j], facMap[j])
        else:
            facMap[j] = facMapNew[j]

print(product([i[0]**i[1] for i in facMap.items()]))
Example #20
def latlon_points (xmin, xmax, ymin, ymax, res, dlat_file, prec=64):

    # Number of iterations for latitude convergence
    num_lat_iter = 10

    if xmin > xmax:
        print "Error (latlon_points): looks like your domain crosses 180E. Try again with your longitude in the range (0, 360) instead of (-180, 180)."
        sys.exit()

    # Build longitude values
    lon = np.arange(xmin, xmax+res, res)
    # Update xmax if the range doesn't evenly divide by res
    if xmax != lon[-1]:
        xmax = lon[-1]
        print 'Eastern boundary moved to ' + str(xmax)
    # Put xmin in the range (0, 360) for namelist
    if xmin < 0:
        xmin += 360

    # First guess for latitude: resolution scaled by latitude of southern edge
    lat = [ymin]
    while lat[-1] < ymax:
        lat.append(lat[-1] + res*np.cos(lat[-1]*deg2rad))
    lat = np.array(lat)
    # Now iterate to converge on resolution scaled by latitude of centres
    for iter in range(num_lat_iter):
        lat_old = np.copy(lat)
        # Latitude at centres    
        lat_c = 0.5*(lat[:-1] + lat[1:])
        j = 0
        lat = [ymin]
        while lat[-1] < ymax and j < lat_c.size:
            lat.append(lat[-1] + res*np.cos(lat_c[j]*deg2rad))
            j += 1
        lat = np.array(lat)
    # Update ymax
    ymax = lat[-1]
    print 'Northern boundary moved to ' + str(ymax)

    # Write latitude resolutions to file
    dlat = lat[1:] - lat[:-1]
    write_binary(dlat, dlat_file, prec=prec)

    # Remind the user what to do in their namelist
    print '\nChanges to make to input/data:'
    print 'xgOrigin=' + str(xmin)
    print 'ygOrigin=' + str(ymin)
    print 'dxSpacing=' + str(res)
    print "delYfile='" + dlat_file + "' (and copy this file into input/)"

    # Find dimensions of tracer grid
    Nx = lon.size-1
    Ny = lat.size-1
    # Find all the factors
    factors_x = factors(Nx)
    factors_y = factors(Ny)
    print '\nNx = ' + str(Nx) + ' which has the factors ' + str(factors_x)
    print 'Ny = ' + str(Ny) + ' which has the factors ' + str(factors_y)
    print 'If you are happy with this, proceed with interp_bedmap2. At some point, choose your tile size based on the factors and update code/SIZE.h.'
    print 'Otherwise, tweak the boundaries and try again.'

    return lon, lat
Example #21
A number n is called deficient if the sum of its proper divisors is
less than n and it is called abundant if this sum exceeds n.

As 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16, the
smallest number that can be written as the sum of two abundant numbers
is 24. By mathematical analysis, it can be shown that all integers
greater than 28123 can be written as the sum of two abundant numbers.
However, this upper limit cannot be reduced any further by analysis
even though it is known that the greatest number that cannot be
expressed as the sum of two abundant numbers is less than this limit.

Find the sum of all the positive integers which cannot be written as
the sum of two abundant numbers.
"""

from utils import factors

abundant = [i for i in xrange(12, 28124) if i*2 < sum(factors(i))] #note: factors returns the original number as well, so we need to multiply by 2
abundant = [x for x in abundant if x < 14062]

sums = []
for x in range(len(abundant)):
    for y in range(x):
        if abundant[x] + abundant[y] not in sums:
            sums.append(abundant[x] + abundant[y])

#sums = [abundant[x] + abundant[y] for x in range(len(abundant)) for y in range(x, len(abundant)) if abundant[x] + abundant[y] < 28124]
print sum(x for x in range(28124) if not (x in sums))
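The membership test `abundant[x] + abundant[y] not in sums` above scans a plain list, which is quadratic in the number of sums. Purely as an illustration of the same reasoning (and without the repo's utils), a self-contained sketch that precomputes the abundant numbers once and answers the question with set lookups:

# Standalone sketch; divisor_sum is a stand-in for sum(factors(i)) - i.
def divisor_sum(n):
    # sum of proper divisors of n
    total = 1
    d = 2
    while d * d <= n:
        if n % d == 0:
            total += d
            if d != n // d:
                total += n // d
        d += 1
    return total

LIMIT = 28124
abundant = [i for i in range(12, LIMIT) if divisor_sum(i) > i]
abundant_set = set(abundant)

def expressible(n):
    # n is a sum of two abundant numbers iff n - a is abundant for some abundant a
    return any((n - a) in abundant_set for a in abundant if a <= n - 12)

print(sum(n for n in range(1, LIMIT) if not expressible(n)))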

Example #22
    def kasiski(self, min_length, max_length, num_elements, max_keylength):
        """
        Function analyses the ciphertext, scanning for repeated elements and then derives the estimated keylength from
        the separation between them. The estimated keylength is the number greater than 1 with the highest count.

        Args:
            min_length (int): the minimum length of an element to search for repetitions: recommended 2 to 3.
            max_length (int): the maximum length of an element to search for repetitions: recommended 8 to 12.
            num_elements (int): the number of discovered repeated elements to search through: recommended 20.
            max_keylength (int): the maximum assumed characters in the key.

        Notes:
            This function is not optimal. For example, if the counters return (3,12), (5,11), (15,8), then the best key
            length estimate could probably be taken as 15, since 3 and 5 both divide 15. But calibrating these kinds of
            things is probably best done with neural networks; here we take only the highest count.

        Attributes:
            repeated_elements (list): a list of strings of the discovered repeated elements using above params.
            keylength_analysis (Counter): a counter object that returns the count of the possible keylength spectrums.
            keylength_estimate (array): ordered best guesses for keylength with numbers spanning [1, max_keylength]
        """

        # systematically cycle through the ciphertext adding elements to a list:
        # e.g. 'abcde' for element lengths 2 and 3 becomes: ['ab', 'bc', 'cd', 'de', 'abc', 'bcd', 'cde']
        for k in range(min_length,
                       max_length + 1):  # <- k defines the element length
            cipher_elements = list()
            for i in range(int(len(self.ciphertext) - k)):
                cipher_elements.append(self.ciphertext[i:i + k])
            # attach a counter to see how many times each element is found within the ciphertext
            setattr(self, 'element_counter_' + str(k),
                    Counter(cipher_elements))

        # for the most common repeated elements systematically scan them and ensure they are not subsets of each other
        # e.g. 'ab' and 'bc' will be excluded as a subset of 'abc'.
        repeated_elements = list()
        for k in range(max_length, min_length - 1, -1):
            sub_list = getattr(self, 'element_counter_' +
                               str(k)).most_common(num_elements)
            delattr(self, 'element_counter_' + str(k))
            for i in range(num_elements):
                if any(sub_list[i][0] in element
                       for element in repeated_elements) or sub_list[i][1] < 2:
                    # if True then the subset condition is met or the count is 1 and thus it is not a repeated element
                    pass
                else:
                    # add the detected element to the list of most common repeated elements
                    repeated_elements.append(sub_list[i][0])
        self.repeated_elements = repeated_elements

        # for repeated elements now we detect their location in the ciphertext and calculate the keylength estimate as
        # factors of the difference in positioning.
        factors_list = list()
        for repeated_element in repeated_elements:
            _start = self.ciphertext.index(repeated_element)
            _factors = factors(self.ciphertext[_start +
                                               1:].index(repeated_element) + 1)
            factors_list.extend(_factors)

        # return the attributes to the class object, we take the estimate as the most frequently occurring factor > 1.
        self.keylength_analysis = Counter(factors_list)

        most_common_keylengths = self.keylength_analysis.most_common()

        keylength_estimate = list()
        for tuple in most_common_keylengths:
            if tuple[0] > max_keylength:
                pass
            else:
                keylength_estimate.append(tuple[0])
        undetected = set(list(range(1, max_keylength + 1))).difference(
            set(keylength_estimate))
        keylength_estimate.extend(undetected)
        self.keylength_estimate = keylength_estimate
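As a standalone illustration of the Kasiski idea the docstring describes (repeated substrings tend to be separated by multiples of the key length), the following sketch shows only the core counting step; it is not the class's actual API:

from collections import Counter

def kasiski_keylength_guesses(ciphertext, length=3, max_keylength=20):
    # count key lengths that divide the distance between repeated substrings
    first_seen = {}
    counts = Counter()
    for i in range(len(ciphertext) - length + 1):
        chunk = ciphertext[i:i + length]
        if chunk in first_seen:
            distance = i - first_seen[chunk]
            for k in range(2, max_keylength + 1):
                if distance % k == 0:
                    counts[k] += 1
        else:
            first_seen[chunk] = i
    # best guesses first: the most frequently occurring divisor wins
    return [k for k, _ in counts.most_common()]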
Example #23
# 1/8	= 	0.125
# 1/9	= 	0.(1)
# 1/10	= 	0.1
# Where 0.1(6) means 0.166666..., and has a 1-digit recurring cycle.
# It can be seen that 1/7 has a 6-digit recurring cycle.
# 
# Find the value of d < 1000 for which 1/d contains
# the longest recurring cycle in its decimal fraction part.

# Solution:
# 
# Find the prime p < 1000 for which the multiplicative order of 10 modulo p
# is largest; the cycle length of 1/p is that order, and it is maximal when
# the order is exactly p - 1.
# We factor p - 1. Fermat's little theorem gives us 10^(p-1) = 1 (mod p).
# Check that 10^x != 1 (mod p) for every proper factor x of p - 1; then p - 1
# is the order and thus the cycle length.
# 
# A prime p with max cycle length p - 1 will have only one exponent, p - 1 itself,
# for which 10^x == 1 (mod p). So:
#  Find all factors of p - 1
#  Do modexp(10, x, p) for each factor.
#  Count the number of results = 1.
#  Filter for count = 1
#  Take the max of the primes that pass the filter. 

import sys
sys.path.insert(0, '../common/')
import utils

print max(filter(lambda p: map(lambda x: utils.modexp(10, x, p), utils.factors(p-1)).count(1)==1, utils.primes_until(1000)))
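The one-liner above is Python 2; a more explicit sketch of the same approach (the helpers below are local stand-ins, not the repo's utils):

def is_prime(n):
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n ** 0.5) + 1))

def divisors(n):
    return [d for d in range(1, n + 1) if n % d == 0]

def full_period(p):
    # 1/p has a (p-1)-digit cycle iff 10**x % p == 1 only for x = p - 1
    return sum(pow(10, x, p) == 1 for x in divisors(p - 1)) == 1

print(max(p for p in range(3, 1000) if is_prime(p) and full_period(p)))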
Example #24
'''
Find the first four consecutive numbers each to have four distinct prime
factors.
'''
from utils import factors
import itertools

LEN = 4
START = 0
COUNT = 0

for i in itertools.count():
    facs = factors(i)
    if len(set(facs)) >= LEN:
        COUNT += 1
        if not START:
            START = i
    else:
        COUNT = 0
        START = 0

    if COUNT == LEN:
        print START
        break


Example #25
File: p005.py Project: doboy/euler
from utils import factors, product

f = {}
for i in xrange( 2, 21 ):
    ifactors = factors( i )
    for k in factors( i ):
        f[ k ] = max( f.get( k, 0 ), ifactors.get( k, 0 ) )

print product( k ** v for k, v in f.iteritems() )
Example #26
def problem_twelve():
    for tri in triangle():
        num_divisors = len(factors(tri))
        print tri, num_divisors
        if num_divisors > 500:
            return tri
Example #27
# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.

#Let d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n).
#If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and each of a and b are called amicable numbers.
#
#For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.
#
#Evaluate the sum of all the amicable numbers under 10000.

import utils

n = 10000
total = 0

for i in range(2, n + 1):
    x = sum(utils.factors(i)) - i
    if sum(utils.factors(x)) - x == i and i != x:
        total += i

print(total)
Example #28
#! python3
"""What is the largest prime factor of the number 600851475143?"""
import sys
from os.path import dirname
sys.path.insert(0, dirname(dirname(__file__)))
from utils import factors

print(max(factors(600851475143)))
Example #29
def factors_count(number: int) -> int:
    return len(factors(number))
Example #30
File: p47.py Project: icot/euler
#!/usr/bin/python

from utils import factors, FastPrimeSieve

if __name__ == "__main__":
    print "Generating primes"
    primes = FastPrimeSieve(1000000)
    print "Computing"
    n = 1 
    s = 1 
    while s <= 3:
        if n not in primes:
            f = set(factors(n))
            l = len(f)
            if l >= 4 :
                s = s+1
                f1 = set(factors(n-1))
                l1 = len(f1)
                if l1 >= 4 :
                    s = s+1
                    print n-1, f1, n, f, s
                else:
                    s = 1
            else:
                s = 1
        n = n + 2 
    print n-3
        
        

Example #31
def main():
    n = int(sys.argv[1])
    print(max([x for x in factors(n) if is_prime(x)]))
Example #32
def sumOfProperDivisors(n):
    return sum(factors(n)) - n
Example #33
 def test_factors(self):
     cases = {1:[], 2:[2], 3:[3], 4:[2,2], 6:[2,3], 28:[2,2,7]}
     for case in cases.keys():
         result = utils.factors(case)
         if result != cases[case]:
             self.fail("Got the wrong factors for %s: %s " % (case, str(cases[case])))
Example #34
def sum_proper_divisors(n):
    return sum(factors(n))-n
Example #35
def isPrimeGenerating(n):
    fs = factors(n)
    for f in fs:
        if not is_prime(f + int(n/f)):
            return False
    return True
Example #36
28. The first ten terms would be:

                 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...

Let us list the factors of the first seven triangle numbers:

   1: 1
   3: 1,3
   6: 1,2,3,6
  10: 1,2,5,10
  15: 1,3,5,15
  21: 1,3,7,21
  28: 1,2,4,7,14,28

We can see that 28 is the first triangle number to have over five
divisors.

What is the value of the first triangle number to have over five hundred
divisors?
"""

from utils import triangle_number_generator
from utils import factors

tri_gen = triangle_number_generator()

tri = next(tri_gen)
tri_factors = []
while len(factors(tri))<=500:
  tri = next(tri_gen)
print tri
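For reference, the divisor lists in the table above can be reproduced with a tiny standalone helper (not the repo's factors):

def divisors(n):
    return [d for d in range(1, n + 1) if n % d == 0]

for t in (1, 3, 6, 10, 15, 21, 28):
    print(t, len(divisors(t)), divisors(t))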
Example #37
def is_abundant(n):
    if sum(utils.factors(n)) - n > n:
        return True
    else:
        return False
Example #38
def num_divisors(n):
    res = 1
    for k, x in groupby(factors(n)):
        exp = len(list(x))
        res *= exp + 1
    return res
Example #39
def factor_sum(n):
    return sum(factors(n, False))
Example #40
import numpy as np
import pandas as pd
from datetime import datetime, timedelta, date
import random
import utils as u

FACTORS = list(u.factors(1440))
filename = './complete_data.csv'
learned_data = './learned_data.csv'
complete_data = pd.DataFrame()
GAMMA = 0.75
threshold = 1
learned_schedule = pd.DataFrame()
TOTAL_TIME = 1440
DIV_SIZE = 10


#Load and save data to and from csv
def save_data():
    complete_data.to_csv(filename)


def load_data():
    global complete_data
    try:
        complete_data = pd.read_csv(filename, index_col=0)
    except FileNotFoundError:
        save_data()
        complete_data = pd.read_csv(filename, index_col=0)

Example #41
File: p23.py Project: kryptn/euler
def abundant(n):
    f = sorted(list(factors(n)))[:-1]
    if sum(f) > n:
        return True
    return False
Example #42
def is_abundant(x):
    return sum(utils.factors(x, True)) > x
Example #43
def sum_proper_divisors(n):
    return sum(factors(n)) - n
Example #44
def num_distinct_factors(n):
    return len(set(utils.factors(n)))
Example #45
def problem_twelve():
    for tri in triangle():
        num_divisors = len(factors(tri))
        print tri, num_divisors
        if num_divisors > 500:
            return tri
Example #46
def evaluate(SMASH, which_dataset, batch_size, seed, validate, num_random,
             num_perturb, num_markov, perturb_prob, arch_SGD, fp16, parallel):

    # Random seeds
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    np.random.seed(seed)

    num_runs = num_random + num_perturb + num_markov
    random_sample = True
    perturb = False
    markov = False

    net = torch.load('weights/' + SMASH + '.pth')
    net.eval()

    # Backwards compatibility hack; If you're trying to understand this code,
    # ignore this line.
    if not hasattr(net, 'factors'):
        net.factors = factors(net.N)

    _, test_loader = get_data_loader(which_dataset=which_dataset,
                                     augment=False,
                                     validate=validate,
                                     batch_size=batch_size)

    # Prepare lists that hold errors
    ensemble_err, err, flops, params = [], [], [], []

    # Array to which we save configurations and errors
    save_archs = []

    # Prepare ensemble predictions
    ensemble_out = torch.zeros(len(test_loader.dataset),
                               net.fc.out_features).cuda()

    # Start the stopwatch and begin testing
    start_time = time.time()
    mode = 'training' if net.training else 'testing'
    print('Evaluating %s in %s mode...' % (SMASH, mode))
    for test in range(num_runs):

        # If we've done all our random samples, switch to random perturbation mode
        if test == num_random:
            sorted_archs = sorted(save_archs, key=lambda item: item[-1])
            print(
                'Random sampling complete with best error of %f, starting perturbation...'
                % (sorted_archs[0][-1]))
            base_arch = sorted_archs[0][:10]
            perturb = True
            random_sample = False

        # If we've done all our perturbations, switch to markov chain mode
        elif test == num_random + num_perturb:
            sorted_archs = sorted(save_archs, key=lambda item: item[-1])
            print(
                'Random perturbation complete with best error of %f, starting markov chain...'
                % (sorted_archs[0][-1]))
            base_arch = sorted_archs[0][:10]
            current_error = sorted_archs[0][-1]
            markov = True

        # Sample a random architecture, as in training
        if random_sample:
            arch = net.sample_architecture()

        # Slightly change a sampled (and, presumably, high-scoring) architecture
        elif perturb:
            arch = perturb_arch.perturb_architecture(net, deepcopy(base_arch),
                                                     perturb_prob)

        #Sample Weights
        w1x1 = net.sample_weights(*arch)

        # Error counters
        e, ensemble_e = 0, 0

        # Loop over validation set
        for i, (x, y) in enumerate(test_loader):

            # Get outputs
            o = net(V(x.cuda(), volatile=True), w1x1, *arch)

            # Get predictions ensembled across multiple configurations
            ensemble_out[i * batch_size:(i + 1) * batch_size] += o.data

            # Update error
            e += o.data.max(1)[1].cpu().ne(y).sum()

            # Update ensemble error
            ensemble_e += ensemble_out[i * batch_size:(i + 1) *
                                       batch_size].max(1)[1].cpu().ne(y).sum()

        # Save ensemble error thus far
        ensemble_err.append(float(ensemble_e) / ensemble_out.size(0))

        # Save individual error thus far
        err.append(float(e) / ensemble_out.size(0))

        # While in markov mode, update the base arch if we get a better SMASH score.
        if markov and err[-1] < float(current_error):
            print(
                'Error of %f superior to error of %f, accepting new architecture...'
                % (err[-1], current_error))
            base_arch = arch
            current_error = err[-1]

        # Save relevant architectural details along with error
        save_archs.append(arch +
                          (net.N, net.N_max, net.bottleneck,
                           net.max_bottleneck, net.in_channels, 0, err[-1]))

        params.append(count_params(save_archs[-1]))
        flops.append(count_flops(save_archs[-1], which_dataset))
        print(
            'For run #%d/%d, Individual Error %2.2f Ensemble Err %2.2f, params %e, flops %e,  Time Elapsed %d.'
            % (test, num_runs, 100 * err[-1], 100 * ensemble_err[-1],
               params[-1], flops[-1], time.time() - start_time)
        )  #LogSof EnsErr %d, Softmax EnsErr %d ensemble_olgs_err[-1],  ensemble_os_err[-1],

    best_acc = sorted(err)[0]
    worst_acc = sorted(err)[-1]
    least_flops = sorted(flops)[0]
    most_flops = sorted(flops)[-1]
    least_params = sorted(params)[0]
    most_params = sorted(params)[-1]
    print('Best accuracy is ' + str(best_acc) + ', Worst accuracy is ' +
          str(worst_acc))

    # Save results
    # np.savez(filename[:-4] + '_' + mode + '_errors.npz', **{'err':err, 'ensemble_err':ensemble_err})
    # save_archs = sorted(save_archs, key = lambda item: item[-1])
    np.savez(
        SMASH + '_archs.npz', **{
            'archs': sorted(save_archs, key=lambda item: item[-1]),
            'unsorted_archs': save_archs
        })
Example #47
 def __init__(self, exp, base, add):
     self.exp = exp
     self.base = base
     self.add = add
     self.exp_factors = factors(self.exp)
     self.algebraic_factors = self.algebraic_factorization()
Example #48
#! python3
"""Find the first four consecutive integers to have four distinct prime factors
each. What is the first of these numbers?"""
import sys
from os.path import dirname
sys.path.insert(0, dirname(dirname(__file__)))
from utils import factors

N = 4
i = 1
while True:
    i += 1
    for j in range(i, i + N):
        if len(set(factors(j))) != N:
            break
        i += 1
    else:
        print(i, j)
        break
Example #49
"""
Let d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n).
If d(a) = b and d(b) = a, where a < b, then a and b are an amicable pair and each of a and b are called amicable numbers.

For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.

Evaluate the sum of all the amicable numbers under 10000.
"""
from utils import factors
    
amicable = []
for i in range(1, 10000):
    da = sum(factors(i)[:-1])
    #print i, da
    if da > i:
        db = sum(factors(da)[:-1])
        #print db
        if db == i:
            amicable += [i, da]
print sum(amicable)
Example #50
def primes(n=100):
    p = []
    for i in range(2, n):
        if factors(i) == []:
            p.append(i)
    return p
Example #51
 def factor_sum(self, num):
     return sum(factors(num)) - num
Example #52
                 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...

Let us list the factors of the first seven triangle numbers:

   1: 1
   3: 1,3
   6: 1,2,3,6
  10: 1,2,5,10
  15: 1,3,5,15
  21: 1,3,7,21
  28: 1,2,4,7,14,28

We can see that 28 is the first triangle number to have over five
divisors.

What is the value of the first triangle number to have over five hundred
divisors?
"""

import utils

i = 1
triangle = 0
while 1:
    triangle = sum(range(i+1))
    f = utils.factors(triangle)
    if len(f) > 500:
        break
    i += 1
print triangle