Example #1
from math import lcm  # assumed import (Python 3.9+); the original snippet omits it

def P(s, N):
    if s == 1:
        return (N - 1) // 2  # integer count; the Python 2 original relied on integer /
    k = 1
    for i in range(1, s + 1):
        k = lcm(k, i)

    l = lcm(k, s + 1)

    res = 0
    for i in range(k + 1, N, k):
        if i % l != 1:
            res += 1

    return res
Example #2
from math import lcm  # assumed import (Python 3.9+); the original snippet omits it

def solve(r):
    ans = 1
    for i in range(2, r + 1):
        print(ans, end=' ')
        ans = lcm(ans, i)
        print(i, ans)
    return ans
Example #3
  def calculate_invariant_rows(self):
    """Calculate the rows of the invariant matrix"""
    inv_rows = self.calculate_initial_rows()
    # Eliminate the non-zero entries of each reaction's column in turn
    for index in range(self.kig.get_num_reactions()):
      num_rows = len(inv_rows)
      new_inv_rows = []
      for i in range(num_rows):
        i_row   = inv_rows[i]
        i_value = int(i_row.row[index])
        if i_value == 0:
          new_inv_rows.append(i_row)
        else: 
          for j in range(i+1, num_rows):
            j_row   = inv_rows[j]
            j_value = int(j_row.row[index])
            if ((i_value < 0 and j_value > 0) or 
                (i_value > 0 and j_value < 0)):
              target_value = utils.lcm(abs(i_value), abs(j_value))
              i_coeff = target_value // abs(i_value)  # exact: target_value is a multiple
              j_coeff = target_value // abs(j_value)

              new_row = self.combine_invariant_rows(i_row, j_row, 
                                                    coeff1=i_coeff,
                                                    coeff2=j_coeff)
              new_inv_rows.append(new_row)
      # new_inv_rows = [ r for r in inv_rows if r.row[index] == 0 ]
      inv_rows = new_inv_rows
    return inv_rows
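The elimination step pairs rows whose entries at the pivot column have opposite signs and scales them by lcm-derived coefficients so that column cancels. A minimal sketch of just that combination step (a hypothetical standalone helper, not the class above):

from math import lcm

def combine_rows(row_a, row_b, index):
    """Scale two integer rows so their entries at `index` cancel, then add them."""
    target = lcm(abs(row_a[index]), abs(row_b[index]))
    ca = target // abs(row_a[index])
    cb = target // abs(row_b[index])
    return [ca * x + cb * y for x, y in zip(row_a, row_b)]

# entries +2 and -3 at column 0: lcm is 6, coefficients 3 and 2, the column cancels
assert combine_rows([2, 1, 0], [-3, 0, 1], 0) == [0, 3, 2]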
Example #4
import numpy as np  # assumed import; the snippet also relies on pymc (pm) and a compiled lcm routine

def cmvns_l(cur_val, B, y, Bl, n_neg, p_find, pri_S=None, pri_M=None, n_cycles=1, pri_S_type='square'):
    """
    Metropolis samples cur_val, under the constraint that B*cur_val < y, 
    with likelihood term corresponding to n_neg negative observations independent
    with probabilities p_find if Bl*cur_val>0, else 0.
    """
    
    cur_val = np.asarray(cur_val).squeeze()
    B = np.asarray(B)
    Bl = np.asarray(Bl)
    
    # Change coordinates so that the elements of cur_val are standard normal.
    if pri_M is not None:
        pri_M = np.asarray(pri_M).squeeze()
        cur_val = cur_val - pri_M
    
    if pri_S is not None:
        pri_S = np.asarray(pri_S).squeeze()
        if pri_S_type == 'square':
            new_val = np.linalg.solve(pri_S, cur_val)
            B = np.dot(B,pri_S)
            Bl = np.dot(Bl, pri_S)
        elif pri_S_type == 'diag':
            new_val = cur_val / pri_S
            B = B*pri_S
            Bl = Bl*pri_S
        elif pri_S_type == 'tri':
            new_val = pm.gp.trisolve(pri_S, cur_val, uplo='L', transa='N')
            B = np.dot(B,pri_S)            
            Bl = np.dot(Bl,pri_S)
        else:
            raise ValueError('Prior matrix square root type %s not recognized.' % pri_S_type)
    else:
        new_val = cur_val.copy()
    
    if np.any(np.dot(B,new_val) > y):
        # Adjust in case of numerical problems.
        new_val = new_val + 1e-5
        if np.any(np.dot(B,new_val) > y):
            raise ValueError('Starting values do not satisfy constraints.')
    
    # Do the specified number of cycles.
    n = len(cur_val)
    y_ = y - np.dot(B, new_val)
    lop = np.dot(Bl, new_val)
    u = np.random.random(size=(n, n_cycles)).copy('F')
    um = np.random.random(size=(n, n_cycles)).copy('F')
    # Call to the Fortran routine (bound here as lcm), which overwrites new_val
    # in-place. The number of cycles is determined by the size of u.
    acc, rej = lcm(np.asarray(B,order='F'), y_, new_val, u, np.asarray(Bl,order='F'), n_neg, p_find, um, lop)
    
    # Change back to original coordinates and return.
    if pri_S is not None:
        if pri_S_type == 'square' or pri_S_type == 'tri':
            new_val = np.dot(pri_S, new_val)
        else:
            new_val *= pri_S    
    
    return new_val, acc, rej
Example #5
from math import lcm  # assumed import (Python 3.9+, variadic); the original snippet omits it

def make_common_divisor(numerators, denominators):
    if len(numerators) != len(denominators):
        raise ValueError("Numerators (%s) must have the same length as "
                         "denominators (%s)" % (len(numerators), len(denominators)))
    this_lcm = lcm(*denominators)
    for idx, den in enumerate(denominators):
        # exact integer division: this_lcm is a multiple of every denominator
        numerators[idx] = (this_lcm // den) * numerators[idx]
    return numerators, [this_lcm] * len(numerators)
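A quick check of the behaviour, assuming the math.lcm import above: 1/2, 1/3 and 1/4 are brought over the common denominator 12.

nums, dens = make_common_divisor([1, 1, 1], [2, 3, 4])
assert nums == [6, 4, 3] and dens == [12, 12, 12]  # 1/2 = 6/12, 1/3 = 4/12, 1/4 = 3/12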
Example #6
    def add_task(self, task_name, **kwargs):

        # TODO ensure in path
        try:
            delta = timedelta(**kwargs)  # assumes: from datetime import timedelta
        except TypeError:
            raise Exception('Bad arguments for timedelta')
        self._tasks.append((task_name, delta))
        print('task %s scheduled.' % task_name)

        deltas = [d.total_seconds() for _, d in self._tasks]
        # total_seconds() returns floats, so this lcm helper must accept a list of floats
        self._reset_delta = lcm(deltas)
        print('setting reset_delta to %s' % self._reset_delta)
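The reset interval is the least common multiple of the task periods, so every task realigns at a whole number of its own cycles. A sketch with hypothetical whole-second periods and math.lcm (the snippet's own lcm evidently accepts a list of floats instead):

from math import lcm

periods = [30, 45, 60]   # hypothetical task periods in whole seconds
print(lcm(*periods))     # 180: all three tasks realign every 180 s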
Example #7
    def pre_process(self):
        pfracs = []
        for pside in self.psides:
            pfracs.append(Fraction(Decimal(str(pside))))
        denominators = [pfrac.denominator for pfrac in pfracs]
        self.L = lcm(denominators)  # this lcm helper accepts a list
        if self.L > MAX_LIST_SIZE:
            ex = OverflowError('The resulting value for L (least common multiple) '
                    'for the given list of fractions is bigger than allowed. '
                    'Please try again with a different list of psides.')
            ex.L = self.L
            ex.MAX_LIST_SIZE = MAX_LIST_SIZE
            raise ex
        self.A = []
        for i, pfrac in enumerate(pfracs):
            size = int(self.L * pfrac)
            for j in range(size):
                self.A.append(i)
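Worked by hand with hypothetical psides = [0.5, 0.25, 0.25]: the fractions are 1/2, 1/4, 1/4, so L = lcm(2, 4, 4) = 4 and A = [0, 0, 1, 2], i.e. side i appears L * pside_i times. A self-contained sketch using math.lcm in place of the module's list-taking helper:

from decimal import Decimal
from fractions import Fraction
from math import lcm

psides = [0.5, 0.25, 0.25]
pfracs = [Fraction(Decimal(str(p))) for p in psides]
L = lcm(*(f.denominator for f in pfracs))   # lcm(2, 4, 4) == 4
A = [i for i, f in enumerate(pfracs) for _ in range(int(L * f))]
print(L, A)                                 # 4 [0, 0, 1, 2]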
Example #8
def old_integer_coeff(float_list):
    relations = []
    possibilities = []
    for fl1 in float_list:
        if almost_equal(fl1, 0.0, tolerance=TOLERANCE):
            # not a good candidate for finding the LCM
            continue
        aux = []
        fl1 = '%s'%abs(fl1)
        rat1 = Fraction(fl1)
        for fl2 in float_list:
            if almost_equal(fl2, 0.0, tolerance=TOLERANCE):
                aux.append(0.0)
                continue
            modif2 = -1 if fl2 < 0 else 1
            fl2 = '%s'%(abs(fl2))
            rat2 = Fraction(fl2)
            rel = rat1/rat2
            rel = format(modif2*float(rel), '.%dg'%TRUNCATE)
            rel = Fraction(rel)
            aux.append(rel)
        relations.append(aux)
        mcm = float(lcm(*[abs(x) for x in aux if x != 0]))
        integerified = [int(mcm / x) if x else 0 for x in aux]
        possibilities.append(integerified)
        #if all([x.is_integer() for x in integerified]):
        #    return [int(x) for x in integerified]
    ret = None
    sup = None
    # Look for the "best" inequality (in the sense of the smallest
    # sum of abs() over the coefficients and the constant term)
    # among the candidates
    for pos in possibilities:
        max_val = 0
        for coef in pos:
            max_val += abs(coef)
        if sup is None or max_val < sup:
            sup = max_val
            ret = pos
    return ret
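For instance, with float_list = [0.5, -0.25] and the helpers behaving as the code implies (in particular, lcm accepting Fraction arguments), the ratio row built from the first coefficient is [1, -2], its LCM is 2, and the integerified candidate is [2, -1]; the second row yields the same candidate, so the inequality is returned scaled to coefficients [2, -1].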
Example #9
File: perm.py Project: dt1483/SnFFT
    def order(self):
        # the order of a permutation is the lcm of its cycle lengths
        cycle_lengths = [len(x) for x in self.cycle_decomposition]
        return lcm(*cycle_lengths)
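For instance, a permutation made of a 2-cycle and a 3-cycle returns to the identity only after lcm(2, 3) = 6 applications. A standalone check using math.lcm as an assumed stand-in for the snippet's variadic lcm:

from math import lcm

cycle_lengths = [2, 3]           # e.g. the permutation (1 2)(3 4 5)
assert lcm(*cycle_lengths) == 6  # six applications give the identity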
Example #10
def compute():
    # Find the highest power of each prime below 20 and multiply
    # them together (i.e. take the least common multiple).
    # By hand, this gives prod([2**4, 3**2, 5, 7, 11, 13, 17, 19])
    # We will use the lcm function from utils here for clarity
    return lcm(*range(2, 20))
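A cross-check with the standard library (math.prod, 3.8+, and math.lcm, 3.9+) rather than the utils helper: the product of those prime powers is 232792560, which is also lcm(1..20), since 20 = 2^2 * 5 adds no new prime power.

from math import lcm, prod

assert prod([2**4, 3**2, 5, 7, 11, 13, 17, 19]) == 232792560
assert lcm(*range(2, 20)) == 232792560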
Example #11
def analyze_profile(required, provided, config, options):
    """
    * Calculates the hyperperiod of the profiles
    * Repeats the profiles for the specified number of hyperperiods in *options*
    * Analyzes the requested profiles
    * If more than one hyperperiod has been specified, it determines system stability
    * Optionally plots the bandwidths and data for the profiles

    :param in required: :class:`networkProfile.Profile` describing the required profile
    :param in provided: :class:`networkProfile.Profile` describing the provided profile
    :param in config: :class:`networkConfig.Config` describing the configuration of the network
    :param in options: :class:`Options` describing the program options for drawing and analysis

    Returns a list of analysis results consisting of::

      [ output, remaining, max delay, max buffer ]

    * The output profile as a :class:`networkProfile.Profile` generated by calling **required.** :func:`networkProfile.Profile.Convolve` ( provided )
    * The remaining capacity profile as a :class:`networkProfile.Profile` which is determined as :math:`remaining = (provided - output)`
    * The delay structure generated by calling **required.** :func:`networkProfile.Profile.CalcDelay` ( output )
    * The buffer structure generated by calling **required.** :func:`networkProfile.Profile.CalcBuffer` ( output )
    """
    num_periods = options.num_periods
    nc_mode = options.nc_mode
    nc_step_size = options.nc_step_size
    print_profiles = options.print_profiles
    plot_dict = options.plot_dict
    plot_line_width = options.plot_line_width

    topology = config.topology
    routes = config.routes
    multicast = config.multicast
    retransmit = config.retransmit

    # CALCULATE HYPERPERIOD
    hyperPeriod = lcm(required.period, provided.period)
    # print "\nCalculated hyperperiod as {} seconds".format(hyperPeriod)

    # REPEAT PROFILES FOR THE RIGHT NUMBER OF HYPERPERIODS
    required.Repeat((hyperPeriod / required.period) * num_periods)
    provided.Repeat((hyperPeriod / provided.period) * num_periods)

    # INTEGRATE THE PROFILES FOR ANALYSIS
    provided.Integrate(hyperPeriod * num_periods)
    required.Integrate(hyperPeriod * num_periods)

    # CONVOLVE REQUIRED WITH PROVIDED TO PRODUCE OUTPUT
    output = required.Convolve(provided)
    output.period = hyperPeriod
    # CALCULATE SENDER-SIDE BUFFER AND DELAY FROM OUTPUT AND REQUIRED
    maxBuffer = required.CalcBuffer(output)
    maxDelay = required.CalcDelay(output)

    # delay the output according to the latency of the node's link
    # this determines the characteristics of the data at the receiver end
    received = output.Delay(provided)
    received.Kind("received")
    received.period = hyperPeriod

    # calculate the remaining capacity of the node's link
    remaining = provided.SubtractProfile(output)
    remaining.Kind("leftover")
    remaining.period = hyperPeriod
    remaining.Integrate(hyperPeriod * num_periods)

    # optionally analyze this using NC:
    if nc_mode:
        provided_nc = provided.ConvertToNC(min, nc_step_size)
        required_nc = required.ConvertToNC(max, nc_step_size)
        output_nc = required_nc.Convolve(provided_nc)
        maxBuffer_nc = required_nc.CalcBuffer(output_nc)
        maxDelay_nc = required_nc.CalcDelay(output_nc)

    # Print out analysis info
    print(bcolors.OKBLUE + "\tMax buffer (time, bits): [{}, {}]".format(maxBuffer[0], maxBuffer[2]))
    print("\tMax delay (time, seconds): [{}, {}]".format(maxDelay[0], maxDelay[2]) + bcolors.ENDC)

    if nc_mode:
        print(bcolors.OKBLUE + "\tMax buffer NC (time, bits): [{}, {}]".format(maxBuffer_nc[0], maxBuffer_nc[2]))
        print("\tMax delay NC (time, seconds): [{}, {}]".format(maxDelay_nc[0], maxDelay_nc[2]) + bcolors.ENDC)

    # DETERMINE SYSTEM STABILITY IF WE HAVE MORE THAN ONE HYPERPERIOD TO ANALYZE
    if num_periods > 1:
        reqDataP1 = required.GetValueAtTime("data", hyperPeriod)
        reqDataP2 = required.GetValueAtTime("data", 2 * hyperPeriod)
        outDataP1 = output.GetValueAtTime("data", hyperPeriod)
        outDataP2 = output.GetValueAtTime("data", 2 * hyperPeriod)
        buff1 = reqDataP1 - outDataP1
        buff2 = reqDataP2 - outDataP2
        # If the buffer size increases between periods, the system is not stable.
        if buff2 > buff1:
            print(bcolors.FAIL + "WARNING: BUFFER UTILIZATION NOT CONSISTENT THROUGH ANALYZED PERIODS")
            print("\t APPLICATION MAY HAVE UNBOUNDED BUFFER GROWTH ON NETWORK\n" + bcolors.ENDC)

    if plot_dict["plot"]:
        profList = [required, provided, output, remaining, received]
        for key in plot_dict:
            profList = [x for x in profList if key not in x.kind]
        plot_bandwidth_and_data(profList, maxDelay, maxBuffer, num_periods, plot_line_width)
        if nc_mode:
            profList = [required_nc, provided_nc, output_nc]
            plot_bandwidth_and_data(profList, maxDelay_nc, maxBuffer_nc, num_periods, plot_line_width)

    # Shrink the profiles back down so that they can be composed with other profiles
    received.Shrink(received.period)
    output.Shrink(output.period)
    remaining.Shrink(remaining.period)
    provided.Shrink(provided.period)
    required.Shrink(required.period)

    return output, remaining, received, maxBuffer, maxDelay
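The only lcm use above is the hyperperiod: profiles with periods of, say, 4 s and 6 s jointly repeat every 12 s, so each is tiled the corresponding number of times before integration. A sketch with hypothetical integer periods (real periods may be floats, which math.lcm does not accept):

from math import lcm

required_period, provided_period = 4, 6
hyperPeriod = lcm(required_period, provided_period)
print(hyperPeriod, hyperPeriod // required_period, hyperPeriod // provided_period)
# 12 3 2: required is repeated 3 times and provided 2 times per hyperperiod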
Example #12
    def solve(self):
        result = 1
        for value in range(2, self.num):
            result = lcm(result, value)
        return result
Example #13
# Problem 5

# real 0m0.103s
# user 0m0.057s
# sys  0m0.017s

from utils import lcm
cur_lcm = 1

for i in range(20,1,-1):
    cur_lcm = lcm(cur_lcm,i)
print(cur_lcm)
Example #14
from functools import reduce
from numba import jit
from utils import gcd, lcm

# or, on newer Pythons, from math import gcd (3.5+) and lcm (3.9+)


# This brute-force check is way too slow (the original also had its flag
# logic inverted and only tested 2..10); fixed to test divisibility by 2..20:
@jit(nopython=True)
def divides_2to20(count):
    for i in range(2, 21):
        if count % i != 0:
            return False
    return True


assert lcm(5, 2) == 10
assert lcm(2, 5) == 10

assert lcm(12, 4) == 12
assert lcm(15, 6) == 30

print("The smallest number divisilbe by the numbers 2-10 is : ",
      reduce(lcm, range(2, 11)))

print("The smallest number divisilbe by the numbers 2-10 is : ",
      reduce(lcm, range(2, 21)))
Example #15
import math
import numpy as np
from math import lcm  # assumed imports; the original snippet omits them


def avg_normalized_happiness(pred, gift, wish):
    n_children = 1000000  # n children to give
    n_gift_type = 1000  # n types of gifts available
    n_gift_quantity = 1000  # each type of gifts are limited to this quantity
    n_gift_pref = 100  # number of gifts a child ranks
    n_child_pref = 1000  # number of children a gift ranks
    twins = math.ceil(0.04 * n_children / 2.) * 2  # 4% of all population, rounded to the closest number
    triplets = math.ceil(0.005 * n_children / 3.) * 3  # 0.5% of all population, rounded to the closest number
    ratio_gift_happiness = 2
    ratio_child_happiness = 2

    # check if triplets have the same gift
    for t1 in np.arange(0, triplets, 3):
        triplet1 = pred[t1]
        triplet2 = pred[t1 + 1]
        triplet3 = pred[t1 + 2]
        # print(t1, triplet1, triplet2, triplet3)
        assert triplet1 == triplet2 and triplet2 == triplet3

    # check if twins have the same gift
    for t1 in np.arange(triplets, triplets + twins, 2):
        twin1 = pred[t1]
        twin2 = pred[t1 + 1]
        # print(t1)
        assert twin1 == twin2

    max_child_happiness = n_gift_pref * ratio_child_happiness
    max_gift_happiness = n_child_pref * ratio_gift_happiness
    total_child_happiness = 0
    total_gift_happiness = np.zeros(n_gift_type)

    for i in range(len(pred)):
        child_id = i
        gift_id = pred[i]

        # check if child_id and gift_id exist
        assert child_id < n_children
        assert gift_id < n_gift_type
        assert child_id >= 0
        assert gift_id >= 0
        child_happiness = (n_gift_pref - np.where(wish[child_id] == gift_id)[0]) * ratio_child_happiness
        if not child_happiness:
            child_happiness = -1

        gift_happiness = (n_child_pref - np.where(gift[gift_id] == child_id)[0]) * ratio_gift_happiness
        if not gift_happiness:
            gift_happiness = -1

        total_child_happiness += child_happiness
        total_gift_happiness[gift_id] += gift_happiness

    denominator1 = n_children * max_child_happiness
    denominator2 = n_gift_quantity * max_gift_happiness * n_gift_type
    common_denom = lcm(denominator1, denominator2)
    multiplier = common_denom / denominator1

    print(multiplier, common_denom)
    child_happiness = math.pow(total_child_happiness * multiplier, 3) / float(math.pow(common_denom, 3))
    santa_happiness = math.pow(np.sum(total_gift_happiness), 3) / float(math.pow(common_denom, 3))
    print('Child happiness: {}'.format(child_happiness))
    print('Santa happiness: {}'.format(santa_happiness))
    ret = child_happiness + santa_happiness
    return ret
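The lcm here only puts the two normalized sums over a single denominator before cubing. Schematically, with hypothetical small stand-ins for the two constants:

from math import lcm

denominator1, denominator2 = 400, 1000          # hypothetical stand-ins
common_denom = lcm(denominator1, denominator2)  # 2000
multiplier = common_denom // denominator1       # 5: rescales the first term
assert multiplier * denominator1 == common_denom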
Example #16
from functools import reduce
from math import lcm  # assumed imports; the original snippet omits them

def largest_multiple(numbers):
    return reduce(lcm, numbers)
Example #17
def train(profile):
    stt_config = profile.speech_to_text
    intent_config = profile.intent

    # Load sentence templates, write examples
    sentences_ini_path = profile.read_path(stt_config['sentences_ini'])

    # Load from ini file and write to examples file
    words_needed = set()
    sentences_by_intent = defaultdict(list)
    grammars_dir = profile.write_dir(stt_config['grammars_dir'])

    with open(sentences_ini_path, 'r') as sentences_ini_file:
        grammar_paths = jsgf_utils.make_grammars(sentences_ini_file, grammars_dir)

        # intent -> sentence templates
        tagged_sentences = jsgf_utils.generate_sentences(grammar_paths)

        for intent_name, intent_sents in tagged_sentences.items():
            for intent_sent in intent_sents:
                # Template -> untagged sentence + entities
                sentence, entities = utils.extract_entities(intent_sent)

                # Split sentence into words (tokens)
                sentence, tokens = sanitize_sentence(sentence, profile.training)
                sentences_by_intent[intent_name].append((sentence, entities))

                # Collect all used words
                words_needed.update(tokens)

    # Load base and custom dictionaries
    ps_config = stt_config['pocketsphinx']
    base_dictionary_path = profile.read_path(ps_config['base_dictionary'])
    custom_path = profile.read_path(ps_config['custom_words'])

    word_dict = {}
    for word_dict_path in [base_dictionary_path, custom_path]:
        if os.path.exists(word_dict_path):
            with open(word_dict_path, 'r') as dictionary_file:
                utils.read_dict(dictionary_file, word_dict)

    # Add words from wake word if using pocketsphinx
    if profile.wake.get('system') == 'pocketsphinx':
        wake_config = profile.wake['pocketsphinx']
        wake_keyphrase = wake_config['keyphrase']
        _, wake_tokens = sanitize_sentence(wake_keyphrase, profile.training)
        words_needed.update(wake_tokens)

    # Check for unknown words
    unknown_words = words_needed - word_dict.keys()
    unknown_path = profile.read_path(ps_config['unknown_words'])

    if len(unknown_words) > 0:
        with open(unknown_path, 'w') as unknown_file:
            for word in unknown_words:
                result = utils.lookup_word(word, word_dict, profile, n=1)

                pronounces = result['pronunciations']
                phonemes = ' '.join(pronounces)

                # Dictionary uses upper-case letters
                if stt_config.get('dictionary_upper', False):
                    word = word.upper()
                else:
                    word = word.lower()

                print(word, phonemes, file=unknown_file)

        raise RuntimeError('Training failed due to %s unknown word(s)' % len(unknown_words))

    elif os.path.exists(unknown_path):
        # Remove unknown dictionary
        os.unlink(unknown_path)


    # Write out dictionary with only the necessary words (speeds up loading)
    dictionary_path = profile.write_path(ps_config['dictionary'])
    with open(dictionary_path, 'w') as dictionary_file:
        for word in sorted(words_needed):
            for i, pronounce in enumerate(word_dict[word]):
                if i < 1:
                    print(word, pronounce, file=dictionary_file)
                else:
                    print('%s(%s)' % (word, i+1), pronounce, file=dictionary_file)

    logging.debug('Wrote %s word(s) to %s' % (len(words_needed), dictionary_path))

    # Repeat sentences so that all intents will contain the same number
    balance_sentences = profile.training.get('balance_sentences', True)
    if balance_sentences:
        # Use least common multiple
        lcm_sentences = utils.lcm(*(len(sents) for sents
                                    in sentences_by_intent.values()))
    else:
        lcm_sentences = 0  # no repeats

    # Write sentences to text file
    sentences_text_path = profile.write_path(stt_config['sentences_text'])
    with open(sentences_text_path, 'w') as sentences_text_file:
        num_sentences = 0
        for intent_name, intent_sents in sentences_by_intent.items():
            num_repeats = max(1, lcm_sentences // len(intent_sents))
            for sentence, slots in intent_sents:
                for i in range(num_repeats):
                    print(sentence, file=sentences_text_file)
                    num_sentences = num_sentences + 1

    logging.debug('Wrote %s sentence(s) to %s' % (num_sentences, sentences_text_path))
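The balancing step above works because each intent's sentences are repeated lcm // len times: with intents of, say, 2, 3 and 4 sentences the lcm is 12, the repeat counts come out to 6, 4 and 3, and every intent contributes 12 lines. A standalone check with math.lcm standing in for utils.lcm:

from math import lcm

sentence_counts = [2, 3, 4]
lcm_sentences = lcm(*sentence_counts)   # 12
repeats = [max(1, lcm_sentences // n) for n in sentence_counts]
assert repeats == [6, 4, 3]
assert all(n * r == lcm_sentences for n, r in zip(sentence_counts, repeats))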

    # Generate ARPA language model
    lm = train_speech_recognizer(profile)

    # Save to profile
    lm_path = profile.write_path(ps_config['language_model'])
    with open(lm_path, 'w') as lm_file:
        lm_file.write(lm)

    # Expand sentences for intent recognizer
    intent_system = profile.intent.get('system', 'fuzzywuzzy')

    if intent_system == 'rasa':
        rasa_config = profile.intent[intent_system]

        # Use rasaNLU
        examples_md_path = profile.write_path(rasa_config['examples_markdown'])
        with open(examples_md_path, 'w') as examples_md_file:
            for intent_name, intent_sents in tagged_sentences.items():
                # Rasa Markdown training format
                print('## intent:%s' % intent_name, file=examples_md_file)
                for intent_sent in intent_sents:
                    print('-', intent_sent, file=examples_md_file)

                print('', file=examples_md_file)

        # Train rasaNLU
        project_dir = profile.write_dir(rasa_config['project_dir'])
        project_name = rasa_config['project_name']
        rasa_config_path = profile.read_path(rasa_config['config'])

        train_intent_recognizer(examples_md_path, rasa_config_path,
                                project_dir, project_name)
    else:
        fuzzy_config = profile.intent[intent_system]

        # Use fuzzywuzzy
        examples_path = profile.write_path(fuzzy_config['examples_json'])
        examples = intent.make_examples(profile, sentences_by_intent)
        with open(examples_path, 'w') as examples_file:
            json.dump(examples, examples_file, indent=4)
Example #18
# Smallest multiple

from utils import lcm

limit = 20
result = 1
for i in range(1, limit + 1):
    result = lcm(result, i)
print(result)
Example #19
    def train(self, train_size, batch_step, epochs, is_train_cont=False):
        """
        Train on the first :py:obj:`train_size` MNIST training examples.

        Training and evaluation of MNIST using multilayer
        perceptron maxout layers

        Algo ::

            for each epoch
                for each batch
                    get batch data

                    1. forward pass the data to network
                    2. compute loss and propagate the gradient
                    3. optimize by updating the weights
                    4. Calculate accuracy

                    track total time while doing 1, 2, 3

        :param train_size: number of examples in training set
        :type train_size: :py:obj:`int`
        :param batch_step: batch size
        :type batch_step: :py:obj:`int`
        :param epochs: number of epochs
        :type epochs: :py:obj:`int`
        """
        train_data = self.trainset.train_data.to(device)
        # shuffle the data to account for permutation invariance
        idx = torch.randperm(train_size)
        train_data = train_data[idx]
        train_data = self.reshape_data(train_data)
        train_labels = self.trainset.train_labels.to(device)
        train_labels = train_labels[idx]
        for epoch in range(epochs):
            running_loss, training_loss = 0, 0
            running_time, elapsed = 0, 0
            training_acc, acc, _acc = 0, 0, 0
            examples = 0
            print_count = lcm(self.LOGGING_MOD, batch_step) // batch_step
            if epoch == 5 and self.layer_type == 'conv' and not is_train_cont:
                self.lr_update(0.005)
            elif epoch == 0 and is_train_cont and self.layer_type == 'conv':
                self.lr_update(0.001)
            elif epoch == 5 and is_train_cont and self.layer_type == 'conv':
                self.lr_update(0.0005)
            for batch_i in range(0, train_size, batch_step):
                # get input data for current batch
                train_batch = train_data[batch_i:min(batch_i+batch_step, train_size)]
                label_batch = train_labels[batch_i:min(batch_i+batch_step, train_size)]
                examples += train_batch.size()[0]

                self.optimizer.zero_grad()

                # forward + backward + optimize
                elapsed, outputs = total(self.net, train_batch, _reps=1)
                running_time += elapsed
                elapsed, loss = total(self.loss, outputs, label_batch, _reps=1)
                running_time += elapsed
                elapsed, _ = total(loss.backward, _reps=1) # propagate the gradient
                running_time += elapsed
                elapsed, _ = total(self.optimizer.step, _reps=1) # update the weights
                running_time += elapsed

                # prepare for accuracy
                _acc = num_corrects(outputs, label_batch)
                acc += _acc
                training_acc += _acc

                # loss
                running_loss += loss.item()
                training_loss += loss.item()
                if batch_i != 0 and batch_i % self.LOGGING_MOD == 0:
                    self.logger.info('Training Epoch: %d | Time: %.4fs Avg time: %.4fs '
                                     'Batch: %d Accuracy: %.2f Loss: %.4f',
                                     epoch, running_time, running_time / print_count,
                                     batch_i, acc * 100. / examples,
                                     running_loss / print_count)
                    # reinitialize variables
                    running_time = 0
                    acc = 0
                    examples = 0
                    running_loss = 0
            self.logger.info('Training Epoch: %d | Training Accuracy: %.4f Training Loss: %.4f',
                             epoch, training_acc / train_size,
                             training_loss / (train_size // batch_step + 1))
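print_count above is the number of batches between aligned logging points: lcm(LOGGING_MOD, batch_step) is the first batch index divisible by both, so dividing by batch_step counts the batches in that window. With hypothetical values LOGGING_MOD = 1000 and batch_step = 64:

from math import lcm

LOGGING_MOD, batch_step = 1000, 64
print_count = lcm(LOGGING_MOD, batch_step) // batch_step
print(print_count)  # 125 batches (8000 examples) between aligned log points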
Example #20
from math import lcm  # assumed import (Python 3.9+); the original snippet omits it

def P(s, N):
    # closed form of Example #1: count n in [1, N) with n == 1 (mod g)
    # but n != 1 (mod gp), where g = lcm(1..s) and gp = lcm(1..s+1)
    g = 1
    for i in range(2, s + 1):
        g = lcm(g, i)
    gp = lcm(g, s + 1)
    # exactly (N + m - 2) // m integers in [1, N) are congruent to 1 mod m
    return (N + g - 2) // g - (N + gp - 2) // gp
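A minimal cross-check of this closed form against the loop of Example #1 (condensed below, valid for s >= 2), reusing the math.lcm import added above:

def P_loop(s, N):  # Example #1, condensed
    k = 1
    for i in range(1, s + 1):
        k = lcm(k, i)
    l = lcm(k, s + 1)
    return sum(1 for i in range(k + 1, N, k) if i % l != 1)

assert all(P(s, N) == P_loop(s, N) for s in range(2, 6) for N in range(2, 200))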