Example #1
def main():
    coronaTexts = CoronaTexts()
    coronaTexts.constructCountries()  # 6 will be quit
    currTime = datetime.now()
    Session = Messaging.Session()
    Session.login()
    updateTimer = helpers.Timer(
        helpers.minutesToSeconds(numMinsBetweenUpdates))
    cPrintPollingAlert()

    while True:
        try:
            if hasTimeElapsed(updateTimer):
                # refresh all
                countryPopulationScraper.updatePops(coronaTexts)
                print("\t{}\t|\t{}\t".format("Name", "Population"))
                for country in coronaTexts.countries:
                    print("\t{}\t|\t{}\t".format(country.getCountryName(),
                                                 int(country.getPopulation())))
                coronaTexts.updateAllCountries()
        except KeyboardInterrupt:
            printMenu()
            userInput = getUsrMenuInput()
            if userInput == 0:
                coronaTexts.addEmailAddress()
            if userInput == 1:
                coronaTexts.showAllEmailAddresses()
            cPrintPollingAlert()
Example #2
def test(opt):
    log = helpers.Logger(opt.verbose)
    timer = helpers.Timer()
    # Load data =========================================================
    log.info('Reading corpora')
    # Read vocabs
    widss, ids2ws, widst, ids2wt = helpers.get_dictionaries(opt, test=True)
    # Read test
    tests_data = np.asarray(data.read_corpus(opt.test_src, widss), dtype=list)
    # Test output
    if not opt.test_out:
        opt.test_out = helpers.exp_filename(opt, 'test.out')
    # Get target language model
    lang_model = helpers.get_language_model(opt, None, widst, test=True)
    # Create model ======================================================
    log.info('Creating model')
    s2s = helpers.build_model(opt, widss, widst, lang_model, test=True)
    # Print configuration ===============================================
    if opt.verbose:
        options.print_config(opt,
                             src_dict_size=len(widss),
                             trg_dict_size=len(widst))
    # Start testing =====================================================
    log.info('Start running on test set, buckle up!')
    timer.restart()
    translations = []
    s2s.set_test_mode()
    for i, x in enumerate(tests_data):
        y = s2s.translate(x, beam_size=opt.beam_size)
        translations.append(' '.join([ids2wt[w] for w in y[1:-1]]))
    np.savetxt(opt.test_out, translations, fmt='%s')
    translations = np.asarray(translations, dtype=str)
    BLEU, details = evaluation.bleu_score(opt.test_dst, opt.test_out)
    log.info('Finished running on test set %.2f elapsed.' % timer.tick())
    log.info(details)
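All of the snippets in this listing lean on a project-specific helpers.Timer whose implementation is not shown, and its signature clearly varies between projects (a countdown-style Timer(seconds) in Example #1, a callable instance in Example #13). As a point of reference only, a minimal sketch compatible with the restart()/tick() calls above, the toc() calls in Examples #3 and #6, and the "with helpers.Timer(...)" pattern used in later examples might look like the class below; it is an assumption, not the actual helpers module:

import time

class Timer:
    """Hypothetical stand-in for helpers.Timer (not the real implementation)."""

    def __init__(self, msg=None, verbose=True):
        self.msg = msg
        self.verbose = verbose
        self.start = time.time()

    def restart(self):
        # Reset the reference point; elapsed time is measured from here on.
        self.start = time.time()

    def tick(self):
        # Seconds elapsed since construction or the last restart().
        return time.time() - self.start

    def toc(self):
        # Alias used by some of the examples: elapsed seconds since start.
        return time.time() - self.start

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Report elapsed time on exit, as the "with helpers.Timer(...)" uses suggest.
        if self.verbose:
            print('%s: %.2f s elapsed' % (self.msg or 'timer', time.time() - self.start))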
Example #3
def __build_result_assign_step(hs, res, cx2_kpts, cx2_desc, cx2_rchip_size, assign_matches, verbose):
    '1) Assign matches with the chosen function (vsone) or (vsmany)'
    if verbose:
        #helpers.printvar(locals(), 'cx2_desc')
        #helpers.printvar(locals(), 'res.qcx')
        num_qdesc = len(cx2_desc[res.qcx])
        print('[mc2] assign %d desc' % (num_qdesc))
    tt1 = helpers.Timer(verbose=False)
    assign_output = assign_matches(res.qcx, cx2_kpts, cx2_desc, cx2_rchip_size)
    (cx2_fm, cx2_fs, cx2_score) = assign_output
    # Record initial assignments 
    res.assign_time = tt1.toc()
    res.cx2_fm      = np.array(cx2_fm)
    res.cx2_fs      = np.array(cx2_fs)
    res.cx2_score   = cx2_score
Example #4
def parallelize_tasks(task_list, num_procs, task_lbl='', verbose=True):
    '''
    Used for embarrassingly parallel tasks, which write output to disk
    '''
    nTasks = len(task_list)
    msg = ('Distributing %d %s tasks to %d processes' %
           (nTasks, task_lbl, num_procs) if num_procs > 1 else
           'Executing %d %s tasks in serial' % (nTasks, task_lbl))
    with helpers.Timer(msg=msg):
        if num_procs > 1:
            # Parallelize tasks
            return _compute_in_parallel(task_list, num_procs, task_lbl,
                                        verbose)
        else:
            return _compute_in_serial(task_list, task_lbl, verbose)
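The _compute_in_parallel and _compute_in_serial helpers called above are not part of the listing. A plausible sketch of the parallel branch, assuming each entry in task_list is a picklable (func, args) pair (an assumption, not the project's actual task format), could be built on multiprocessing.Pool:

import multiprocessing

def _run_task(task):
    # Unpack one (func, args) work item and execute it.
    func, args = task
    return func(*args)

def _compute_in_parallel(task_list, num_procs, task_lbl='', verbose=True):
    # Fan the independent tasks out over a worker pool and collect the results.
    with multiprocessing.Pool(processes=num_procs) as pool:
        results = pool.map(_run_task, task_list)
    if verbose:
        print('[parallel] finished %d %s tasks' % (len(results), task_lbl))
    return results

Note that pool.map can only dispatch functions that are defined at module level, since the work items must be pickled before being sent to the worker processes.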
Example #5
def main(source: str, remove: bool = False) -> None:
    """
    Executes an NRN stage.

    :param str source: abbreviation for the source province / territory.
    :param bool remove: removes pre-existing validation log within the data/processed directory for the specified
        source, default False.
    """

    try:

        with helpers.Timer():
            stage = Stage(source, remove)
            stage.execute()

    except KeyboardInterrupt:
        logger.exception("KeyboardInterrupt: Exiting program.")
        sys.exit(1)
Example #6
def __build_result_verify_step(hs, res, cx2_kpts, cx2_rchip_size, verbose):
    ' 2) Spatially verify the assigned matches'
    cx2_fm    = res.cx2_fm
    cx2_fs    = res.cx2_fs
    cx2_score = res.cx2_score
    if verbose:
        num_assigned = np.array([len(fm) for fm in cx2_fm]).sum()
        print('[mc2] verify %d assigned matches' % (num_assigned))
    tt2 = helpers.Timer(verbose=False)
    sv_output = spatially_verify_matches(res.qcx, cx2_kpts, cx2_rchip_size, cx2_fm, cx2_fs, cx2_score)
    (cx2_fm_V, cx2_fs_V, cx2_score_V) = sv_output
    # Record verified assignments 
    res.verify_time = tt2.toc()
    res.cx2_fm_V    = np.array(cx2_fm_V)
    res.cx2_fs_V    = np.array(cx2_fs_V)
    res.cx2_score_V = cx2_score_V
    if verbose:
        num_verified = np.array([len(fm) for fm in cx2_fm_V]).sum()
        print('[mc2] verified %d matches' % (num_verified))
Example #7
def main(source: str, remove: bool = False, exclude_old: bool = False) -> None:
    """
    Executes an NRN stage.

    :param str source: abbreviation for the source province / territory.
    :param bool remove: removes pre-existing files within the data/interim directory for the specified source, default
        False.
    :param bool exclude_old: excludes the previous NRN vintage for the specified source from being removed if
        remove=True, default False. Option has no effect if remove=False.
    """

    try:

        with helpers.Timer():
            stage = Stage(source, remove, exclude_old)
            stage.execute()

    except KeyboardInterrupt:
        logger.exception("KeyboardInterrupt: Exiting program.")
        sys.exit(1)
Example #8
def test(options):
    log = helpers.Logger(options.verbose)
    timer = helpers.Timer()
    # Load data =========================================================
    log.info('Reading corpora')
    # Read vocabs
    vocab = helpers.get_dictionaries(options, test=True)
    src_dic, trg_dic = vocab['src'], vocab['trg']
    # Read test
    tests_data = src_dic.read_corpus(options.test_src)
    # Test output
    if not options.test_out:
        options.test_out = helpers.exp_filename(options, 'test.out')
    # Get target language model
    lang_model = helpers.get_language_model(options,
                                            None,
                                            trg_dic.size(),
                                            test=True)
    # Create model ======================================================
    log.info('Creating model')
    s2s = helpers.build_model(options, vocab, lang_model, test=True)
    # Print configuration ===============================================
    if options.verbose:
        Opts.print_config(options,
                          src_dict_size=src_dic.size(),
                          trg_dict_size=trg_dic.size())
    # Start testing =====================================================
    log.info('Start running on test set, buckle up!')
    timer.restart()
    translations = []
    s2s.set_test_flag()
    for i, x in enumerate(tests_data):
        y = s2s.translate(x, beam_size=options.beam_size)
        translations.append(' '.join([trg_dic.get_word(w) for w in y[1:-1]]))
    translations = np.asarray(translations, dtype=str)
    np.savetxt(options.test_out, translations, fmt='%s')
    if options.test_dst is not None:
        BLEU, details = evaluation.bleu_score(options.test_dst,
                                              options.test_out)
        log.info(details)
    log.info('Finished running on test set %.2f elapsed.' % timer.tick())
Example #9
def parallelize_tasks(task_list, num_procs, task_lbl, verbose=True):
    '''
    Used for embarrassingly parallel tasks, which write output to disk
    '''
    nTasks = len(task_list)
    msg = ('Distributing %d %s tasks to %d processes' %
           (nTasks, task_lbl, num_procs) if num_procs > 1 else
           'Executing %d %s tasks in serial' % (nTasks, task_lbl))
    with helpers.Timer(msg=msg):
        if num_procs >= 1:
            # Hacky patch: force serial computation to hopefully avoid segfaults -MD
            print('[parallel] Forced serial processing')
            #return _compute_in_parallel(task_list, num_procs, task_lbl, verbose)
            return _compute_in_serial(task_list, task_lbl, verbose)
        else:
            print('[parallel] Natural serial processing')
            return _compute_in_serial(task_list, task_lbl, verbose)
Example #10
def train(options):
    log = helpers.Logger(options.verbose)
    timer = helpers.Timer()
    # Load data =========================================================
    log.info('Reading corpora')
    # Read vocabs
    vocab = helpers.get_dictionaries(options)
    src_dic, trg_dic = vocab['src'], vocab['trg']
    # Read training
    train_src_data = src_dic.read_corpus(options.train_src)
    train_trg_data = trg_dic.read_corpus(options.train_dst)

    max_src_len, max_trg_len = options.max_src_len, options.max_trg_len
    if max_src_len > 0 or max_trg_len > 0:
        train_src_data, train_trg_data = corpus_filter(train_src_data,
                                                       train_trg_data,
                                                       max_src_len,
                                                       max_trg_len)
        assert len(train_src_data) == len(
            train_trg_data
        ), 'Size of source corpus and the target corpus must be the same!!'
    # Read validation
    valid_src_data = src_dic.read_corpus(options.valid_src)
    valid_trg_data = trg_dic.read_corpus(options.valid_dst)
    # Validation output
    if not options.valid_out:
        options.valid_out = helpers.exp_filename(options, 'valid.out')
    # Get target language model
    lang_model = helpers.get_language_model(options, train_trg_data,
                                            trg_dic.size())
    # Create model ======================================================
    log.info('Creating model')
    s2s = helpers.build_model(options, vocab, lang_model)

    # Trainer ==========================================================
    trainer = helpers.get_trainer(options, s2s)
    log.info('Using ' + options.trainer + ' optimizer')
    # Print configuration ===============================================
    if options.verbose:
        Opts.print_config(options,
                          src_dict_size=src_dic.size(),
                          trg_dict_size=trg_dic.size())
    # Create batch loaders ===============================================
    log.info('Creating batch loaders')
    trainbatchloader = BatchLoader(train_src_data, train_trg_data,
                                   options.batch_size)
    devbatchloader = BatchLoader(valid_src_data, valid_trg_data,
                                 options.dev_batch_size)
    # Start training ====================================================
    log.info('starting training')
    timer.restart()
    train_loss = 0.
    processed = 0
    best_bleu = -1
    bleu = -1
    deadline = 0
    i = 0
    for epoch in xrange(options.num_epochs):
        for x, y in trainbatchloader:
            s2s.set_train_flag()
            processed += sum(map(len, y))
            bsize = len(y)
            # Compute loss
            loss = s2s.calculate_loss(x, y)
            # Backward pass and parameter update
            train_loss += loss.scalar_value() * bsize

            loss.backward()
            trainer.update()

            if (i + 1) % options.check_train_error_every == 0:
                # Check average training error from time to time
                logloss = train_loss / processed
                ppl = np.exp(logloss)
                trainer.status()
                log.info(
                    " Training_loss=%f, ppl=%f, time=%f s, tokens processed=%d"
                    % (logloss, ppl, timer.tick(), processed))
                train_loss = 0
                processed = 0

            if (i + 1) % options.check_valid_error_every == 0:
                # Check generalization error on the validation set from time to time
                s2s.set_test_flag()
                dev_loss = 0
                dev_processed = 0
                timer.restart()
                for x, y in devbatchloader:
                    dev_processed += sum(map(len, y))
                    bsize = len(y)
                    loss = s2s.calculate_loss(x, y)
                    dev_loss += loss.scalar_value() * bsize
                dev_logloss = dev_loss / dev_processed
                dev_ppl = np.exp(dev_logloss)
                log.info(
                    "[epoch %d] Dev loss=%f, ppl=%f, time=%f s, tokens processed=%d"
                    %
                    (epoch, dev_logloss, dev_ppl, timer.tick(), dev_processed))

            if (i + 1) % options.valid_bleu_every == 0:
                # Check BLEU score on the validation set from time to time
                s2s.set_test_flag()
                log.info('Start translating validation set, buckle up!')
                timer.restart()
                with open(options.valid_out, 'w+') as fp:
                    for x in valid_src_data:
                        y_hat = s2s.translate(x, beam_size=options.beam_size)
                        translation = [
                            trg_dic.get_word(w) for w in y_hat[1:-1]
                        ]
                        fp.write(' '.join(translation))
                        fp.write('\n')
                bleu, details = evaluation.bleu_score(options.valid_dst,
                                                      options.valid_out)
                log.info('Finished translating validation set %.2f elapsed.' %
                         timer.tick())
                log.info(details)
                # Early stopping : save the latest best model
                if bleu > best_bleu:
                    best_bleu = bleu
                    log.info('Best BLEU score up to date, saving model to %s' %
                             options.model)
                    s2s.save(options.model)
                    deadline = 0
                else:
                    deadline += 1
                if options.patience > 0 and deadline > options.patience:
                    log.info('No improvement since %d epochs, early stopping '
                             'with best validation BLEU score: %.3f' %
                             (deadline, best_bleu))
                    sys.exit()
            i += 1

    #if bleu > best_bleu:
    #	s2s.save(options.model)
    s2s.save(options.model)
Example #11

@autojit
def sum2d5(arr):
    M, N = arr.shape
    result = 0.0
    result = arr.sum()
    return result


if __name__ == '__main__':
    input_sizes = [1, 10, 100, 1000, 10000]
    for sz in input_sizes:
        print('\n--------------------\nInput Size: ' + str(sz))
        arr = np.random.rand(sz, sz)
        with helpers.Timer('with numba'):
            res = sum2d(arr)
            print(res)
        with helpers.Timer('with numpy'):
            res2 = sum2d4(arr)
            print(res2)
        with helpers.Timer('without numpy and numba'):
            res2 = sum2d5(arr)
            print(res2)
        '''
        with helpers.Timer('without numba (but smart)'):
            res3 = sum2d3(arr)
            print(res3)
        with helpers.Timer('without numba (naive)'):
            res2 = sum2d2(arr)
            print(res2)
        '''
Example #12
def test_realdata2():
    from helpers import printWARN, printINFO
    import warnings
    import numpy.linalg as linalg
    import numpy as np
    import scipy.sparse as sparse
    import scipy.sparse.linalg as sparse_linalg
    import load_data2
    import params
    import draw_func2 as df2
    import helpers
    import spatial_verification
    #params.reload_module()
    #load_data2.reload_module()
    #df2.reload_module()

    db_dir = load_data2.MOTHERS
    hs = load_data2.HotSpotter(db_dir)
    assign_matches = hs.matcher.assign_matches
    qcx = 0
    cx = hs.get_other_cxs(qcx)[0]
    fm, fs, score = hs.get_assigned_matches_to(qcx, cx)
    # Get chips
    rchip1 = hs.get_chip(qcx)
    rchip2 = hs.get_chip(cx)
    # Get keypoints
    kpts1 = hs.get_kpts(qcx)
    kpts2 = hs.get_kpts(cx)
    # Get feature matches 
    kpts1_m = kpts1[fm[:, 0], :].T
    kpts2_m = kpts2[fm[:, 1], :].T
    
    title='(qx%r v cx%r)\n #match=%r' % (qcx, cx, len(fm))
    df2.show_matches2(rchip1, rchip2, kpts1,  kpts2, fm, fs, title=title)

    np.random.seed(6)
    subst = helpers.random_indexes(len(fm),len(fm))
    kpts1_m = kpts1[fm[subst, 0], :].T
    kpts2_m = kpts2[fm[subst, 1], :].T

    df2.reload_module()
    df2.SHOW_LINES = True
    df2.ELL_LINEWIDTH = 2
    df2.LINE_ALPHA = .5
    df2.ELL_ALPHA  = 1
    df2.reset()
    df2.show_keypoints(rchip1, kpts1_m.T, fignum=0, plotnum=121)
    df2.show_keypoints(rchip2, kpts2_m.T, fignum=0, plotnum=122)
    df2.show_matches2(rchip1, rchip2, kpts1_m.T,  kpts2_m.T, title=title,
                      fignum=1, vert=True)

    spatial_verification.reload_module()
    with helpers.Timer():
        aff_inliers1 = spatial_verification.aff_inliers_from_ellshape2(kpts1_m, kpts2_m, xy_thresh_sqrd)
    with helpers.Timer():
        aff_inliers2 = spatial_verification.aff_inliers_from_ellshape(kpts1_m, kpts2_m, xy_thresh_sqrd)

    # Homogenize+Normalize
    xy1_m    = kpts1_m[0:2,:] 
    xy2_m    = kpts2_m[0:2,:]
    (xyz_norm1, T1) = spatial_verification.homogo_normalize_pts(xy1_m[:,aff_inliers1]) 
    (xyz_norm2, T2) = spatial_verification.homogo_normalize_pts(xy2_m[:,aff_inliers1])

    H_prime = spatial_verification.compute_homog(xyz_norm1, xyz_norm2)
    H = linalg.solve(T2, H_prime).dot(T1)                # Unnormalize

    Hdet = linalg.det(H)

    # Estimate final inliers
    acd1_m   = kpts1_m[2:5,:] # keypoint shape matrix [a 0; c d] matches
    acd2_m   = kpts2_m[2:5,:]
    # Precompute the determinant of lower triangular matrix (a*d - b*c); b = 0
    det1_m = acd1_m[0] * acd1_m[2]
    det2_m = acd2_m[0] * acd2_m[2]

    # Matrix Multiply xyacd matrix by H
    # [[A, B, X],      
    #  [C, D, Y],      
    #  [E, F, Z]] 
    # dot 
    # [(a, 0, x),
    #  (c, d, y),
    #  (0, 0, 1)] 
    # = 
    # [(a*A + c*B + 0*X,   0*A + d*B + 0*X,   x*A + y*B + 1*X),
    #  (a*C + c*D + 0*Y,   0*C + d*D + 0*Y,   x*C + y*D + 1*Y),
    #  (a*E + c*F + 0*Z,   0*E + d*F + 0*Z,   x*E + y*F + 1*Z)]
    # =
    # [(a*A + c*B,               d*B,         x*A + y*B + X),
    #  (a*C + c*D,               d*D,         x*C + y*D + Y),
    #  (a*E + c*F,               d*F,         x*E + y*F + Z)]
    # # IF x=0 and y=0
    # =
    # [(a*A + c*B,               d*B,         0*A + 0*B + X),
    #  (a*C + c*D,               d*D,         0*C + 0*D + Y),
    #  (a*E + c*F,               d*F,         0*E + 0*F + Z)]
    # =
    # [(a*A + c*B,               d*B,         X),
    #  (a*C + c*D,               d*D,         Y),
    #  (a*E + c*F,               d*F,         Z)]
    # --- 
    #  A11 = a*A + c*B
    #  A21 = a*C + c*D
    #  A31 = a*E + c*F
    #  A12 = d*B
    #  A22 = d*D
    #  A32 = d*F
    #  A13 = X
    #  A23 = Y
    #  A33 = Z
    #
    # det(A) = A11*(A22*A33 - A23*A32) - A12*(A21*A33 - A23*A31) + A13*(A21*A32 - A22*A31)
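    # Since det(H.dot(K)) = det(H) * det(K), each keypoint shape determinant
    # simply scales by Hdet under the transform (computed as det1_mAt below).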

    det1_mAt = det1_m * Hdet
    # Check Error in position and scale
    xy_sqrd_err = (x1_mAt - x2_m)**2 + (y1_mAt - y2_m)**2
    scale_sqrd_err = det1_mAt / det2_m
    # Check to see if outliers are within bounds
    xy_inliers = xy_sqrd_err < xy_thresh_sqrd
    s1_inliers = scale_sqrd_err > scale_thresh_low
    s2_inliers = scale_sqrd_err < scale_thresh_high
    _inliers, = np.where(np.logical_and(np.logical_and(xy_inliers, s1_inliers), s2_inliers))

    xy1_mHt = transform_xy(H, xy1_m)                        # Transform Kpts1 to Kpts2-space
    sqrd_dist_error = np.sum( (xy1_mHt - xy2_m)**2, axis=0) # Final Inlier Errors
    inliers = sqrd_dist_error < xy_thresh_sqrd



    df2.show_matches2(rchip1, rchip2, kpts1_m.T[best_inliers1], kpts2_m.T[aff_inliers1], title=title, fignum=2, vert=False)
    df2.show_matches2(rchip1, rchip2, kpts1_m.T[best_inliers2], kpts2_m.T[aff_inliers2], title=title, fignum=3, vert=False)
    df2.present(wh=(600,400))
Example #13
#### Modify the below to get different distributions!

from comp_implt import *
import helpers
import time

s1_belief = beliefs[12]
given_statement = beliefs[0]

correction = phrase([rubicon])

qud = "hang"
alpha = 1.  #try different values: 1.0: normal optimality; higher settings: sharper distribution

TIME = helpers.Timer()
smoketest = True  #1: calculate small utterance prior in s_1; 0: calculate full big utterance prior in s_1
ext = "reverse"  #useful plot file mnemonic
if __name__ == "__main__":

    s1_attorney = S(alpha, swk, beliefs, s1_belief, quds)
    Listener = L(alpha, swk, beliefs, given_statement, quds)

    with TIME("l0 calculation", x=True):
        #lit listener
        l0_info = "Lit. Listener distribution.\n\n- Correction: " + str(
            correction)
        l0_dist = Listener.L0(correction)
        helpers.plotter(l0_dist,
                        output="plots/l0_" + str(ext) + ".png",
                        addinfo=l0_info)
Example #14
def train(opt):
    log = helpers.Logger(opt.verbose)
    timer = helpers.Timer()
    # Load data =========================================================
    log.info('Reading corpora')
    # Read vocabs
    widss, ids2ws, widst, ids2wt = helpers.get_dictionaries(opt)

    # Read training
    trainings_data = data.read_corpus(opt.train_src, widss)
    trainingt_data = data.read_corpus(opt.train_dst, widst)
    # Read validation
    valids_data = data.read_corpus(opt.valid_src, widss)
    validt_data = data.read_corpus(opt.valid_dst, widst)
    # Validation output
    if not opt.valid_out:
        opt.valid_out = helpers.exp_filename(opt, 'valid.out')
    # Get target language model
    lang_model = helpers.get_language_model(opt, trainingt_data, widst)
    # Create model ======================================================
    log.info('Creating model')
    s2s = helpers.build_model(opt, widss, widst, lang_model)

    # Trainer ==========================================================
    trainer = helpers.get_trainer(opt, s2s)
    log.info('Using ' + opt.trainer + ' optimizer')
    # Print configuration ===============================================
    if opt.verbose:
        options.print_config(opt,
                             src_dict_size=len(widss),
                             trg_dict_size=len(widst))
    # Create batch loaders ===============================================
    log.info('Creating batch loaders')
    trainbatchloader = data.BatchLoader(trainings_data, trainingt_data,
                                        opt.batch_size)
    devbatchloader = data.BatchLoader(valids_data, validt_data,
                                      opt.dev_batch_size)
    # Start training ====================================================
    log.info('starting training')
    timer.restart()
    train_loss = 0
    processed = 0
    best_bleu = -1
    deadline = 0
    i = 0
    for epoch in range(opt.num_epochs):
        for x, y in trainbatchloader:
            s2s.set_train_mode()
            processed += sum(map(len, y))
            bsize = len(y)
            # Compute loss
            loss = s2s.calculate_loss(x, y)
            # Backward pass and parameter update
            loss.backward()
            trainer.update()
            train_loss += loss.scalar_value() * bsize
            if (i + 1) % opt.check_train_error_every == 0:
                # Check average training error from time to time
                logloss = train_loss / processed
                ppl = np.exp(logloss)
                trainer.status()
                log.info(
                    " Training_loss=%f, ppl=%f, time=%f s, tokens processed=%d"
                    % (logloss, ppl, timer.tick(), processed))
                train_loss = 0
                processed = 0
            if (i + 1) % opt.check_valid_error_every == 0:
                # Check generalization error on the validation set from time to time
                s2s.set_test_mode()
                dev_loss = 0
                dev_processed = 0
                timer.restart()
                for x, y in devbatchloader:
                    dev_processed += sum(map(len, y))
                    bsize = len(y)
                    loss = s2s.calculate_loss(x, y, test=True)
                    dev_loss += loss.scalar_value() * bsize
                dev_logloss = dev_loss / dev_processed
                dev_ppl = np.exp(dev_logloss)
                log.info(
                    "[epoch %d] Dev loss=%f, ppl=%f, time=%f s, tokens processed=%d"
                    %
                    (epoch, dev_logloss, dev_ppl, timer.tick(), dev_processed))

            if (i + 1) % opt.valid_bleu_every == 0:
                # Check BLEU score on the validation set from time to time
                s2s.set_test_mode()
                log.info('Start translating validation set, buckle up!')
                timer.restart()
                with open(opt.valid_out, 'w+') as f:
                    for x in valids_data:
                        y_hat = s2s.translate(x, beam_size=opt.beam_size)
                        translation = [ids2wt[w] for w in y_hat[1:-1]]
                        print(' '.join(translation), file=f)
                bleu, details = evaluation.bleu_score(opt.valid_dst,
                                                      opt.valid_out)
                log.info('Finished translating validation set %.2f elapsed.' %
                         timer.tick())
                log.info(details)
                # Early stopping : save the latest best model
                if bleu > best_bleu:
                    best_bleu = bleu
                    log.info('Best BLEU score up to date, saving model to %s' %
                             s2s.model_file)
                    s2s.save()
                    deadline = 0
                else:
                    deadline += 1
                if opt.patience > 0 and deadline > opt.patience:
                    log.info('No improvement since %d epochs, early stopping '
                             'with best validation BLEU score: %.3f' %
                             (deadline, best_bleu))
                    exit()
            i = i + 1
        trainer.update_epoch()
Example #15
import helpers

M1 = 3
M2 = 5
N = 1000


res = set()
t1 = helpers.Timer(True)
for i in range((N // M1)+1):
    for j in range((N // M2)+1):
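        # (i * M1, 1)[i == 0] indexes a 2-tuple with the bool i == 0, so it
        # yields 1 when i is 0 and i * M1 otherwise (and likewise for j).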
        S = (i * M1,1)[i == 0] * (j * M2,1)[j == 0]
        if S > 1 and S < N:
            res.add(S)
t1.stop()
t1.show()

sumi = 0
for i in res:
    sumi += i

print(res)
print(sumi)