Example #1
def solution(limit: int) -> int:
    """Generate triangle numbers and their factors till we count 500."""
    triangle = 1
    n = 2
    while True:
        num_factors = len(factors(triangle))
        if num_factors > limit:
            return triangle
        triangle += n
        n += 1
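These Euler snippets lean on a factors helper that is defined elsewhere in their projects. A minimal sketch of such a helper, assuming it should return every divisor of n (including 1 and n), could look like this:

def factors(n):
    """Return the set of all divisors of n, including 1 and n."""
    divs = set()
    d = 1
    while d * d <= n:
        if n % d == 0:
            divs.add(d)
            divs.add(n // d)
        d += 1
    return divs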
Example #2
File: 012.py Project: hoodakaushal/Euler
def trifecta(n):
    """

    :type n: int
    """
    cand = 0
    i = 1
    cand_count = 0
    while cand_count <= n:
        cand += i
        i += 1
        cand_count = len(tools.factors(cand))
    return cand
Example #3
File: 021.py Project: hoodakaushal/Euler
def amicables(n):
    ans = 0
    sumdivs = {}
    for i in range(1, n + 1):
        sumdivs[i] = sum(tools.factors(i)) - i
    print(sumdivs)
    for a in range(1, n + 1):
        b = sumdivs[a]
        if 1 <= b <= n and a != b and sumdivs[b] == a:
            print(a, b)
            ans += a + b
    return ans // 2
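As a quick sanity check of the amicable condition, here is a standalone sketch (not the project's tools module) applied to the classic pair:

def divisor_sum(n):
    """Sum of the proper divisors of n."""
    return sum(d for d in range(1, n) if n % d == 0)

assert divisor_sum(220) == 284 and divisor_sum(284) == 220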
Example #4
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('-data',
                        type=str,
                        choices=[
                            'MNIST', 'FashionMNIST', 'CIFAR10', 'CIFAR100',
                            'BWCIFAR', 'SMALL'
                        ],
                        help='dataset to be used in the experiment')

    parser.add_argument('-n_hidden',
                        type=int,
                        default=128,
                        help='number of hidden units inside the model')
    parser.add_argument('-n_layers',
                        type=int,
                        default=1,
                        help='number of layers for multi-layered models')
    parser.add_argument('-n_iter',
                        type=int,
                        default=5,
                        help='number of iterations to be done in Swarm layers')

    parser.add_argument('-non_lin',
                        default='relu',
                        choices=['relu', 'elu', 'lrelu'],
                        help='non-linearity used between different layers')

    parser.add_argument('-bs', type=int, default=100, help='batch size')
    parser.add_argument(
        '-wc',
        type=float,
        default=60,
        help='allowed wall clock time for training (in minutes)')
    parser.add_argument(
        '-update_interval',
        type=float,
        default=10,
        help='update interval to generate trace and sample plots (in minutes)')

    parser.add_argument('-lr', type=float, default=0.01, help='learning rate')

    parser.add_argument('-no_cuda',
                        action='store_true',
                        help="don't use CUDA even if it is available")

    parser.add_argument(
        '-name',
        type=str,
        default=None,
        help=
        'you can provide a model name that will be parsed into cmd line options'
    )

    parser.add_argument('-dry_run',
                        action='store_true',
                        help='just print out the model name and exit')

    parser.add_argument(
        '-to_stdout',
        action='store_true',
        help='log all output to stdout instead of modelname/log')

    parser.add_argument('-bt_horizon',
                        type=float,
                        default=0.1,
                        help='backtracking horizon')
    parser.add_argument('-bt_alpha',
                        type=float,
                        default=0.9,
                        help='backtracking learning rate discount factor')

    parser.add_argument('-cond',
                        action='store_true',
                        help='do class conditional modeling')

    parser.add_argument('-resume',
                        type=str,
                        default=None,
                        help='resume model from modelname/best.pkl')

    parser.add_argument('-learn_loc', type=bool, default=False)

    opt = parser.parse_args()

    if opt.name is not None:
        opt = ModelName().parse(opt.name, opt)
    name = ModelName().create(opt)
    assert opt.name is None or name == opt.name

    print(name)
    if opt.dry_run:
        exit()

    import sys

    name_part = name + ".part"
    try:
        os.mkdir(name_part)
    except FileExistsError:
        pass

    if not opt.to_stdout:
        sys.stdout = open(name_part + '/log', 'w')

    opt.cuda = not opt.no_cuda

    C, H, W, K = {
        'MNIST': (1, 28, 28, 10),
        'FashionMNIST': (1, 28, 28, 10),
        'CIFAR10': (3, 32, 32, 10),
        'CIFAR100': (3, 32, 32, 100),
        'BWCIFAR': (1, 32, 32, 10),
        'SMALL': (3, 16, 16, 10),
    }[opt.data]
    n_classes = 256  # not dependent on the dataset so far

    non_linearity = {
        'elu': nn.ELU(),
        'relu': nn.ReLU(),
        'lrelu': nn.LeakyReLU()
    }[opt.non_lin]

    n_in = opt.n_hidden
    n_hidden = opt.n_hidden
    n_layers = opt.n_layers
    n_iter = opt.n_iter

    # In case the desired batch size does not fit into CUDA memory,
    # fall back to accumulating gradients over batch_iter smaller batches.
    # Try the largest batch size (batch_iter=1) first, then shrink the batch
    # size (and grow batch_iter) through the factors of opt.bs until a
    # configuration no longer throws an out-of-memory error.
    for batch_iter in factors(opt.bs):

        print(type(opt.bs), type(int(opt.bs // batch_iter)))

        print("trying batch size {} in {} iterations".format(
            opt.bs // batch_iter, batch_iter))

        try:
            layers = []
            n_out_last = n_in
            for i in range(n_layers):
                if i < n_layers - 1:
                    layers.append(
                        SwarmLayer(n_in=n_out_last,
                                   n_out=n_hidden,
                                   n_hidden=n_hidden,
                                   n_iter=n_iter,
                                   pooling='CAUSAL'))
                    layers.append(non_linearity)
                    n_out_last = n_hidden
                else:
                    layers.append(
                        SwarmLayer(n_in=n_out_last,
                                   n_out=n_classes,
                                   n_hidden=n_hidden,
                                   n_iter=n_iter,
                                   pooling='CAUSAL'))

            model = SwarmTransformer(layers,
                                     C=C,
                                     W=W,
                                     H=H,
                                     K=K,
                                     n_emb=n_in,
                                     learnable_location_features=opt.learn_loc)

            device = torch.device('cuda' if opt.cuda else 'cpu')
            if torch.cuda.device_count() > 1:
                model = nn.DataParallel(model)

            model.to(device)

            print(model)

            print("backtracking {}% epochs with lr decrease factor {}".format(
                100 * opt.bt_horizon, opt.bt_alpha))

            # create datasets with batch sizes split by batch_iter
            dl_train, dl_val = create_datasets(int(opt.bs // batch_iter),
                                               opt.data)

            sample_fn = create_sample_fn(model, C, H, W, K, device)

            optimizer = optim.Adam(model.parameters(), lr=opt.lr)

            if opt.resume is not None:
                resume(model, optimizer, opt.resume)
                for param_group in optimizer.param_groups:
                    param_group['lr'] = opt.lr

            # create a tracing object that records training and validation losses, other
            # metrics, and 13 individual weights of every model parameter tensor.
            # Every now and then it plots learning curves, weight traces and model samples
            # to modelname/[metrics.png, weights.png, samples.png] respectively.
            traces = Trace(model, 13, sample_fn, name=name_part, columns=4)

            best_val_loss = math.inf
            val_loss_history = [np.inf]

            t_start = time.time()
            t_update = 0  # timer to count when the next traces update is due
            t_no_training = 0  # time spend generating traces and samples
            e = 0  # count the epochs
            while True:
                # inform the Traces object that a new epoch has begun
                traces.on_epoch_begin(e)

                for i, (X, Y) in enumerate(dl_train):
                    X = X.to(device)
                    Y = Y.to(device)
                    model.train()

                    if i % batch_iter == 0:
                        optimizer.zero_grad()
                        norm = 0

                    loss, _ = model(X, Y)
                    loss = loss.mean()

                    (loss / batch_iter).backward()

                    if (i + 1) % batch_iter == 0:
                        # do an optimizer update step only every batch_iter iterations
                        norm = torch.nn.utils.clip_grad_norm_(
                            model.parameters(), math.inf, norm_type=1)
                        optimizer.step()

                    print(i,
                          "%.4f (norm=%.4g)" % (loss.item(), norm),
                          end="\r")

                    # a dictionary of values and metrics that will be logged by the Traces object
                    logs = {'loss': loss.item(), 'norm': norm}

                    time_is_up = time.time(
                    ) > t_start + 60 * opt.wc + t_no_training  #or i>=250
                    if time_is_up:
                        print("preparing to complete")

                    if i + 1 == len(dl_train) or time_is_up:
                        # we are done with the last iteration
                        # -> kick off a validation epoch now and add the val_loss to the log
                        val_loss = validate(model, dl_val, device)
                        print("%d: val_loss = %.4f" % (e, val_loss))
                        logs['val_loss'] = val_loss

                    logs['lr'] = [p['lr'] for p in optimizer.param_groups]

                    # now actually log the metrics for iteration i
                    traces.on_batch_end(i, logs)

                    sys.stdout.flush()

                    if time_is_up:
                        break

                last_worse = np.argwhere(
                    np.array(val_loss_history) > val_loss).max()
                print("last_worse", last_worse)
                if last_worse < min(e * (1.0 - opt.bt_horizon),
                                    e - 5) or val_loss > max(val_loss_history):
                    # the last validation result that was worse than this one lies more than bt_horizon% of epochs back
                    # or this validation loss is worse than everything before
                    # -> we will discard this model and backtrack to the best we had so far

                    if not time_is_up:
                        # but not if computation time is already up
                        checkpoint_path = name_part + "/best.pkl"

                        keep_lr = [
                            param_group['lr']
                            for param_group in optimizer.param_groups
                        ]

                        resume(model, optimizer, checkpoint_path, name)

                        # once we backtracked, we decrease learning rate by factor bt_alpha
                        for param_group, lr in zip(optimizer.param_groups,
                                                   keep_lr):
                            param_group['lr'] = opt.bt_alpha * lr

                        val_loss = checkpoint['best_val_loss']
                        print("back tracking to {:g}".format(val_loss))

                val_loss_history.append(val_loss)

                if val_loss < best_val_loss:
                    # this model is better than everything before,
                    # -> let's save it as a check point
                    print(
                        "saving best model at val_loss={:g}".format(val_loss))
                    checkpoint = {}
                    checkpoint['best_val_loss'] = val_loss
                    checkpoint['model'] = model.state_dict()
                    checkpoint['optimizer'] = optimizer.state_dict()
                    checkpoint['name'] = name

                    checkpoint_path = name_part + "/best.pkl"
                    torch.save(checkpoint, checkpoint_path)

                    best_val_loss = val_loss

                if time.time(
                ) > t_update + opt.update_interval * 60 or time_is_up:
                    # it's time to plot some learning curves, weight traces, and sample figures
                    # this can take some time, so we don't do it all too often
                    t_no_training = t_no_training - time.time()

                    # this does the actual magic
                    traces.on_epoch_end(e)

                    # reset the update timer and record how much time we spent here,
                    # so that it does not count against the training time budget
                    t_update = time.time()
                    t_no_training = t_no_training + time.time()
                e += 1

                if time_is_up:
                    break

            print(
                "{}s spent preparing traces and samples".format(t_no_training))
            os.rename(name_part, name)

            break  # the loop over batch iterations

        except RuntimeError:
            print("failed with batch size {}".format(opt.bs // batch_iter))
            exc_info = sys.exc_info()
            try:
                del model
            except NameError:
                pass
            # display the original exception even after cleaning up the model
            traceback.print_exception(*exc_info)
            del exc_info
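The training loop above accumulates gradients over batch_iter smaller batches whenever the requested batch size does not fit into memory. A self-contained sketch of that pattern, with hypothetical stand-ins for the model and data, looks like this:

import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(8, 1)                  # hypothetical stand-in model
optimizer = optim.Adam(model.parameters(), lr=0.01)
criterion = nn.MSELoss()
data = [(torch.randn(4, 8), torch.randn(4, 1)) for _ in range(8)]

batch_iter = 4                           # split each logical batch into 4 micro-batches
for i, (X, Y) in enumerate(data):
    if i % batch_iter == 0:
        optimizer.zero_grad()            # start a new accumulation window
    loss = criterion(model(X), Y)
    (loss / batch_iter).backward()       # scale so accumulated grads match the full-batch mean
    if (i + 1) % batch_iter == 0:
        optimizer.step()                 # update only once per full logical batch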
Example #5
def proper_divisors_sum(n):
    return sum(factors(n))-n
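A quick check, assuming factors(n) returns every divisor of n including n itself:

assert proper_divisors_sum(28) == 28  # 1 + 2 + 4 + 7 + 14, so 28 is a perfect number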
Example #6
File: 3.py Project: slezica/xiter-euler
def solve():
    return factors(600851475143).max()
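For reference, the same result can be computed with plain trial division, independent of the xiter-euler factors API (a sketch):

def largest_prime_factor(n):
    d = 2
    while d * d <= n:
        if n % d == 0:
            n //= d      # strip one prime factor at a time
        else:
            d += 1
    return n             # what remains is the largest prime factor

print(largest_prime_factor(600851475143))  # 6857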
Example #7
from tools import factors

factor_sums = {}
for k in range(1, 10000):
    factor_sums[k] = sum(factors(k)) - k

summation = 0
for key, value in factor_sums.items():
    try:
        if factor_sums[value] == key and value != key:
            summation += value
    except KeyError:
        pass

print(summation)  # the loop visits each pair from both sides, so both members get added exactly once
Example #8
File: 5.py Project: slezica/xiter-euler
def solve():
    return factors(*range(1, 21), reduce_with = max).product()
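This computes the smallest number evenly divisible by every integer from 1 to 20. An equivalent sketch using only the standard library:

from functools import reduce
from math import gcd

def lcm(a, b):
    return a * b // gcd(a, b)

print(reduce(lcm, range(1, 21)))  # 232792560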