Example #1
def bresenham_circle(bg, xc, yc, R, size):
    x = 0
    y = R

    err = 2 * (1 - R)
    err1 = 0
    err2 = 0

    while y >= 0:
        plot_all(bg, x, y, xc, yc, size)
        if err < 0:
            err1 = 2 * err + 2 * y - 1
            if err1 <= 0:
                x += 1
                err += 2 * x + 1
            else:
                x += 1
                y -= 1
                err += 2 * (x - y + 1)
        elif err > 0:
            err2 = 2 * err - 2 * x - 1
            if err2 <= 0:
                x += 1
                y -= 1
                err += 2 * (x - y + 1)
            else:
                y -= 1
                err += -2 * y + 1
        else:
            x += 1
            y -= 1
            err += 2 * (x - y + 1)
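
Every circle routine in these examples hands pixel output to a plot_all(bg, x, y, xc, yc, size) helper that is never shown. The following is a minimal sketch of what such a helper plausibly does, assuming bg is a 2D grid of rows and that the helper mirrors each computed offset across both axes (four-way symmetry); the grid representation and the ignored size argument are assumptions, not the original implementation.

def plot_all(bg, x, y, xc, yc, size):
    # Hypothetical stand-in: mark the four symmetric points around the
    # centre (xc, yc) on a grid of rows. The real helper presumably draws
    # size-scaled blocks on an actual canvas instead of setting cells.
    for dx, dy in {(x, y), (-x, y), (x, -y), (-x, -y)}:
        px, py = xc + dx, yc + dy
        if 0 <= py < len(bg) and 0 <= px < len(bg[0]):
            bg[py][px] = 1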
Example #2
def run_aco(data="./data/data_20.txt"):
    points = []
    locations = []
    with open(data) as f:
        for line in f.readlines():
            character = line.split(" ")
            locations.append(
                dict(index=int(character[0]),
                     x=float(character[1]),
                     y=float(character[2])))
            points.append((float(character[1]), float(character[2])))

    cost_matrix = []
    rank = len(points)
    for i in range(rank):
        row = []
        for j in range(rank):
            row.append(distance(locations[i], locations[j]))
        cost_matrix.append(row)
    aco = ACO(20, 200)
    graph = Graph(points, cost_matrix, rank)
    path, length, all_path, all_length = aco.solve(graph)
    print('length: {}, path: {}'.format(length, path))
    print(all_length)
    plot_all(points, all_path, all_length, algorithm="Ant Colony Algorithm")
    plot_animation(points, path, length)
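
This snippet also relies on a distance helper that is not shown. A minimal sketch, assuming plain Euclidean distance (the conventional TSP edge cost) over the location dicts built in the loop above; the key names match those dicts:

import math

def distance(a, b):
    # Straight-line distance between two {'index': ..., 'x': ..., 'y': ...} dicts.
    return math.hypot(a['x'] - b['x'], a['y'] - b['y'])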
Example #3
def bresenham_circle(bg, xc, yc, R, size):
    x = 0
    y = R

    err = 2 * (
        1 - R
    )  # since x = 0 initially, this follows from (x+1)^2 + (y-1)^2 - R^2 = 1 + (R-1)^2 - R^2 = 2*(1-R)
    err1 = 0
    err2 = 0

    while y >= 0:
        plot_all(bg, x, y, xc, yc, size)
        if err < 0:  # the current point lies inside the circle
            err1 = 2 * err + 2 * y - 1
            if err1 <= 0:  # horizontal step
                x += 1
                err += 2 * x + 1
            else:  # diagonal step
                x += 1
                y -= 1
                err += 2 * (x - y + 1)
        elif err > 0:  # the current point lies outside the circle
            err2 = 2 * err - 2 * x - 1
            if err2 <= 0:  # diagonal step
                x += 1
                y -= 1
                err += 2 * (x - y + 1)
            else:  # vertical step
                y -= 1
                err += -2 * y + 1
        else:  # err == 0: exactly on the circle, take a diagonal step
            x += 1
            y -= 1
            err += 2 * (x - y + 1)
Example #4
from math import cos, pi, sin

def param_circle(bg, xc, yc, R, size):
    dt = 1 / R  # an angular step of ~1/R rad advances about one pixel along the arc
    m = pi / 2 * 1.01  # slightly past a quarter turn so rounding cannot leave a gap
    alpha = 0
    while alpha <= m:
        x = round(R * cos(alpha))
        y = round(R * sin(alpha))
        plot_all(bg, x, y, xc, yc, size)
        alpha += dt
Example #5
from math import sqrt

def canon_circle(bg, xc, yc, R, size):
    r2 = R * R
    x_max = round(R / sqrt(2))  # hand over at the 45-degree point, where |dy/dx| reaches 1

    x = 0
    while x <= x_max:
        y = round(sqrt(r2 - x * x))
        plot_all(bg, x, y, xc, yc, size)
        x += 1

    # below 45 degrees the arc is steeper than one pixel per column, so iterate over y
    y = round(sqrt(r2 - x_max * x_max))
    while y >= 0:
        x = round(sqrt(r2 - y * y))
        plot_all(bg, x, y, xc, yc, size)
        y -= 1
Example #6
def run_anneal(data="./data/data_20.txt"):
    points = []
    locations = []
    with open(data) as f:
        for line in f.readlines():
            character = line.split(" ")
            locations.append(
                dict(index=int(character[0]),
                     x=float(character[1]),
                     y=float(character[2])))
            points.append((float(character[1]), float(character[2])))

    sa = SimAnneal(points, stopping_iter=3000)
    sa.anneal()
    print("best path: ", sa.best_solution, "total length: ", sa.best_fitness)
    plot_all(points,
             sa.solution_list,
             sa.fitness_list,
             algorithm="Simulated Annealing")
    plot_animation(points,
                   sa.best_solution,
                   sa.best_fitness,
                   algorithm="Simulated Annealing")
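
Both run_aco above and run_anneal here parse the same input layout: one node per line, an integer index followed by space-separated x and y coordinates. An illustrative excerpt of what ./data/data_20.txt would look like under that parsing (the values are made up, not the real file):

1 37.5 45.2
2 12.0 8.8
3 51.3 22.1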
Example #7
def middle_dot_circle(bg, xc, yc, R, size):
    p = int(1 - R)  # initial midpoint decision parameter
    x = 0
    y = R

    plot_all(bg, x, y, xc, yc, size)
    plot_all(bg, y, x, xc, yc, size)
    while x < y:
        x += 1
        if p < 0:  # midpoint inside the circle: horizontal step
            p += 2 * x + 1
        else:  # midpoint on or outside the circle: diagonal step
            y -= 1
            p += 2 * (x - y) + 1

        plot_all(bg, x, y, xc, yc, size)
        plot_all(bg, y, x, xc, yc, size)
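
As a quick sanity check, any of the circle routines above can be run against the hypothetical grid-based plot_all sketched after Example #1; the grid dimensions and radius below are arbitrary:

if __name__ == '__main__':
    grid = [[0] * 21 for _ in range(21)]
    bresenham_circle(grid, 10, 10, 8, 1)
    for row in grid:
        print(''.join('#' if cell else '.' for cell in row))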
Example #8
import os

import matplotlib.pyplot as plt

import endomondo
import plot

if __name__ == "__main__":
    endomondo_user = endomondo.EndomondoUser(os.environ["ENDOMONDO_USER_ID"])
    workouts = list(filter(endomondo.workout_registered_by_mobile_app, endomondo_user.workouts()))
    plot.plot_all(workouts)
    plt.show()
Example #9
    #left = control.QL2()
    #right = control.Follower()

    ## Load trained controller
    #load_controller(left)

    #measure(left, right)
    #return

    if TRAINING:
        tournament()
    else:
        pygame.font.init()
        pygame.display.init()
        pygame.display.set_caption('Controller {}'.format(args.controller))
        screen = pygame.display.set_mode(SCREEN_SIZE)

        # Play the only trained controller
        c = CONTROLLER
        dev_null = open(os.devnull, 'w')
        c.log_file = dev_null
        #trainfile = get_train_path(c)
        play(screen, c, TRAIN_FPATH)


if __name__ == '__main__':
    main()
    if TRAINING:
        plot_all()
Example #10
- schools
- hospitals
- transit

analysis.py - does the calculations and appends new columns to the main data set
importable variables:
- vacant_lots
importable functions:
- calc_pot_res_units()
- calc_nearest()
- score()
- produce_csv()

plot.py - plots the analyzed dataframe using bokeh, whose capabilities and usage are
similar to matplotlib's, but with additional interactive tools
importable functions:
- plot_all()

'''

import data
import analysis
import plot

if __name__ == '__main__':
	analysis.calc_pot_res_units()
	analysis.calc_nearest()
	analysis.score()
	analysis.produce_csv()
	plot.plot_all()
Example #11
def main(args):
    across = defaultdict(lambda: defaultdict(list))
    algo_names = [
        'SVD',
        'KNNBaseline_item_msd',
    ]
    metrics = [
        'rmse', 'ndcg10',
        #'ndcg5', 'ndcgfull',
    ]
    for colunit in ['increase_{}', 'percent_increase_{}']:
        for userfrac in args.userfracs:
            for ratingfrac in args.ratingfracs:
                for sample_size in args.sample_sizes:
                    outname = 'processed_' + concat_output_filename(
                        args.dataset, args.grouping,
                        userfrac,
                        ratingfrac,
                        sample_size, args.num_samples
                    )
                    err_df = pd.read_csv(outname)

                    for algo_name in algo_names:
                        print('===\n' + algo_name)
                        filtered_df = err_df[err_df.algo_name == algo_name]
                        if args.verbose:
                            print(filtered_df.mean())
                        else:
                            colnames = []
                            for metric in metrics:
                                for group in args.test_groups:
                                    key = '{}_{}'.format(metric, group)
                                    colname = colunit.format(key)
                                    colnames.append(colname)
                            cols = filtered_df[colnames]
                            means = cols.mean()
                            print(means)
                            if args.plot_across:
                                for col in cols.columns.values:
                                    across[algo_name][col].append(means[col])
                            if args.plot_histograms:
                                plot_all(cols, 'hist', algo_name + '_' + outname)
                    
                if args.plot_across:
                    for algo_name in algo_names:
                        _, axes = plt.subplots(ncols=len(metrics))
                        _, zoomaxes = plt.subplots(ncols=len(across[algo_name]))
                        metric_to_index = {}
                        for i_metric, metric in enumerate(metrics):
                            metric_to_index[metric] = i_metric
                        for i, (key, val) in enumerate(across[algo_name].items()):
                            for metric, index in metric_to_index.items():
                                if metric in key:
                                    i_metric = index
                            ax = axes[i_metric]
                            ax.plot(val)
                            ax.set_title(algo_name)
                            zoomax = zoomaxes[i]
                            zoomax.plot(val)
                            zoomax.set_title(algo_name + ' ' + key)
    plt.show()
Example #12
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--data_path', type=str, default='data')

    arg('--model', type=str, default='pnasnet5large')
    arg('--exp-name', type=str, default='pnasnet5large_2')

    arg('--batch-size', type=int, default=32)
    arg('--lr', type=float, default=1e-2)
    arg('--patience', type=int, default=4)
    arg('--n-epochs', type=int, default=15)

    arg('--n-folds', type=int, default=10)
    arg('--fold', type=int, default=0)

    arg('--random-seed', type=int, default=314159)

    arg('--num-workers', type=int, default=6)
    arg('--gpus', type=str, default='0')

    arg('--resize', type=int, default=331)
    arg('--crop', type=int, default=331)
    arg('--scale', type=str, default='0.4, 1.0')
    arg('--mean', type=str, default='0.485, 0.456, 0.406')
    arg('--std', type=str, default='0.229, 0.224, 0.225')

    args = parser.parse_args()
    print(args)

    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus

    #  os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '1'
    #  os.environ['MXNET_UPDATE_ON_KVSTORE'] = "0"
    #  os.environ['MXNET_EXEC_ENABLE_ADDTO'] = "1"
    #  os.environ['MXNET_USE_TENSORRT'] = "0"
    #  os.environ['MXNET_GPU_WORKER_NTHREADS'] = "2"
    #  os.environ['MXNET_GPU_COPY_NTHREADS'] = "1"
    #  os.environ['MXNET_OPTIMIZER_AGGREGATION_SIZE'] = "54"

    random_seed = args.random_seed
    set_random_seed(random_seed)

    path_to_data = Path(args.data_path)
    labels = pd.read_csv(path_to_data / 'labels.csv')
    num_classes = len(labels)

    train = pd.read_csv(path_to_data / 'train.csv.zip')

    n_folds = args.n_folds
    make_folds(train, n_folds, random_seed)

    mlb = MultiLabelBinarizer([str(i) for i in range(num_classes)])
    s = train['attribute_ids'].str.split()
    res = pd.DataFrame(mlb.fit_transform(s),
                       columns=mlb.classes_,
                       index=train.index)
    train = pd.concat([res, train['id'] + '.png', train['fold']], axis=1)

    gpu_count = len(args.gpus.split(','))
    batch_size = args.batch_size

    resize = args.resize
    crop = args.crop
    scale = tuple(float(x) for x in args.scale.split(','))
    mean = [float(x) for x in args.mean.split(',')]
    std = [float(x) for x in args.std.split(',')]

    #  jitter_param = 0.4
    #  lighting_param = 0.1
    labels_ids = [str(i) for i in range(num_classes)]
    num_workers = args.num_workers

    fold = args.fold
    train_transformer = get_train_transform(resize=resize,
                                            crop=crop,
                                            scale=scale,
                                            mean=mean,
                                            std=std)
    train_loader = mx.gluon.data.DataLoader(MXDataset(
        path_to_data / 'train', train[train['fold'] != fold].copy(),
        labels_ids, train_transformer),
                                            batch_size=batch_size * gpu_count,
                                            shuffle=True,
                                            num_workers=num_workers,
                                            pin_memory=True)

    test_transformer = get_test_transform(resize=resize,
                                          crop=crop,
                                          mean=mean,
                                          std=std)
    dev_loader = mx.gluon.data.DataLoader(MXDataset(
        path_to_data / 'train', train[train['fold'] == fold].copy(),
        labels_ids, test_transformer),
                                          batch_size=batch_size * gpu_count,
                                          shuffle=False,
                                          num_workers=num_workers,
                                          pin_memory=True)
    fp16 = True
    if args.model == 'pnasnet5large':
        net = get_pnasnet5large(num_classes)
    else:
        raise ValueError(f'No such model {args.model}')

    if fp16:
        net.cast('float16')
    ctx = [mx.gpu(i) for i in range(gpu_count)]
    net.collect_params().reset_ctx(ctx)

    epoch_size = len(train_loader)
    lr = args.lr * batch_size / 256  # linear LR scaling rule relative to a base batch of 256
    steps = [step * epoch_size for step in [7, 9]]
    factor = 0.5
    warmup_epochs = 5
    warmup_mode = 'linear'
    schedule = mx.lr_scheduler.MultiFactorScheduler(
        step=steps,
        factor=factor,
        base_lr=lr,
        warmup_steps=warmup_epochs * epoch_size,
        warmup_mode=warmup_mode)

    if fp16:
        weight = 128  # static loss-scale factor for fp16 training
        opt = mx.optimizer.Adam(
            multi_precision=True,
            learning_rate=lr,
            rescale_grad=1 / weight,  # divide the scaled-up gradients back down
            lr_scheduler=schedule,
        )
    else:
        opt = mx.optimizer.Adam(
            learning_rate=lr,
            lr_scheduler=schedule,
        )
    trainer = mx.gluon.Trainer(net.collect_params(), opt)
    if fp16:
        # weight scales the loss up by the same factor rescale_grad divides out
        loss = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss(weight=weight)
    else:
        loss = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss()

    path_to_models = Path('models')
    path_to_model = path_to_models / args.exp_name
    path_to_exp = path_to_model / f'fold_{fold}'
    if not path_to_exp.exists():
        path_to_exp.mkdir(parents=True)

    patience = args.patience
    lr_reset_epoch = 1
    lr_changes = 0
    max_lr_changes = 2
    n_epochs = args.n_epochs
    best_dev_f2 = th2 = 0
    train_losses = []
    dev_losses, dev_f2s, dev_ths = [], [], []
    dev_met1, dev_met2 = [], []
    for epoch in range(1, n_epochs + 1):
        train_loss, all_predictions, all_targets = epoch_step(
            train_loader,
            desc=f'[ Training {epoch}/{n_epochs}.. ]',
            fp16=fp16,
            ctx=ctx,
            net=net,
            loss=loss,
            trainer=trainer)
        train_losses.append(train_loss)

        dev_loss, all_predictions, all_targets = epoch_step(
            dev_loader,
            desc=f'[ Validating {epoch}/{n_epochs}.. ]',
            fp16=fp16,
            ctx=ctx,
            net=net,
            loss=loss)
        dev_losses.append(dev_loss)

        metrics = {}
        argsorted = all_predictions.argsort(axis=1)
        for threshold in [0.01, 0.05, 0.1, 0.15, 0.2]:
            metrics[f'valid_f2_th_{threshold:.2f}'] = get_score(
                binarize_prediction(all_predictions, threshold, argsorted),
                all_targets)
        dev_met1.append(metrics)

        dev_f2 = 0
        for th in dev_met1[-1]:
            if dev_met1[-1][th] > dev_f2:
                dev_f2 = dev_met1[-1][th]
                th2 = th

        # normalise each row by its max so the same thresholds apply across samples
        all_predictions = all_predictions / all_predictions.max(1,
                                                                keepdims=True)
        metrics = {}
        argsorted = all_predictions.argsort(axis=1)
        for threshold in [0.05, 0.1, 0.2, 0.3, 0.4]:
            metrics[f'valid_norm_f2_th_{threshold:.2f}'] = get_score(
                binarize_prediction(all_predictions, threshold, argsorted),
                all_targets)
        dev_met2.append(metrics)

        for th in dev_met2[-1]:
            if dev_met2[-1][th] > dev_f2:
                dev_f2 = dev_met2[-1][th]
                th2 = th

        dev_f2s.append(dev_f2)
        dev_ths.append(th2)
        if dev_f2 > best_dev_f2:
            best_dev_f2 = dev_f2
            best_th = th2
            if fp16:
                net.cast('float32')
                net.save_parameters((path_to_exp / 'model').as_posix())
                net.cast('float16')
            else:
                net.save_parameters((path_to_exp / 'model').as_posix())
            save_dict(
                {
                    'dev_loss': dev_loss,
                    'dev_f2': best_dev_f2,
                    'dev_th': best_th,
                    'epoch': epoch,
                    'dev_f2s': dev_f2s,
                    'dev_ths': dev_ths,
                    'dev_losses': dev_losses,
                    'dev_met1': dev_met1,
                    'dev_met2': dev_met2,
                }, path_to_exp / 'meta_data.pkl')
        elif (patience and epoch - lr_reset_epoch > patience
              and max(dev_f2s[-patience:]) < best_dev_f2):
            # "patience" epochs without improvement
            lr_changes += 1
            if lr_changes > max_lr_changes:
                break
            lr *= factor
            print(f'lr updated to {lr}')
            lr_reset_epoch = epoch
            if fp16:
                weight = 128
                opt = mx.optimizer.Adam(multi_precision=True,
                                        learning_rate=lr,
                                        rescale_grad=1 / weight)
            else:
                opt = mx.optimizer.Adam(learning_rate=lr)
            trainer = mx.gluon.Trainer(net.collect_params(), opt)

        plot_all(path_to_exp, train_losses, dev_losses, dev_f2s, dev_ths,
                 dev_met1, dev_met2)
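
The validation loop above also depends on a binarize_prediction helper that is not shown. A sketch of one plausible implementation, guessed from the call site: it thresholds the per-class scores, and the precomputed argsorted argument suggests a fallback that guarantees at least one predicted label per row. This is a reconstruction under those assumptions, not the project's actual code.

import numpy as np

def binarize_prediction(probabilities, threshold, argsorted=None):
    # Hypothetical reconstruction: a class is predicted when its score
    # exceeds the threshold; rows left empty fall back to their single
    # top-scoring class (argsorted = probabilities.argsort(axis=1)).
    mask = (probabilities > threshold).astype(np.uint8)
    if argsorted is not None:
        empty = mask.sum(axis=1) == 0
        mask[empty, argsorted[empty, -1]] = 1
    return mask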