Example No. 1
def main():
    """
    Run all the worlds in the benchmark and tabulate their performance.
    """
    performance = []
    performance.append(tester.test(World_grid_1D))
    performance.append(tester.test(World_grid_1D_delay))
    performance.append(tester.test(World_grid_1D_chase))
    performance.append(tester.test(World_grid_1D_ms))
    performance.append(tester.test(World_grid_1D_noise))
    performance.append(tester.test(World_grid_2D))
    performance.append(tester.test(World_grid_2D_dc))
    performance.append(tester.test(World_image_1D))
    performance.append(tester.test(World_image_2D))
    performance.append(tester.test(World_fruit))
    print('Individual benchmark scores:')
    scores = []
    for score in performance:
        print('    {0:.2}, {1}'.format(score[0], score[1]))
        scores.append(score[0])
    print('Overall benchmark score: {0:.2}'.format(np.mean(scores)))

    # Block the program, displaying all plots.
    # When the plot windows are closed, the program closes.
    plt.show()
def test_one_layer_gaussian():
  """Test variational EM with pathwise gradients."""
  config = """
  learning_rate: 0.1
  n_iterations: 100
  gradient:
    estimator: pathwise
    n_samples: 7
    batch_size: 1
  layer_1:
    latent_distribution: gaussian
    p_z_variance: 1.
    size: 3
  layer_0:
    weight_distribution: point_mass
    p_w_variance: 1.
    data_size: 1
  """
  data = np.array([[3.3]])

  def test_posterior_predictive(sample: np.array) -> None:
    print('data:', data)
    print('posterior predictive sample:', sample)
    np.testing.assert_allclose(sample, data, rtol=1e-1)

  tester.test(
      config, data=data, test_fn=test_posterior_predictive)
def test_latent_layer():
    """Test matching q(z) to p(z) where p is Gaussian."""
    mean = 5.

    config = """
  n_iterations: 50
  learning_rate: 0.1
  gradient:
    estimator: pathwise
    n_samples: 1
    batch_size: 1
  layer_1:
    latent_distribution: gaussian
    size: 1
    p_z_mean: {}
    p_z_variance: 1.
  """.format(mean)

    def test_posterior_predictive(sample: np.array) -> None:
        print('posterior predictive mean:', sample)
        print('prior mean:', mean)
        np.testing.assert_allclose(sample, mean, rtol=1e-1)

    tester.test(config,
                data=np.array([np.nan]),
                test_fn=test_posterior_predictive)
def test_two_layer_poisson_score():
  """Test variational EM with score function gradients and gaussian latents."""
  config = """
  learning_rate: 0.1
  n_iterations: 100
  gradient:
    estimator: score_function
    n_samples: 32
    batch_size: 1
  layer_2:
    latent_distribution: poisson
    size: 2
  layer_1:
    latent_distribution: poisson
    weight_distribution: point_mass
    size: 3
  layer_0:
    weight_distribution: point_mass
    data_size: 1
  """
  data = np.array([[-3.2]])

  def test_posterior_predictive(sample: np.array) -> None:
    print('data:', data)
    print('posterior predictive sample:', sample)
    np.testing.assert_allclose(sample, data, rtol=0.3)

  tester.test(
      config, data=data, test_fn=test_posterior_predictive)
Example No. 5
def main(_):

    if "absl.logging" in sys.modules:
        import absl.logging

        absl.logging.set_verbosity("info")
        absl.logging.set_stderrthreshold("info")

    config = FLAGS.config
    print(config)

    # Set the seed
    torch.manual_seed(config.seed)
    np.random.seed(config.seed)

    # Check if in the correct branch
    # group_name = config["model"][: config["model"].find("sa")]
    # if group_name not in ["z2", "mz2", "p4", "p4m"]:
    #     raise ValueError(
    #         "Mlp_encoding is required for rotations finer than 90 degrees. Please change to the mlp_encoding branch."
    #     )

    # initialize weight and bias
    os.environ["WANDB_API_KEY"] = "691777d26bb25439a75be52632da71d865d3a671"
    if not config.train:
        os.environ["WANDB_MODE"] = "dryrun"

    wandb.init(
        project="equivariant-attention",
        config=config,
        group=config["dataset"],
        entity="equivatt_team",
    )

    # Define the device to be used and move model to that device
    config["device"] = (
        "cuda:0" if (config.device == "cuda" and torch.cuda.is_available()) else "cpu"
    )
    model = get_model(config)

    # Define transforms and create dataloaders
    dataloaders = dataset.get_dataset(config, num_workers=4)

    # Create model directory and instantiate config.path
    model_path(config)

    if config.pretrained:
        # Load model state dict
        model.module.load_state_dict(torch.load(config.path), strict=False)

    # Train the model
    if config.train:
        # Print arguments (Sanity check)
        print(config)
        print(datetime.datetime.now())
        # Train the model
        trainer.train(model, dataloaders, config)

    # Test model
    tester.test(model, dataloaders["test"], config)
def test_gaussian_score_multivariate_data():
    config = """
  learning_rate: 0.1
  n_iterations: 100
  gradient:
    estimator: score_function
    n_samples: 16
    batch_size: 3
  layer_1:
    latent_distribution: gaussian
    size: 7
  layer_0:
    weight_distribution: point_mass
    data_size: 3
  """
    data = np.array([[20.3, -30.3, 15.3], [-30.3, -40.4, 23.5],
                     [15., -20.3, 28.9]])

    def test_posterior_predictive(sample: np.array, data: np.array) -> None:
        print('----')
        print('data:', data)
        print('posterior predictive sample:', sample)
        np.testing.assert_allclose(sample, np.expand_dims(data, 0), rtol=0.2)

    tester.test(config, data=data, test_fn=test_posterior_predictive)
Example No. 8
def main():
    """Main function to run model."""
    config = get_config(os.environ)

    sys.path.append(os.path.join('tasks', config.task_folder))
    # pylint: disable=import-error
    from trainer import train
    from tester import test
    # pylint: enable=import-error

    if config.is_distributed:
        torch.cuda.set_device(config.local_rank)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')

    logger = setup_logger(config.work_dir, distributed_rank=get_rank())
    logger.info(f'Using {config.num_gpus} GPUs.')
    logger.info(f'Collecting environment info:{get_env_info()}')
    logger.info(f'------------------------------')
    logger.info(f'Running configurations:')
    for key, val in config.__dict__.items():
        logger.info(f'  {key}: {val}')
    logger.info(f'------------------------------')

    if config.run_mode == 'train':
        train(config, logger)
    elif config.run_mode == 'test':
        test(config, logger)
Example No. 9
def main():
    parser = argparse.ArgumentParser(
        description="checkPy: a simple python testing framework")
    parser.add_argument(
        "-m",
        action="store",
        dest="module",
        help=
        "provide a module name or path to run all tests from the module, or target a module for a specific test"
    )
    parser.add_argument(
        "-d",
        action="store",
        dest="githubLink",
        help="download tests from a Github repository and exit")
    parser.add_argument("-update",
                        action="store_true",
                        help="update all downloaded tests and exit")
    parser.add_argument("-list",
                        action="store_true",
                        help="list all download locations and exit")
    parser.add_argument("-clean",
                        action="store_true",
                        help="remove all tests from the tests folder and exit")
    parser.add_argument("file",
                        action="store",
                        nargs="?",
                        help="name of file to be tested")
    args = parser.parse_args()

    rootPath = os.sep.join(
        os.path.abspath(os.path.dirname(__file__)).split(os.sep)[:-1])
    if rootPath not in sys.path:
        sys.path.append(rootPath)

    if args.githubLink:
        downloader.download(args.githubLink)
        return

    if args.update:
        downloader.update()
        return

    if args.list:
        downloader.list()
        return

    if args.clean:
        downloader.clean()
        return

    if args.file and args.module:
        tester.test(args.file, module=args.module)
    elif args.file and not args.module:
        tester.test(args.file)
    elif not args.file and args.module:
        tester.testModule(args.module)
    else:
        parser.print_help()
        return
def test_multivariate_data():
  """Test variational EM with score function gradients."""
  config = """
  learning_rate: 0.1
  n_iterations: 100
  print_every: 100
  gradient:
    estimator: score_function
    n_samples: 16
    batch_size: 1
  layer_1:
    latent_distribution: gaussian
    size: 1
  layer_0:
    weight_distribution: point_mass
    data_size: 3
  """
  # shape [n_data, data_size]
  data = np.array([[30.3, -10., 5.]])

  def test_posterior_predictive(sample: np.array) -> None:
    print('data:', data)
    print('posterior predictive sample:', sample)
    np.testing.assert_allclose(sample, data, rtol=1e-1)

  tester.test(
      config, data=data, test_fn=test_posterior_predictive)
Example No. 11
def main(args):
    # Create directories
    if not os.path.exists("./logs"):
        os.makedirs("./logs")
    if not os.path.exists("./pytorch_models"):
        os.makedirs("./pytorch_models")

    # Set logs
    tb_writer = SummaryWriter('./logs/tb_{0}'.format(args.log_name))
    log = set_log(args)

    # Create env
    env = make_env(log, args)

    # Set seeds
    env.seed(args.seed)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    # Initialize policy
    agent = set_policy(env, tb_writer, log, args, name=args.algorithm)

    if args.test:
        from tester import test
        test(agent=agent, env=env, log=log, tb_writer=tb_writer, args=args)
    else:
        from trainer import train
        train(agent=agent, env=env, log=log, tb_writer=tb_writer, args=args)
Example No. 12
def test_latent_poisson_layer():
    """Test matching q(z) to p(z) where p is Poisson."""
    mean = 5.

    config = """
  n_iterations: 100
  learning_rate: 0.1
  gradient:
    estimator: score_function
    n_samples: 16
    batch_size: 1
  layer_1:
    latent_distribution: poisson
    size: 1
    p_z_mean: {}
  """.format(mean)

    def test_posterior_predictive(sample: np.array) -> None:
        print('posterior predictive sample:', sample)
        print('prior mean:', mean)
        np.testing.assert_allclose(sample, mean, rtol=1e-1)

    tester.test(config,
                data=np.array([np.nan]),
                test_fn=test_posterior_predictive)
Example No. 13
    def test_3(self):
        t = 0
        conjuntos_de_processos = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]
        n = 11
        atributos = [[1, 3, 5, 1], [2, 3, 3, 2], [3, 3, 5, 3], [4, 2, 1, 4],
                     [5, 2, 1, 5], [6, 3, 5, 6], [7, 1, 5, 7], [8, 2, 1, 8],
                     [9, 1, 3, 9], [10, 2, 2, 10], [11, 4, 5, 11]]
        tester.test(t, conjuntos_de_processos, n, atributos, 3)
Example No. 14
    def test_5(self):
        n = 1
        conjuntos = [
            [1, 2, 3, 4, 5],
        ]
        m = 4
        operacoes = [(1, 2), (3, 4), (1, 5), (3, 5)]
        tester.test(n, conjuntos, m, operacoes, 5)
Example No. 15
def model_process(count, model):
    opt = parse_opts()

    if opt.root_path != '':
        opt.video_path = os.path.join(opt.root_path, opt.video_path)
        opt.annotation_path = os.path.join(opt.root_path, opt.annotation_path)
        opt.result_path = os.path.join(opt.root_path, opt.result_path)
        if opt.resume_path:
            opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
        if opt.pretrain_path:
            opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)
    opt.scales = [opt.initial_scale]
    for i in range(1, opt.n_scales):
        opt.scales.append(opt.scales[-1] * opt.scale_step)
    #opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
    opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)
    opt.std = get_std(opt.norm_value)
    #print(opt)
    #print(opt.result_path)
    with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
        json.dump(vars(opt), opt_file)

    torch.manual_seed(opt.manual_seed)

    #print(model)
    criterion = nn.CrossEntropyLoss()
    if not opt.no_cuda:
        criterion = criterion.cuda()

    if opt.no_mean_norm and not opt.std_norm:
        norm_method = Normalize([0, 0, 0], [1, 1, 1])
    elif not opt.std_norm:
        norm_method = Normalize(opt.mean, [1, 1, 1])
    else:
        norm_method = Normalize(opt.mean, opt.std)

    print('testing is run')

    if opt.test:
        spatial_transform = Compose([
            Scale(int(opt.sample_size / opt.scale_in_test)),
            CornerCrop(opt.sample_size, opt.crop_position_in_test),
            ToTensor(opt.norm_value), norm_method
        ])
        temporal_transform = LoopPadding(opt.sample_duration)
        target_transform = VideoID()

        test_data = get_test_set(opt, spatial_transform, temporal_transform,
                                 target_transform)

        test_loader = torch.utils.data.DataLoader(test_data,
                                                  batch_size=opt.batch_size,
                                                  shuffle=False,
                                                  num_workers=opt.n_threads,
                                                  pin_memory=True)

        tester.test(count, test_loader, model, opt, test_data.class_names)
Example No. 16
def main(argv=None):
    writer = Writer(RESULTS_DIR)
    trainer = Trainer(RESULTS_DIR, 'train', writer)
    tester = Tester(RESULTS_DIR, 'valid', writer)

    step, _ = tester.test(EVAL_STEP_NUM)
    while (step < LAST_STEP):
        lr = learning_rate(step)
        step, _ = trainer.train(lr, EVAL_FREQUENCY, step, RESTORING_FILE)
        tester.test(EVAL_STEP_NUM, step)
Example No. 18
def main(argv=None):
  writer = Writer(RESULTS_DIR)
  tester = Tester(RESULTS_DIR, 'valid', writer)
  status_file = os.path.join(RESULTS_DIR, 'checkpoint')
  last_update = 0
  while True:
    cur_time = os.stat(status_file).st_mtime
    if (cur_time > last_update):
      tester.test(EVAL_STEP_NUM)
      last_update = cur_time
    time.sleep(INTERVAL)
Example No. 19
    def test_4(self):
        n = 5
        conjuntos = [
            [1],
            [2],
            [3],
            [4],
            [5],
        ]
        m = 4
        operacoes = [(1, 2), (3, 4), (1, 5), (3, 5)]
        tester.test(n, conjuntos, m, operacoes, 4)
Example No. 20
def main():
    print_header("START", CONFIG, level=0)
    data = load(CONFIG)

    print(data.head())
    data.to_csv("./outputs/data.csv", index=False)

    describe(data, CONFIG)
    test(data, CONFIG)
    forecast(data, CONFIG)
    predict(data, CONFIG)
    report(data, CONFIG)
    print_header("DONE", CONFIG, level=0)
Example No. 21
def test(fileName):
    """
    Run tests for a single file
    """
    import caches
    caches.clearAllCaches()
    import tester
    tester.test(fileName)
    try:
        if __IPYTHON__:
            import matplotlib.pyplot
            matplotlib.pyplot.close("all")
    except:
        pass
Example No. 22
def run():
    train_file = 'trained_models/english.crfsuite'
    train_data_percent = 1
    blocks = int(1 / train_data_percent)
    max_c = 5
    all_config = 2**max_c
    best_f1 = 0
    best_config = 0

    print("N, F1, P, R ")

    for c in range(all_config):
        config = get_feature_configuration(c, max_c)
        total_f1 = 0
        for i in range(blocks):
            train_d = get_eng_train_data(train_data_percent, i)
            test_d = get_eng_test_data(train_data_percent, i)
            train(train_d, train_file, config)
            f1 = test(test_d, train_file, config)
            total_f1 += f1
            #print(f1)
        total_f1 /= blocks
        #print("Config = " + str(config))
        #print("Number = " + str(c+1))
        #print("Final F1 = " + str(total_f1))
        #print("#############")
        #print(str(c+1) + ", " + str(total_f1) + ", " + str(total_f1) + ", " +  )
        if total_f1 > best_f1:
            best_f1 = total_f1
            best_config = config
    print("Best config = " + str(best_config))
    print("Best F1 = " + str(best_f1))
Example No. 23
def train(args, model, device, data, input, target, test_input, test_target, optimizer, epoch):
    device = torch.device("cuda") # Sending to GPU

    def closure():
        optimizer.zero_grad() #Reset grads 
        out = model(input) # Passing batch through model

        loss = criterion(out, target)
        print(f"Loss: {loss.item()}")
        loss.backward() # Backprop
        return loss

    optimizer.step(closure) # Pass through optimizer

    y, future = test(model, test_input, test_target)

    # Display the graphs

    plt.figure(figsize = (30 , 10))
    plt.title("Predictions")
    plt.xlabel('x')
    plt.ylabel('y')
    plt.xticks()
    plt.yticks()
    def draw(yi, color):
        plt.plot(np.arange(input.size(1)), yi[:input.size(1)], color, linewidth
                = 2.0)
        plt.plot(np.arange(input.size(1), input.size(1)+future),
                 yi[input.size(1):], color + ':', linewidth = 2.0)
    draw(y[0], 'r')
    draw(y[1], 'g')
    draw(y[2], 'b')
    plt.savefig(f"outputs/predict_{epoch}.png")
    plt.close()
Example No. 24
    def test_strip_neg(self):
        for fname, out_prefix in get_progs(r"(strip_\w+).in", 2):
            with self.subTest(name=fname):
                ret, *_ = tester.test(["cat"], 1., 0, False, False,
                                      f"tests{sep}{fname}",
                                      f"tests{sep}{out_prefix}.out")
                self.assertEqual(ret, "WA")
Example No. 25
def main():
    # data
    train_dataloader, test_dataloader = data.mnist_dataloader()

    # model
    model = models.CNN_Net().to(cfg.device)

    # train
    trainer.train(model, train_dataloader, test_dataloader)

    # test
    model.load_state_dict(torch.load(cfg.best_model_path))
    tester.test(model, test_dataloader)

    # metrics
    evaluate.eval()
Example No. 26
def index(request):
	
	context = {'jsdata' : ""}

	
	fobj = request.FILES # here you get the files needed
	if(len(fobj) != 0):
		f = fobj['sentFile']
		os.remove('tmp/test_image.jpg')
		path = default_storage.save('tmp/test_image.jpg', ContentFile(f.read()))
		# Run the model once and reuse the result.
		pr = tester.test('tmp/test_image.jpg')
		jsdata = json.dumps({"res" : pr})
		context = {'jsres' : pr[0], 'jstreat' : pr[1]}
		return render(request, 'diagnosis/result.html', context)
	else:
		return render(request, 'diagnosis/index.html', context)
Example No. 27
    def test_strict(self):
        for _, fname, expected_ret in get_progs(r"(prog\w*_([A-Z]+)).py", 3):
            with self.subTest(name=fname):
                prefix = f"tests{sep}{fname}"
                ret, *_ = tester.test([executable, prefix + ".py"], 1.,
                                      16 * 1024**2, False, False,
                                      prefix + ".in", prefix + ".out")
                self.assertEqual(ret, expected_ret)
Example No. 28
def evaluation(net, train_loader, test_loader, optimizer, epochs, device,
               train_acc, train_losses, test_acc, test_losses):

    # initialising cumulative train and test metrics which will store per epoch metrics
    # cum_train_acc = []
    # cum_train_losses = []
    # cum_test_acc = []
    # cum_test_losses = []

    # net = model.Cifar10_Net(norm_type = 'BN').to(device)
    # scheduler = StepLR(optimizer, step_size=6, gamma=0.1)

    for epoch in range(1, epochs + 1):
        print('\n Epoch:', epoch)
        trainer.train(net, device, train_loader, optimizer, epoch, train_acc,
                      train_losses)
        # scheduler.step()
        tester.test(net, device, test_loader, test_acc, test_losses)
Example No. 29
def main(file: str = None):
    if file is None:
        print("Default config file will be used")
        file = "config.json"
        print("Using default config.json file")

    config = load_config(file)

    asyncio.get_event_loop().run_until_complete(test(config))
Example No. 30
    def __call__(self, f):
        """
        When the object is called like a function, we run the method
        specified by self.method on the dataset in file 'f'.
        
        Args:
        f: path to file where the network corresponding to the ground
           truth community structure lies.

        Returns:
        A string of test results.

        """
        print(self.method)
        print(f)
        G = initialize_graph(f)
        known = tester.parse(self.truth)
        known -= 1
        exporter = Exporter(f, G.n, False)
        arguments = Arguments(exporter, None, None, 0.02,
                              False, False, self.method)
        if self.method == Method.prop:
            labelprop.propagate(G, arguments)
            found = arguments.exporter.comlist[:, -1]
            numcoms = len(np.unique(found))
            test_results = tester.test(found, known)
        else:
            community_detect(G, arguments)
            hierarchy = arguments.exporter.comlist[:, 1:] # Exclude the 0...n col
            colresult = np.empty(shape=(hierarchy.shape[1], 4))
            lengths = []

            for j, column in enumerate(hierarchy.T):
                lengths.append(len(np.unique(column)))
                colresult[j, :] = tester.test(column, known)

            idx = get_best_column(colresult)
            test_results = colresult[idx, :]
            numcoms = lengths[idx]
                
        return format(os.path.basename(f), test_results[0], test_results[1],
                test_results[2], test_results[3], numcoms,
                len(np.unique(known)), str(arguments.method).split('.')[-1])
def get_data(x_min,x_max,x_step):
	tpr_points = []
	fpr_points = []
	ppv_points = []
	for i in range(((x_max-x_min)//x_step)):
		f,t,p = tester.test(x_min+(x_step*i))
		tpr_points.append(t)
		fpr_points.append(f)
		ppv_points.append(p)
		print("Total_Progress ="+str((i*1.0/((x_max-x_min)//x_step))))
	return (tpr_points,fpr_points,ppv_points)
Example No. 32
def main():
    N_RUNS = 7
    benchmark_lifespan = 1e4
    overall_performance = []
    # Run all the worlds in the benchmark and tabulate their performance
    for i in range(N_RUNS):
        performance = []
        world = World_grid_1D(lifespan=benchmark_lifespan)
        performance.append(tester.test(world, show=False))
        world = World_grid_1D_ms(lifespan=benchmark_lifespan)
        performance.append(tester.test(world, show=False))
        world = World_grid_1D_noise(lifespan=benchmark_lifespan)
        performance.append(tester.test(world, show=False))
        world = World_grid_2D(lifespan=benchmark_lifespan)
        performance.append(tester.test(world, show=False))
        world = World_grid_2D_dc(lifespan=benchmark_lifespan)
        performance.append(tester.test(world, show=False))
        world = World_image_1D(lifespan=benchmark_lifespan)
        performance.append(tester.test(world, show=False))
        world = World_image_2D(lifespan=benchmark_lifespan)
        performance.append(tester.test(world, show=False))

        print "Individual benchmark scores: ", performance
        total = 0
        for val in performance:
            total += val
        mean_performance = total / len(performance)
        overall_performance.append(mean_performance)
        print "Overall benchmark score, ", i, "th run: ", mean_performance
    print "All overall benchmark scores: ", overall_performance

    # Automatically throw away the 2 highest and 2 lowest values
    # if you choose N_RUNS to be 7 or more.
    if N_RUNS >= 7:
        for i in range(2):
            highest_val = -10**6
            lowest_val = 10**6

            for indx in range(len(overall_performance)):
                if overall_performance[indx] > highest_val:
                    highest_val = overall_performance[indx]
                if overall_performance[indx] < lowest_val:
                    lowest_val = overall_performance[indx]
            overall_performance.remove(highest_val)
            overall_performance.remove(lowest_val)

    # Find the average of what's left
    sum_so_far = 0.
    for indx in range(len(overall_performance)):
        sum_so_far += overall_performance[indx]
    typical_performance = sum_so_far / len(overall_performance)
    print "Typical performance score: ", typical_performance

    # Block the program, displaying all plots.
    # When the plot windows are closed, the program closes.
    plt.show()
Example No. 34
    def run_tests(self):
        exe_loc = self.root_dir + '/SDPSolver.exe'
        CLI.fancy_print('Enter Implementation type',
                        '[1] Kiarash\'s implementation',
                        '[2] Derivative method (Keivan & Hamidreza)',
                        '[3] Generalized eigenvalue method (Hamidreza)')
        imp_type = input()
        test_folder = CLI.select_dirs(
            self.root_dir + "/SDPA/testSet",
            "Select one of the test directories below:")
        CLI.list_dirs(
            self.root_dir + "/SDPA/testSet/" + test_folder,
            "Enter test name you wish to run (Regex is also supported e.g. \"*.dat-s\")",
            TestFormats.regex_format())
        tests_reg = input()
        try:
            try:
                os.mkdir(self.root_dir + "/out")
            except:
                pass
            sv_dir = os.getcwd()
            os.chdir(self.root_dir)
            os.chdir('build')
            executable_path = os.getcwd()
            os.chdir(self.root_dir + '/SDPA/testSet')
            os.chdir(test_folder)
            tests_dir = os.getcwd()
            os.chdir(self.root_dir + "/out")
            output_path = os.getcwd()
            os.chdir(sv_dir)

            test(executable_path=executable_path,
                 tests_dir=tests_dir,
                 output_path=output_path,
                 test_reg=tests_reg,
                 implementation_type=imp_type)
        except:
            print('problem in running tester')

        self.back_to_main()
        return
def main(argv=None):

  writer = Writer(RESULTS_DIR)
  trainer = Trainer(RESULTS_DIR, 'train', writer)
  tester = Tester(RESULTS_DIR, 'valid', writer)

  params_file = os.path.join(RESULTS_DIR, PARAMS_FILE)
  if (os.path.isfile(params_file)):
    with open(params_file, 'r') as handle:
      params = json.load(handle)
  else:
    params = {}
    params['min_test_step'], params['min_test_loss'] = tester.test(EVAL_STEP_NUM)
    params['step'] = params['min_test_step']
    params['unchanged'] = 0
    params['num_decays'] = 0
    params['learning_rate'] = LEARNING_RATE

  while (params['num_decays'] <= MAX_DECAYS):
    params['step'], _ = trainer.train(params['learning_rate'], EVAL_FREQUENCY,
                                      params['step'], RESTORING_FILE)
    _, test_loss = tester.test(EVAL_STEP_NUM, params['step'])
    if (test_loss < params['min_test_loss']):
      params['min_test_loss'] = test_loss
      params['min_test_step'] = params['step']
      params['unchanged'] = 0
    else:
      params['unchanged'] += EVAL_FREQUENCY
      if (params['unchanged'] >= PATIENCE):
        params['learning_rate'] *= DECAY_FACTOR
        params['num_decays'] += 1
        params['step'] = params['min_test_step']
        params['unchanged'] = 0

    with open(params_file, 'w') as handle:
      json.dump(params, handle, indent=2)
    print(params)
Example No. 36
def test(batch, remote, debug, dependency = []):
    params = cache.get("batch/%s/params" % batch, remote)
    numEpisodes = params['episodes']['num']
    
    i_ = range(numEpisodes)
    f = lambda i : tester.test(batch, params, i, remote, debug)
    
    logging.info("running %s test instances" % len(i_))
    if (remote):
        k_ = cloud.map(f, i_, _label = "%s/test" % batch, _depends_on = dependency, _type = 'c1', _max_runtime = 30)
        logging.info("k_ %s" % k_)
        return k_
    else:
        results = map(f, i_)
        return results
Example No. 37
#
#    refsets = collections.defaultdict(set)
#    testsets = collections.defaultdict(set)
#
#    for i, (feats, label) in enumerate(testfeats):
#        refsets[label].add(i)
#        observed = classifier.classify(feats)
#        testsets[observed].add(i)
#
#
#    print '#### POSITIVE ####'
#    print 'pos precision:', nltk.metrics.precision(refsets['pos'], testsets['pos'])
#    print 'pos recall:', nltk.metrics.recall(refsets['pos'], testsets['pos'])
#    print 'pos F-measure:', nltk.metrics.f_measure(refsets['pos'], testsets['pos'])
#    print
#    print '#### NEGATIVE ####'
#    print 'neg precision:', nltk.metrics.precision(refsets['neg'], testsets['neg'])
#    print 'neg recall:', nltk.metrics.recall(refsets['neg'], testsets['neg'])
#    print 'neg F-measure:', nltk.metrics.f_measure(refsets['neg'], testsets['neg'])
#
#    print '--------------------'
#    print 'Classifier Accuracy:', util.accuracy(classifier, testfeats)
#    classifier.show_most_informative_features()

if __name__ == "__main__":
    # example train and tester.test to display accuracies
    train(train_samples=2000, wordcount_samples=1000, wordcount_range=2000, force_update=False, verbose=True)
    from tester import test

    test()
Example No. 38
    def train(self, trainInput, trainTarget, validInput=None, validTarget=None):
        self.initFolder()
        trainOpt = self.trainOpt
        if validInput is None and validTarget is None:
            X, T, VX, VT = self.initData(\
                trainInput, trainTarget, \
                split=self.trainOpt['needValid'])
        else:
            X = trainInput
            T = trainTarget
            VX = validInput
            VT = validTarget
        N = X.shape[0]
        numEpoch = trainOpt['numEpoch']
        calcError = trainOpt['calcError']
        numExPerBat = trainOpt['batchSize']
        progressWriter = ProgressWriter(N, width=80)
        logger = Logger(self, csv=trainOpt['writeRecord'])
        logger.logMsg('Trainer ' + self.name)
        plotter = Plotter(self)
        bestVscore = None
        bestTscore = None
        bestEpoch = 0
        nAfterBest = 0
        stop = False

        # Train loop through epochs
        for epoch in range(0, numEpoch):
            E = 0
            correct = 0
            total = 0

            if trainOpt['shuffle']:
                X, T = vt.shuffleData(X, T, self.random)

            batchStart = 0
            while batchStart < N:
                # Batch info
                batchEnd = min(N, batchStart + numExPerBat)
                numExThisBat = batchEnd - batchStart

                # Write progress bar
                if trainOpt['progress']:
                    progressWriter.increment(amount=numExThisBat)

                # Forward
                Y_bat = self.model.forward(X[batchStart:batchEnd], dropout=True)
                T_bat = T[batchStart:batchEnd]

                # Loss
                Etmp, dEdY = self.model.getCost(Y_bat, T_bat)
                E += Etmp * numExThisBat / float(N)

                # Backward
                self.model.backward(dEdY)

                # Update
                self.model.updateWeights()

                # Prediction error
                if calcError:
                    rate_, correct_, total_ = \
                        tester.calcRate(self.model, Y_bat, T_bat)
                    correct += correct_
                    total += total_

                batchStart += numExPerBat

            # Store train statistics
            if calcError:
                rate = correct / float(total)
                self.rate[epoch] = rate
            self.loss[epoch] = E

            if not trainOpt.has_key('criterion'):
                Tscore = E
            else:
                if trainOpt['criterion'] == 'loss':
                    Tscore = E
                elif trainOpt['criterion'] == 'rate':
                    Tscore = 1 - rate
                else:
                    raise Exception('Unknown stopping criterion "%s"' % \
                        trainOpt['criterion'])

            # Run validation
            if trainOpt['needValid']:
                VY = tester.test(self.model, VX)
                VE, dVE = self.model.getCost(VY, VT)
                self.validLoss[epoch] = VE
                if calcError:
                    Vrate, correct, total = tester.calcRate(self.model, VY, VT)
                    self.validRate[epoch] = Vrate

                # Check stopping criterion
                if not trainOpt.has_key('criterion'):
                    Vscore = VE
                else:
                    if trainOpt['criterion'] == 'loss':
                        Vscore = VE
                    elif trainOpt['criterion'] == 'rate':
                        Vscore = 1 - Vrate
                    else:
                        raise Exception('Unknown stopping criterion "%s"' % \
                            trainOpt['criterion'])
                if (bestVscore is None) or (Vscore < bestVscore):
                    bestVscore = Vscore
                    bestTscore = Tscore
                    nAfterBest = 0
                    bestEpoch = epoch
                    # Save trainer if VE is best
                    if trainOpt['saveModel']:
                        self.save()
                else:
                    nAfterBest += 1
                    # Stop training if above patience level
                    if nAfterBest > trainOpt['patience']:
                        print 'Patience level reached, early stop.'
                        print 'Will stop at score ', bestTscore
                        stop = True
            else:
                if trainOpt['saveModel']:
                    self.save()
                if trainOpt.has_key('stopScore') and \
                    Tscore < trainOpt['stopScore']:
                    print 'Training score is lower than %.4f, early stop.' % \
                        trainOpt['stopScore'] 
                    stop = True                    

            # Anneal learning rate
            self.model.updateLearningParams(epoch)

            # Print statistics
            logger.logTrainStats()
            if trainOpt['needValid']:
                print 'BT: %.4f' % bestTscore
            
            # Plot train curves
            if trainOpt['plotFigs']:
                plotter.plot()

            # Terminate
            if stop:       
                break

        # Record final epoch number
        self.stoppedTrainScore = bestTscore
        self.stoppedEpoch = bestEpoch if trainOpt['needValid'] else epoch
def main(argv=None):

  hyper_file = os.path.join(RESULTS_DIR, HYPER_FILE)
  if (os.path.isfile(hyper_file)):
    with open(hyper_file, 'r') as handle:
      hyper = json.load(handle)
  else:
    hyper = {}
    hyper['min_test_step'] = LAYERS_NUM
    hyper['step'] = hyper['min_test_step']
    hyper['unchanged'] = 0
    hyper['restfile'] = RESTORING_FILE

  while (hyper['unchanged'] < HYPER_PATIENCE):
    results_dir = os.path.join(RESULTS_DIR, str(hyper['step']))
    writer = Writer(results_dir)
    trainer = Trainer(results_dir, 'train', writer, hyper['step'])
    tester = Tester(results_dir, 'valid', writer, hyper['step'])

    params_file = os.path.join(results_dir, PARAMS_FILE)
    if (os.path.isfile(params_file)):
      with open(params_file, 'r') as handle:
        params = json.load(handle)
    else:
      params = {}
      params['min_test_step'], params['min_test_loss'] = tester.test(EVAL_STEP_NUM)
      params['step'] = params['min_test_step']
      params['unchanged'] = 0
      params['num_decays'] = 0
      params['learning_rate'] = LEARNING_RATE

    if ('min_test_loss' not in hyper):
      hyper['min_test_loss'] = params['min_test_loss']

    while (params['num_decays'] <= MAX_DECAYS):
      params['step'], _ = trainer.train(params['learning_rate'], EVAL_FREQUENCY,
                                        params['step'], hyper['restfile'])
      _, test_loss = tester.test(EVAL_STEP_NUM, params['step'])
      if (test_loss < params['min_test_loss']):
        params['min_test_loss'] = test_loss
        params['min_test_step'] = params['step']
        params['unchanged'] = 0
      else:
        params['unchanged'] += EVAL_FREQUENCY
        if (params['unchanged'] >= PATIENCE):
          params['learning_rate'] *= DECAY_FACTOR
          params['num_decays'] += 1
          params['step'] = params['min_test_step']
          params['unchanged'] = 0

      with open(params_file, 'w') as handle:
        json.dump(params, handle, indent=2)
      print(params)

    #tester.test(step_num=None, init_step=params['min_test_step'])

    if (params['min_test_loss'] < hyper['min_test_loss']):
      hyper['min_test_loss'] = params['min_test_loss']
      hyper['min_test_step'] = hyper['step']
      hyper['unchanged'] = 0
    else:
      hyper['unchanged'] += 1

    hyper['restfile'] = os.path.join(results_dir, model_file(params['min_test_step']))
    hyper['step'] += 2
    with open(hyper_file, 'w') as handle:
      json.dump(hyper, handle, indent=2)
    print(hyper)

    print('\n NEW HYPER PARAMETER: %d' %hyper['step'])
Example No. 40
    def train(
                self, 
                trainInput, 
                trainTarget, 
                trainInputWeights=None,
                validInput=None, 
                validTarget=None,
                validInputWeights=None):
        self.initFolder()
        trainOpt = self.trainOpt
        if validInput is None and validTarget is None:
            X, T, VX, VT = self.initData(\
                trainInput, trainTarget, \
                split=self.trainOpt['needValid'])
        else:
            X = trainInput
            T = trainTarget
            VX = validInput
            VT = validTarget
        N = X.shape[0]
        print 'Epoch size:', N
        numEpoch = trainOpt['numEpoch']
        calcError = trainOpt['calcError']
        numExPerBat = trainOpt['batchSize']
        print 'Batch size:', numExPerBat
        numBatPerStep = trainOpt['stepSize'] \
            if trainOpt.has_key('stepSize') \
            else int(np.ceil(N / float(numExPerBat)))
        print 'Step size:', numBatPerStep
        numExPerStep = numExPerBat * numBatPerStep \
            if trainOpt.has_key('stepSize') \
            else N
        print 'Examples per step:', numExPerStep
        numStepPerEpoch = int(np.ceil(
            N / float(numExPerStep))) \
            if trainOpt.has_key('stepSize') \
            else 1
        print 'Steps per epoch:', numStepPerEpoch
        progressWriter = ProgressWriter(numExPerStep, width=80)
        logger = Logger(self, csv=trainOpt['writeRecord'])
        logger.logMsg('Trainer ' + self.name)
        plotter = Plotter(self)
        bestVscore = None
        bestTscore = None
        bestStep = 0
        totalBat = 0
        step = 0
        totalStep = 0
        nAfterBest = 0
        stop = False
        self.loss = np.zeros((numStepPerEpoch * numEpoch))
        self.validLoss = np.zeros((numStepPerEpoch * numEpoch))
        self.rate = np.zeros((numStepPerEpoch * numEpoch))
        self.validRate = np.zeros((numStepPerEpoch * numEpoch))
        
        # Train loop through epochs
        for epoch in range(0, numEpoch):
            self.epoch = epoch
            epochE = 0
            epochCorrect = 0
            epochTotal = 0
            
            # Shuffle data
            if trainOpt['shuffle']:
                X, T = vt.shuffleData(X, T, self.random)
            
            # Every step, validate
            for step in range(0, numStepPerEpoch):
                stepStart = step * numExPerStep
                stepEnd = min((step + 1) * numExPerStep, N)
                numExThisStep = stepEnd - stepStart
                E = 0
                correct = 0
                total = 0
                self.totalStep = totalStep
                
                # Every batch forward-backward
                for batch in range(0, numBatPerStep):
                    batchStart = stepStart + batch * numExPerBat
                    if batchStart > N:
                        break
                    batchEnd = min(
                        stepStart + (batch + 1) * numExPerBat, stepEnd)
                    numExThisBat = batchEnd - batchStart
                    self.totalBatch = totalBat
                    
                    if trainOpt['progress']:
                        progressWriter.increment(amount=numExThisBat)
                    
                    # Forward
                    Y_bat = self.model.forward(
                        X[batchStart:batchEnd], dropout=True)
                    T_bat = T[batchStart:batchEnd]
                    
                    # Loss
                    Etmp, dEdY = self.model.getCost(
                        Y_bat, T_bat, weights=trainInputWeights)
                    E += Etmp * numExThisBat / float(numExThisStep)
                    epochE += Etmp * numExThisBat / float(N)
                    
                    # Backward
                    self.model.backward(dEdY)
                    
                    # Update
                    self.model.updateWeights()
                    
                    # Prediction error
                    if calcError:
                        rate_, correct_, total_ = \
                            tester.calcRate(self.model, Y_bat, T_bat)
                        correct += correct_
                        total += total_
                        epochCorrect += correct_
                        epochTotal += total_
                    
                    totalBat += 1
                
                # Store train statistics
                if calcError:
                    rate = correct / float(total)
                    self.rate[totalStep] = rate
                self.loss[totalStep] = E
                
                # Early stop
                if not trainOpt.has_key('criterion'):
                    Tscore = E
                else:
                    if trainOpt['criterion'] == 'loss':
                        Tscore = E
                    elif trainOpt['criterion'] == 'rate':
                        Tscore = 1 - rate
                    else:
                        raise Exception('Unknown stopping criterion "%s"' % \
                            trainOpt['criterion'])
                
                # Run validation
                if trainOpt['needValid']:
                    VY = tester.test(self.model, VX)
                    VE, dVE = self.model.getCost(
                        VY, VT, weights=validInputWeights)
                    self.validLoss[totalStep] = VE
                    if calcError:
                        Vrate, correct, total = tester.calcRate(
                            self.model, VY, VT)
                        self.validRate[totalStep] = Vrate
                    
                    # Check stopping criterion
                    if not trainOpt.has_key('criterion'):
                        Vscore = VE
                    else:
                        if trainOpt['criterion'] == 'loss':
                            Vscore = VE
                        elif trainOpt['criterion'] == 'rate':
                            Vscore = 1 - Vrate
                        else:
                            raise Exception(
                                'Unknown stopping criterion "%s"' % \
                                trainOpt['criterion'])
                    if (bestVscore is None) or (Vscore < bestVscore):
                        bestVscore = Vscore
                        bestTscore = Tscore
                        nAfterBest = 0
                        bestStep = totalStep

                        # Save trainer if VE is best
                        if trainOpt['saveModel']:
                            self.save()
                    else:
                        nAfterBest += 1
                        # Stop training if above patience level
                        if nAfterBest > trainOpt['patience']:
                            print 'Patience level reached, early stop.'
                            print 'Will stop at score ', bestTscore
                            stop = True
                else:
                    if trainOpt['saveModel']:
                        self.save()
                    if trainOpt.has_key('stopScore') and \
                        Tscore < trainOpt['stopScore']:
                        print \
                            'Training score is lower than %.4f, early stop.' % \
                            trainOpt['stopScore'] 
                        stop = True
                
                logger.logTrainStats()
                if trainOpt['needValid']:
                    print 'P: %d' % nAfterBest,
                print self.name
                
                if stop:
                    break
            
            # Store train statistics
            if calcError:
                epochRate = epochCorrect / float(epochTotal)
            print 'Epoch Final: %d TE: %.4f TR:%.4f' % \
                (epoch, epochE, epochRate)
            
            # Anneal learning rate
            self.model.updateLearningParams(epoch)
            
            # Plot train curves
            if trainOpt['plotFigs']:
                plotter.plot()
                
            # Terminate
            if stop:       
                break
                
        # Report best train score
        self.stoppedTrainScore = bestTscore
Example No. 41
  
def test_missing_is_stale():
  import os, dagger
  os.system('touch tmp1')
  n1 = dagger.node('tmp1')
  n1.update()

  n2 = dagger.node('tmp_missing')
  n2.update()

  return n1.stale == None and n2.stale
  
#############################################
tests = [
test_add,
test_format_abs,
test_format_date,
test_format_time,
test_format_base,
test_format_base_dot,
test_format_root,
test_format_root_dot,
test_update_time,
test_update_hash,
test_missing_is_stale,
]

from tester import test
import sys
sys.exit( not test(tests=tests) )
Example No. 42
def test(args):
    import tester
    tester.test()
Example No. 43
from tester import test


# D. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element from each tuple.
def sort_last(tuples):
  # +++your code here+++
  return


test(sort_last([(1, 3), (3, 2), (2, 1)]),
     [(2, 1), (3, 2), (1, 3)])
test(sort_last([(2, 3), (1, 2), (3, 1)]),
     [(3, 1), (1, 2), (2, 3)])
test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
     [(2, 2), (1, 3), (3, 4, 5), (1, 7)])
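
# One possible implementation, added here as an illustrative sketch so the
# exercise stub above stays untouched; the name sort_last_solution is not
# part of the original exercise file.
def sort_last_solution(tuples):
  # Sort by the last element of each tuple via a key function.
  return sorted(tuples, key=lambda t: t[-1])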


# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
  # +++your code here+++
  return
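
# An illustrative sketch of one linear-time merge, assuming both input lists
# are already sorted; the name linear_merge_solution is not part of the
# original exercise file.
def linear_merge_solution(list1, list2):
  merged = []
  i = j = 0
  # Walk both lists once, always taking the smaller front element.
  while i < len(list1) and j < len(list2):
    if list1[i] <= list2[j]:
      merged.append(list1[i])
      i += 1
    else:
      merged.append(list2[j])
      j += 1
  # One list is exhausted; append whatever remains of the other.
  merged.extend(list1[i:])
  merged.extend(list2[j:])
  return merged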

Example No. 44
from tester import test


# E. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
  # +++your code here+++
  return


test(verbing('hail'), 'hailing')
test(verbing('swiming'), 'swimingly')
test(verbing('do'), 'do')
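
# One possible implementation, added as an illustrative sketch; the name
# verbing_solution is not part of the original exercise file.
def verbing_solution(s):
  if len(s) < 3:
    return s
  # Strings already ending in 'ing' get 'ly'; everything else gets 'ing'.
  return s + 'ly' if s.endswith('ing') else s + 'ing'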


# F. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
  # +++your code here+++
  return
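
# An illustrative sketch of one way to do the 'not'...'bad' replacement; the
# name not_bad_solution is not part of the original exercise file.
def not_bad_solution(s):
  n = s.find('not')
  b = s.find('bad')
  # Replace only when both substrings exist and 'bad' comes after 'not'.
  if n != -1 and b != -1 and b > n:
    return s[:n] + 'good' + s[b + 3:]
  return s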
Example No. 45
from tester import test


# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
  # +++your code here+++
  return


test(donuts(4), 'Number of donuts: 4')
test(donuts(9), 'Number of donuts: 9')
test(donuts(10), 'Number of donuts: many')
test(donuts(99), 'Number of donuts: many')
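
# One possible implementation, added as an illustrative sketch; the name
# donuts_solution is not part of the original exercise file.
def donuts_solution(count):
  # Counts of 10 or more are reported as 'many'.
  return 'Number of donuts: %s' % (count if count < 10 else 'many')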


# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
  # +++your code here+++
  return
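
# An illustrative sketch of both_ends; the name both_ends_solution is not
# part of the original exercise file.
def both_ends_solution(s):
  # Strings shorter than 2 characters yield the empty string.
  return s[:2] + s[-2:] if len(s) >= 2 else ''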

Example No. 46
  delete('missing')
  
  try: os.remove('6')
  except: pass

  d = dagger.dagger()
  d.add('1', ['2','3'])
  d.add('2', ['missing'])
  d.add('4', ['5'])
  d.add('5', ['6'])
  d.run(allpaths=True)
  
  it = d.iter(['4'])
  
  return it.next() == ['4']

#############################################
tests = [
test_iter,
test_iterator_all,
test_iterator_names,
test_missing,
test_top_fresh,
test_top_stale,
]

from tester import test
import sys
if __name__=='__main__':
  test(tests=tests)
#  sys.exit( not test(tests=tests) )
Example No. 47
from tester import test


# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
  # +++your code here+++
  return


test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
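
# One possible implementation, added as an illustrative sketch; the name
# match_ends_solution is not part of the original exercise file.
def match_ends_solution(words):
  # Count strings of length >= 2 whose first and last characters match.
  return sum(1 for w in words if len(w) >= 2 and w[0] == w[-1])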


# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
  # +++your code here+++
  return


test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
     ['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
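
# An illustrative sketch of front_x; the name front_x_solution is not part of
# the original exercise file.
def front_x_solution(words):
  # Sort the words starting with 'x' and the rest separately, then combine.
  x_words = sorted(w for w in words if w.startswith('x'))
  other_words = sorted(w for w in words if not w.startswith('x'))
  return x_words + other_words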
Example No. 48
    def OnCheckPSP(self, event):
        "Find defects and errors, if complete, change to the next phase"
        evt_id = event.GetId()
        if evt_id == ID_COMPILE:
            self.SetPSPPhase('compile')
        elif evt_id == ID_TEST:
            self.SetPSPPhase('test')
        if self.active_child:
            phase = self.GetPSPPhase()
            defects = []    # static checks and failed tests
            errors = []     # sanity checks (planning & postmortem)
            if phase == "planning":
                # check plan summary completeness
                for phase, times in self.psptimetable.cells.items():
                    if not times['plan']:
                        errors.append("Complete %s estimate time!" % phase)
            elif phase == "design" or phase == "code":
                #TODO: review checklist?
                pass
            elif phase == "compile":
                # run "static" chekers to find coding defects (pep8, pyflakes)
                import checker
                defects.extend(checker.check(self.active_child.GetFilename()))
            elif phase == "test":
                # run doctests to find defects
                import tester
                defects.extend(tester.test(self.active_child.GetFilename()))
            elif phase == "postmortem":
                # check that all defects are fixed
                for defect in self.psp_defect_list.data.values():
                    if not defect['remove_phase']:
                        errors.append("Defect %(number)s not fixed!" % defect)

            # add found defects (highlight them in the editor window)
            line_numbers = set()
            for defect in defects:
                self.NotifyDefect(**defect)
                errors.append("Defect found: %(summary)s" % defect)
                if defect['lineno'] is not None:
                    line_numbers.add(defect['lineno'])
            self.active_child.HighlightLines(line_numbers)

            # show errors
            if errors:
                dlg = wx.MessageDialog(self, "\n".join(errors), 
                       "PSP Check Phase Errors", wx.ICON_EXCLAMATION | wx.OK)
                dlg.ShowModal()
                dlg.Destroy()
                self._mgr.GetPane("psp_defects").Show(True)
                self._mgr.Update()

            # phase completed? project completed?
            if not defects and not errors:
                i = PSP_PHASES.index(phase) + 1
                if i < len(PSP_PHASES):
                    phase = PSP_PHASES[i]
                else:
                    phase = ""
                self.OnStopPSP(event)
                self.SetPSPPhase(phase)
        else:
            dlg = wx.MessageDialog(self, "No active file, cannot check it.\n"
                    "Change PSP phase manually if desired.", 
                    "PSP Check Phase Errors", wx.ICON_EXCLAMATION)
            dlg.ShowModal()
            dlg.Destroy()