Example #1
2
def test_deidentify_dataset(capsys):
    datasets.create_dataset(
        service_account_json,
        project_id,
        cloud_region,
        dataset_id)

    datasets.deidentify_dataset(
        service_account_json,
        project_id,
        cloud_region,
        dataset_id,
        destination_dataset_id,
        whitelist_tags)

    # Clean up
    datasets.delete_dataset(
        service_account_json, project_id, cloud_region, dataset_id)
    datasets.delete_dataset(
        service_account_json,
        project_id,
        cloud_region,
        destination_dataset_id)

    out, _ = capsys.readouterr()

    # Check that de-identify worked
    assert 'De-identified data written to' in out
Example #2
1
File: main.py Project: xclmj/mlbench-old
def main():
    options = get_options()

    options = initialize(options)
    options = create_dataset(options, train=True)
    options = create_dataset(options, train=False)

    model = get_model(options)

    optimizer = get_optimizer(options, model)

    scheduler = get_scheduler(options, optimizer)

    # Criterions are like `torch.nn.CrossEntropyLoss()`
    criterion = get_criterion(options, model)

    metrics = get_metrics(options)

    model = convert_dtype(options.dtype, model)
    criterion = convert_dtype(options.dtype, criterion)
    if options.use_cuda:
        model.cuda()
        criterion.cuda()

    options = checkpoint.maybe_resume(options, model, optimizer, scheduler)

    controlflow = get_controlflow(options)
    controlflow(model=model,
                optimizer=optimizer,
                criterion=criterion,
                metrics=metrics,
                scheduler=scheduler,
                options=options)
Example #3
0
def test_CRUD_dataset(capsys, crud_dataset_id):
    datasets.create_dataset(project_id, cloud_region, crud_dataset_id)

    @retry(
        wait_exponential_multiplier=1000,
        wait_exponential_max=10000,
        stop_max_attempt_number=10,
        retry_on_exception=retry_if_server_exception,
    )
    def get_dataset():
        datasets.get_dataset(project_id, cloud_region, crud_dataset_id)

    get_dataset()

    datasets.list_datasets(project_id, cloud_region)

    datasets.delete_dataset(project_id, cloud_region, crud_dataset_id)

    out, _ = capsys.readouterr()

    # Check that create/get/list/delete worked
    assert "Created dataset" in out
    assert "Time zone" in out
    assert "Dataset" in out
    assert "Deleted dataset" in out
Example #4
0
File: qlassifier.py Project: tuliplan/qibo
    def __init__(self, name, layers, grid=11, test_samples=1000, seed=0):
        """Class with all computations needed for classification.

        Args:
            name (str): Name of the problem to create the dataset, to choose between
                ['circle', '3 circles', 'square', '4 squares', 'crown', 'tricrown', 'wavy lines'].
            layers (int): Number of layers to use in the classifier.
            grid (int): Number of points in one direction defining the grid of points.
                If not specified, the dataset does not follow a regular grid.
            test_samples (int): Number of points in the test set, randomly located.
                This argument is ignored if grid is specified.
            seed (int): Random seed.

        Returns:
            Dataset for the given problem (x, y).
        """
        np.random.seed(seed)
        self.name = name
        self.layers = layers
        self.training_set = create_dataset(name, grid=grid)
        self.test_set = create_dataset(name, samples=test_samples)
        self.target = create_target(name)
        self.params = np.random.randn(layers * 4)
        self._circuit = self._initialize_circuit()
        # Create the results directory if it does not exist yet.
        os.makedirs('results/' + self.name + '/%s_layers' % self.layers,
                    exist_ok=True)
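The docstring above lists the problems this class can build datasets for. A hypothetical instantiation; the class name Qlassifier is only inferred from the file name qlassifier.py and is not confirmed by the excerpt:

# Sketch: a 4-layer classifier for the 'circle' problem on an 11x11 grid,
# with 500 randomly located test points.
clf = Qlassifier(name='circle', layers=4, grid=11, test_samples=500, seed=0)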
Example #5
0
File: main.py Project: mlbench/mlbench
def main():
    options = get_options()

    options = initialize(options)
    options = create_dataset(options, train=True)
    options = create_dataset(options, train=False)

    model = get_model(options)

    optimizer = get_optimizer(options, model)

    scheduler = get_scheduler(options, optimizer)

    # Criterions are like `torch.nn.CrossEntropyLoss()`
    criterion = get_criterion(options, model)

    metrics = get_metrics(options)

    model = convert_dtype(options.dtype, model)
    criterion = convert_dtype(options.dtype, criterion)
    if options.use_cuda:
        model.cuda()
        criterion.cuda()

    options = checkpoint.maybe_resume(options, model, optimizer, scheduler)

    controlflow = get_controlflow(options)
    controlflow(model=model, optimizer=optimizer, criterion=criterion,
                metrics=metrics, scheduler=scheduler, options=options)
Example #6
0
def test_get_set_dataset_iam_policy(capsys):
    datasets.create_dataset(
        service_account_json,
        project_id,
        cloud_region,
        dataset_id)

    get_response = datasets.get_dataset_iam_policy(
        service_account_json,
        project_id,
        cloud_region,
        dataset_id)

    set_response = datasets.set_dataset_iam_policy(
        service_account_json,
        project_id,
        cloud_region,
        dataset_id,
        'serviceAccount:[email protected]',
        'roles/viewer')

    # Clean up
    datasets.delete_dataset(
        service_account_json,
        project_id,
        cloud_region,
        dataset_id)

    out, _ = capsys.readouterr()

    assert 'etag' in get_response
    assert 'bindings' in set_response
    assert len(set_response['bindings']) == 1
    assert 'python-docs-samples-tests' in str(set_response['bindings'])
    assert 'roles/viewer' in str(set_response['bindings'])
Example #7
def test_CRUD_dataset(capsys):
    datasets.create_dataset(
        service_account_json,
        api_key,
        project_id,
        cloud_region,
        dataset_id)

    datasets.get_dataset(
        service_account_json, api_key, project_id, cloud_region, dataset_id)

    datasets.list_datasets(
        service_account_json, api_key, project_id, cloud_region)

    # Test and also clean up
    datasets.delete_dataset(
        service_account_json, api_key, project_id, cloud_region, dataset_id)

    out, _ = capsys.readouterr()

    # Check that create/get/list/delete worked
    assert 'Created dataset' in out
    assert 'Time zone' in out
    assert 'Dataset' in out
    assert 'Deleted dataset' in out
Example #8
0
    def __call__(self, model, optimizer, criterion, metrics, scheduler,
                 options):
        """Train models and perform validation.

        :param model: a pytorch model to be trained and validated.
        :type model: nn.Module
        :param optimizer: an optimizer for the given model.
        :param criterion: loss function. 
        :param metrics: metrics like TopKAccuracy.
        :param scheduler: a scheduler for hyperparameters.
        :param options: a global object containing all of the options.
        :type options: argparse.Namespace
        """
        # define some parameters for training.
        log.info(
            'There are {} epochs, {} mini-batches per epoch (batch size:{}).'.
            format(options.train_epochs, options.train_num_batches,
                   options.batch_size), 0)

        # train the model and evaluate the model per args.eval_freq
        max_epochs = min(options.train_epochs, options.max_train_steps)\
            if options.max_train_steps else options.train_epochs
        start_epoch = options.runtime['current_epoch'] if options.resume else 0
        options.runtime['records'] = options.runtime.get('records', [])
        options.runtime['cumu_time_val'] = options.runtime.get(
            'cumu_time_val', [])
        options.runtime['cumu_time_train'] = options.runtime.get(
            'cumu_time_train', [])

        dist.barrier()

        timeit = Timeit(0 if len(options.runtime['cumu_time_val']) ==
                        0 else options.runtime['cumu_time_val'][-1])
        for epoch in range(start_epoch, max_epochs):
            options.runtime['current_epoch'] = epoch

            # schedule learning rates
            if options.lr_scheduler_level == 'epoch':
                scheduler.step()

            # Per epoch information.
            log.info(
                "Current epoch : {} : lr={} : time={:10.3e}".format(
                    epoch, scheduler.get_lr(), timeit.cumu), 0)

            train_epoch(model, optimizer, criterion, scheduler, options,
                        timeit)

            if options.validation:
                timeit.pause()
                do_validate(model, optimizer, criterion, metrics, scheduler,
                            options, timeit)
                timeit.resume()

            if options.repartition_per_epoch:
                options = create_dataset(options, train=True)
                options = create_dataset(options, train=False)
Example #9
0
 def create():
     try:
         datasets.create_dataset(project_id, location, dataset_id)
     except HttpError as err:
         # We ignore a 409 conflict here, because it most likely means the
         # first request failed on the client side but the creation
         # succeeded on the server side.
         if err.resp.status == 409:
             print("Got exception {} while creating dataset".format(err.resp.status))
         else:
             raise
Example #10
0
    def __init__(self, params):
        """Initialize BenchmarkCNN.

        Args:
          params: Params tuple, typically created by make_params or
                  make_params_from_flags.
        Raises:
          ValueError: Unsupported params settings.
        """
        self.params = params
        if FLAGS.deterministic:
            assert self.params.data_dir is None
            self.dataset = datasets.create_dataset(None,
                                                   self.params.data_name)
        else:
            self.dataset = datasets.create_dataset(self.params.data_dir,
                                                   self.params.data_name)
        self.model = model_config.get_model_config(self.params.model,
                                                   self.dataset)
        self.data_format = self.params.data_format
        self.resize_method = self.params.resize_method
        self.use_synthetic_gpu_images = self.dataset.use_synthetic_gpu_images()
        self.num_batches_for_eval = self.params.num_batches_for_eval

        if ((self.params.num_epochs_per_decay or
             self.params.learning_rate_decay_factor) and
                not (
                        self.params.learning_rate and self.params.num_epochs_per_decay and
                        self.params.learning_rate_decay_factor)):
            raise ValueError('If one of num_epochs_per_decay or '
                             'learning_rate_decay_factor is set, both must be '
                             'set and learning_rate must be set')
        if (self.params.minimum_learning_rate and
                not (
                        self.params.learning_rate and self.params.num_epochs_per_decay and
                        self.params.learning_rate_decay_factor)):
            raise ValueError('minimum_learning_rate requires learning_rate, '
                             'num_epochs_per_decay, and '
                             'learning_rate_decay_factor to be set')

        # Use the batch size from the command line if specified, otherwise use the
        # model's default batch size.  Scale the benchmark's batch size by the
        # number of GPUs.
        if self.params.batch_size > 0:
            self.model.set_batch_size(self.params.batch_size)
        self.batch_size = self.model.get_batch_size()
        self.batch_group_size = self.params.batch_group_size
        self.loss_scale = None
        self.loss_scale_normal_steps = None
        self.image_preprocessor = self.get_image_preprocessor()
Example #11
0
    def __call__(self, model, optimizer, criterion, metrics, scheduler, options):
        """Train models and perform validation.

        :param model: a pytorch model to be trained and validated.
        :type model: nn.Module
        :param optimizer: an optimizer for the given model.
        :param criterion: loss function. 
        :param metrics: metrics like TopKAccuracy.
        :param scheduler: a scheduler for hyperparameters.
        :param options: a global object containing all of the options.
        :type options: argparse.Namespace
        """
        # define some parameters for training.
        log.info('There are {} epochs, {} mini-batches per epoch (batch size:{}).'
                 .format(options.train_epochs, options.train_num_batches,
                         options.batch_size), 0)

        # train the model and evaluate the model per args.eval_freq
        max_epochs = min(options.train_epochs, options.max_train_steps)\
            if options.max_train_steps else options.train_epochs
        start_epoch = options.runtime['current_epoch'] if options.resume else 0
        options.runtime['records'] = options.runtime.get('records', [])
        options.runtime['cumu_time_val'] = options.runtime.get('cumu_time_val', [])
        options.runtime['cumu_time_train'] = options.runtime.get('cumu_time_train', [])

        dist.barrier()

        timeit = Timeit(0 if len(options.runtime['cumu_time_val']) == 0
                        else options.runtime['cumu_time_val'][-1])
        for epoch in range(start_epoch, max_epochs):
            options.runtime['current_epoch'] = epoch

            # schedule learning rates
            if options.lr_scheduler_level == 'epoch':
                scheduler.step()

            # Per epoch information.
            log.info("Current epoch : {} : lr={} : time={:10.3e}"
                     .format(epoch, scheduler.get_lr(), timeit.cumu), 0)

            train_epoch(model, optimizer, criterion, scheduler, options, timeit)

            if options.validation:
                timeit.pause()
                do_validate(model, optimizer, criterion, metrics, scheduler, options, timeit)
                timeit.resume()

            if options.repartition_per_epoch:
                options = create_dataset(options, train=True)
                options = create_dataset(options, train=False)
Example #12
0
def test_patch_dataset(capsys):
    datasets.create_dataset(service_account_json, api_key, project_id,
                            cloud_region, dataset_id)

    datasets.patch_dataset(service_account_json, api_key, project_id,
                           cloud_region, dataset_id, time_zone)

    # Clean up
    datasets.delete_dataset(service_account_json, api_key, project_id,
                            cloud_region, dataset_id)

    out, _ = capsys.readouterr()

    # Check that the patch to the time zone worked
    assert 'UTC' in out
Example #13
0
def main():
    ds = datasets.create_dataset()

    if not os.path.exists(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)

    pool = multiprocessing.Pool(processes=16)
    inputs = [(ds, index)
              for index in range(min(FLAGS.process_first, ds.get_size()))]
    results = pool.map(process, inputs)

    info_table = {}
    info_table["classnames"] = ds.get_classnames()
    info_table["size"] = ds.get_size()

    for index, info in enumerate(results):
        image_filename, label_filename = ds.get_filenames(index)
        image_filename = os.path.basename(image_filename)
        label_filename = os.path.basename(label_filename)

        info["image_filename"] = image_filename
        info["label_filename"] = label_filename

        info_table[str(index)] = info

    filename = os.path.join(FLAGS.output_dir, "info.json")
    logging.info("Writing info to %s." % filename)
    with open(filename, "wt") as f:
        json.dump(info_table, f, indent=4, sort_keys=True)
Example #14
0
def test(config):
    config['num_threads'] = 1  # only num_threads = 1 is supported in testing_usr mode

    dataset = create_dataset(config)
    model = create_model(config)
    model.setup(config)

    result_root_path = os.path.join(config['checkpoints_dir'], config['name'],
                                    config['results_dir'],
                                    'epoch_' + str(config['test_epoch']))
    tools.mkdir(result_root_path)
    print(" create testing_usr folder: " + result_root_path)

    # set module to testing_usr mode
    model.eval()

    for i, data in enumerate(dataset):
        model.set_input(data)  # feed the test data to the model
        model.test()  # run the model forward

        datapoint_offset = model.test_result[0][1]
        datapoint_offset = (datapoint_offset.squeeze(0)).cpu().data.numpy()
        datapoint_bg = ((model.test_result[1][1].squeeze(0).permute(1,2,0)).cpu().data.numpy()+1.0)*0.5*255.
        index = data["PATH"].cpu().data.numpy()[0]
        plot_motion.plot_motion_field(motion_vector=datapoint_offset*5.,
                                      savepath=os.path.join(result_root_path,str(index)+".jpg"),
                                      bg=datapoint_bg.astype(int),
                                      limits=0,
                                      plot_interval=8,
                                      plot_size=10)

        print("Testing forward-- complete:" + str(i + 1) + "  total:" + str(dataset.__len__()))

    print("Testing result have been saved!")
Example #15
0
def main(extra_flags):
    # Check that no unknown flags were passed.
    assert len(extra_flags) >= 1
    if len(extra_flags) > 1:
        raise ValueError('Received unknown flags: %s' % extra_flags[1:])

    # Get parameters from FLAGS passed.
    params = parameters.make_params_from_flags()
    deploy.setup_env(params)
    parameters.save_params(params, params.train_dir)

    # TF log...
    tfversion = deploy.tensorflow_version_tuple()
    deploy.log_fn('TensorFlow:  %i.%i' % (tfversion[0], tfversion[1]))

    # Create model and dataset.
    dataset = datasets.create_dataset(params.data_dir, params.data_name,
                                      params.data_subset)
    model = models.create_model(params.model, dataset)
    set_model_params(model, params)

    # Run CNN trainer.
    trainer = deploy.TrainerCNN(dataset, model, params)
    trainer.print_info()
    trainer.run()
Example #16
0
def main(extra_flags):
    # Check that no unknown flags were passed.
    assert len(extra_flags) >= 1
    if len(extra_flags) > 1:
        raise ValueError('Received unknown flags: %s' % extra_flags[1:])

    # Get parameters from FLAGS passed.
    params = parameters.make_params_from_flags()
    deploy.setup_env(params)
    # Training parameters, update using json file.
    params = replace_with_train_params(params)

    # TF log...
    tfversion = deploy.tensorflow_version_tuple()
    deploy.log_fn('TensorFlow:  %i.%i' % (tfversion[0], tfversion[1]))

    # Create model and dataset.
    dataset = datasets.create_dataset(
        params.data_dir, params.data_name, params.data_subset)
    model = models.create_model(params.model, dataset)
    train.set_model_params(model, params)

    # Set the number of batches to the size of the eval dataset.
    params = params._replace(
        num_batches=int(dataset.num_examples_per_epoch() / (params.batch_size * params.num_gpus)))
    # Run CNN trainer.
    trainer = deploy.TrainerCNN(dataset, model, params)
    trainer.print_info()
    trainer.run()
Example #17
0
def main():
    parser = option_parser.get_parser()
    args = parser.parse_args()
    test_device = args.cuda_device
    eval_seq = args.eval_seq

    para_path = os.path.join(args.save_dir, 'para.txt')
    with open(para_path, 'r') as para_file:
        argv_ = para_file.readline().split()[1:]
        args = parser.parse_args(argv_)

    args.cuda_device = test_device if torch.cuda.is_available() else 'cpu'
    args.is_train = False
    args.rotation = 'quaternion'
    args.eval_seq = eval_seq
    character_names = get_character_names(args)

    dataset = create_dataset(args, character_names)

    model = create_model(args, character_names, dataset)
    model.load(epoch=20000)

    for i, motions in tqdm(enumerate(dataset), total=len(dataset)):
        model.set_input(motions)
        model.test()
Example #18
0
def test_CRUD_dataset(capsys, crud_dataset_id):
    datasets.create_dataset(project_id, cloud_region, crud_dataset_id)

    datasets.get_dataset(project_id, cloud_region, crud_dataset_id)

    datasets.list_datasets(project_id, cloud_region)

    datasets.delete_dataset(project_id, cloud_region, crud_dataset_id)

    out, _ = capsys.readouterr()

    # Check that create/get/list/delete worked
    assert 'Created dataset' in out
    assert 'Time zone' in out
    assert 'Dataset' in out
    assert 'Deleted dataset' in out
Example #19
0
def main(extra_flags):
    # Check that no unknown flags were passed.
    assert len(extra_flags) >= 1
    if len(extra_flags) > 1:
        raise ValueError('Received unknown flags: %s' % extra_flags[1:])

    # Get parameters from FLAGS passed.
    params = parameters.make_params_from_flags()
    deploy.setup_env(params)
    parameters.save_params(params, params.train_dir)

    # TF log...
    tfversion = deploy.tensorflow_version_tuple()
    deploy.log_fn('TensorFlow:  %i.%i' % (tfversion[0], tfversion[1]))

    # Create model and dataset.
    dataset = datasets.create_dataset(
        params.data_dir, params.data_name, params.data_subset)
    model = models.create_model(params.model, dataset)
    set_model_params(model, params)

    # Run CNN trainer.
    trainer = deploy.TrainerCNN(dataset, model, params)
    trainer.print_info()
    trainer.run()
Example #20
0
File: run.py Project: chao-tan/TC-GAN
def test(config):
    config['num_threads'] = 1  # only num_threads = 1 is supported in testing_usr mode
    config['flip'] = False  # not allowed to flip image
    config['add_colorjit'] = False  # not allowed to use color jittering

    dataset = create_dataset(config)
    model = create_model(config)
    model.setup(config)

    result_root_path = os.path.join(config['checkpoints_dir'], config['name'],
                                    config['results_dir'])
    util.mkdir(result_root_path)
    print(" create testing_usr folder: " + result_root_path)

    # set module to testing_usr mode
    model.eval()

    for i, data in enumerate(dataset):
        model.set_input(data)  # feed the test data to the model
        model.test()  # run the model forward

        for k in range(len(model.test_result)):
            img = util.tensor2im(model.test_result[k][1])
            img_path = os.path.join(result_root_path, data['PATH'][0])
            util.save_image(img, img_path)

        print("Testing forward-- complete:" + str(i + 1) + "  total:" +
              str(dataset.__len__()))
Example #21
0
def _rebuild_from_checkpoint(checkpoint_file,
                             same_crop_load_size=False,
                             **ds_kwargs):
    """
    Loads a model and dataset based on the config in a particular dir.
    Args:
        checkpoint_file: dir containing args.json and model checkpoints
        **ds_kwargs: override kwargs for dataset

    Returns: loaded model, initialized dataset

    """
    checkpoint_dir = os.path.dirname(checkpoint_file)
    # read the config file  so we can load in the model
    loaded_opt = load(copy.deepcopy(opt),
                      os.path.join(checkpoint_dir, "args.json"))
    # force certain attributes in the loaded cfg
    override_namespace(
        loaded_opt,
        is_train=False,
        batch_size=1,
        shuffle_data=opt.shuffle_data,  # let inference opt take precedence
    )
    if same_crop_load_size:  # need to override this if we're using intermediates
        loaded_opt.load_size = loaded_opt.crop_size
    model = create_model(loaded_opt)
    # loads the checkpoint
    model.load_model_weights("generator", checkpoint_file).eval()
    model.print_networks(opt.verbose)

    dataset = create_dataset(loaded_opt, **ds_kwargs)

    return model, dataset
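A hypothetical call to the helper above; the checkpoint path and the extra dataset kwarg are illustrative only, not taken from the source project:

# Rebuild the generator and its dataset from a saved run, forcing
# load_size == crop_size so intermediate outputs line up.
model, dataset = _rebuild_from_checkpoint(
    'checkpoints/experiment_1/latest_net_generator.pth',  # hypothetical path
    same_crop_load_size=True,
    phase='test')  # hypothetical **ds_kwargs override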
Example #22
0
 def run_me(
     held_out_set,
     model,
     approx,
     approxProx,
     runSlowAppxCV,
     kwargs,
     log_dir=None,
     save_each=False,
     non_fixed_dims=None,
 ):
     model = copy.deepcopy(model)
     w0 = model.params.get_free().copy()
     Ntrain = model.training_data.X.shape[0]
     held_out_idxs = list(held_out_set)
     weights = np.ones(Ntrain)
     weights[held_out_idxs] = 0
     label = hash(".".join([str(idx) for idx in held_out_idxs]))
     params = model.retrain_with_weights(weights,
                                         doProxAppx=approxProx,
                                         doIJAppx=approx,
                                         doNSAppx=runSlowAppxCV,
                                         log_dir=log_dir,
                                         label=label,
                                         non_fixed_dims=non_fixed_dims,
                                         **kwargs)
     #params[inds] = 0.0
     held_out_X = model.training_data.X[held_out_idxs]
     held_out_Y = model.training_data.Y[held_out_idxs]
     total_error = model.get_error(
         datasets.create_dataset(held_out_X, held_out_Y, copy=False))
     return total_error, params
Example #23
0
def test_dataset():
    dataset = datasets.create_dataset(service_account_json, project_id,
                                      cloud_region, dataset_id)

    yield dataset

    # Clean up
    datasets.delete_dataset(service_account_json, project_id, cloud_region,
                            dataset_id)
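This excerpt is the body of a pytest fixture: everything before yield is setup, and the delete after yield is teardown. A minimal sketch of how such a fixture is declared and consumed; the decorator and the consuming test are assumptions, not part of the original sample (the module-level names are the same as in the excerpt):

import pytest

@pytest.fixture(scope='module')
def test_dataset():
    dataset = datasets.create_dataset(
        service_account_json, project_id, cloud_region, dataset_id)
    yield dataset
    # Clean up once every test that used the fixture has finished.
    datasets.delete_dataset(
        service_account_json, project_id, cloud_region, dataset_id)

def test_dataset_created(test_dataset):
    # pytest injects the object yielded by the fixture above.
    assert test_dataset is not None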
Example #24
0
def main(_):
    import logging
    import sys
    from tensorflow.python.platform import tf_logging

    logging.basicConfig(level=logging.DEBUG,
                        stream=sys.stderr,
                        format='%(levelname)s '
                        '%(asctime)s.%(msecs)06d: '
                        '%(filename)s: '
                        '%(lineno)d '
                        '%(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')

    tf_logger = tf_logging._get_logger()
    tf_logger.propagate = False

    print_flags()

    if not tf.gfile.Exists(FLAGS.eval_log_dir):
        tf.gfile.MakeDirs(FLAGS.eval_log_dir)

    dataset = datasets.create_dataset()
    model = models.create_model(num_char_classes=dataset.num_char_classes,
                                max_seq_len=dataset.max_seq_len,
                                null_code=dataset.null_code)

    data = data_loader.get_data(dataset)
    endpoints = model.create_base(data.images, is_training=False)
    eval_ops, prediction, label = model.create_eval_ops(data, endpoints)

    tf.train.get_or_create_global_step()

    session_config = tf.ConfigProto(allow_soft_placement=True)
    session_config.gpu_options.allow_growth = True

    if FLAGS.eval_type == 'once':
        slim.evaluation.evaluate_once(master=FLAGS.master,
                                      checkpoint_path=FLAGS.ckpt_path,
                                      logdir=FLAGS.eval_log_dir,
                                      num_evals=FLAGS.num_batches,
                                      eval_op=eval_ops,
                                      session_config=session_config)
    elif FLAGS.eval_type == 'loop':
        slim.evaluation.evaluation_loop(
            master=FLAGS.master,
            checkpoint_dir=FLAGS.train_log_dir,
            logdir=FLAGS.eval_log_dir,
            eval_op=eval_ops,
            num_evals=FLAGS.num_batches,
            eval_interval_secs=FLAGS.eval_interval_secs,
            max_number_of_evaluations=FLAGS.number_of_steps,
            timeout=2000,
            session_config=session_config)
    else:
        pass
Example #25
0
def train(config):
    dataset = create_dataset(config)
    model = create_model(config)
    model.setup(config)
    dataset_size = len(dataset)  # get the size of dataset
    print('The number of training images = %d' % dataset_size)
    visualizer = Visualizer(config)  # create visualizer to show/save images


    total_iters = 0  # total number of iterations over dataset points
    t_data = 0

    # Resume training from a previously trained model.
    if int(config['resume_epoch']) > 0:
        print("\n resume training from epoch " + str(int(config['resume_epoch'])) + " ...")
        model.resume_scheduler(int(config['resume_epoch']))
        model.load_networks(config['resume_epoch'])
        model.load_optimizers(config['resume_epoch'])

    # outer loop over epochs; we save the model via the <epoch_count> and <epoch_count>+<save_latest_freq> options
    for epoch in range(int(config['resume_epoch']) + 1, int(config['epoch']) + 1):
        epoch_start_time = time.time()  # note the starting time for current epoch
        iter_data_time = time.time()  # note the starting time for datasets iteration
        epoch_iter = 0  # iteration times for current epoch, reset to 0 for each epoch

        # inner loop over a single epoch
        for i, data in enumerate(dataset):
            iter_start_time = time.time()  # note the starting time for the current iteration
            if total_iters % int(config['print_freq']) == 0:  # record data-loading time every <print_freq> iterations
                t_data = iter_start_time - iter_data_time
            visualizer.reset()
            total_iters = total_iters + int(config['train_batch_size'])
            epoch_iter = epoch_iter + int(config['train_batch_size'])
            model.set_input(data)  # feed the loaded images to the model
            model.optimize_parameters()  # compute losses and gradients, then update model parameters

            if total_iters % int(config['display_freq']) == 0:  # show running results in visdom every <display_freq> iterations
                save_result = total_iters % int(config['update_html_freq']) == 0  # save running results to HTML every <update_html_freq> iterations
                visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)

            if total_iters % int(config['print_freq']) == 0:  # print/save training losses to the console every <print_freq> iterations
                losses = model.get_current_losses()
                t_comp = (time.time() - iter_start_time) / int(config['train_batch_size'])
                visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
                if int(config['display_id']) > 0:
                    visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)

        if epoch % int(config['save_epoch_freq']) == 0:  # save the model every <save_epoch_freq> epochs
            print('saving the module at the end of epoch %d, iters %d' % (epoch, total_iters))
            model.save_networks(epoch)
            model.save_optimizers(epoch)

        print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, int(config['epoch']), time.time() - epoch_start_time))

        # update learning rate after each epoch
        model.update_learning_rate()
Example #26
def main():
    parser = option_parser.get_parser()
    parser.add_argument('--input_bvh', type=str, required=True)
    parser.add_argument('--target_bvh', type=str, required=True)
    parser.add_argument('--test_type', type=str, required=True)
    parser.add_argument('--output_filename', type=str, required=True)

    args = parser.parse_args()

    # argparse can't take a space character as part of the argument
    args.input_bvh = recover_space(args.input_bvh)
    args.target_bvh = recover_space(args.target_bvh)
    
    # Windows 10 fix
    # args.output_filename = recover_space(args.output_filename) -- recover_space would replace the underscore in 'examples/intra_structure' with a space ('examples/intra structure'), resulting in a bad path
    args.output_filename = args.output_filename
    character_names, file_id, src_id = eval_prepare(args)
    input_character_name = args.input_bvh.split('/')[-2]
    output_character_name = args.target_bvh.split('/')[-2]
    output_filename = args.output_filename


    test_device = args.cuda_device
    eval_seq = args.eval_seq

    para_path = os.path.join(args.save_dir, 'para.txt')
    with open(para_path, 'r') as para_file:
        argv_ = para_file.readline().split()[1:]
        args = option_parser.get_parser().parse_args(argv_)

    args.cuda_device = test_device if torch.cuda.is_available() else 'cpu'
    args.is_train = False
    args.rotation = 'quaternion'
    args.eval_seq = eval_seq

    dataset = create_dataset(args, character_names)

    model = create_model(args, character_names, dataset)
    model.load(epoch=20000)

    input_motion = []
    for i, character_group in enumerate(character_names):
        input_group = []
        for j in range(len(character_group)):
            new_motion = dataset.get_item(i, j, file_id[i][j])
            new_motion.unsqueeze_(0)
            new_motion = (new_motion - dataset.mean[i][j]) / dataset.var[i][j]
            input_group.append(new_motion)
        input_group = torch.cat(input_group, dim=0)
        input_motion.append([input_group, list(range(len(character_group)))])

    model.set_input(input_motion)
    model.test()

    os.system('cp "{}/{}/0_{}.bvh" "./{}"'.format(model.bvh_path, output_character_name, src_id, output_filename))
Example #27
0
def estimate_neutral_frames(cnn_lstm_model, trainlist, configuration, flag):
    dictionary = {}
    target_train_labels = []
    target_train_features = []

    for i in range(len(trainlist)):

        videos = trainlist[i]
        imgPath, label, _, _ = videos[0].strip().split(' ')
        head_tail = os.path.normpath(imgPath)
        ind_comps = head_tail.split(os.sep)
        subject_id = ind_comps[-3]
        print(subject_id)
        features = []
        labels = []
        print(len(trainlist[i]))
        trainloader = create_dataset(configuration, [trainlist[i]])
        print(len(trainloader))

        for batch_idx, source in enumerate(trainloader):
            with torch.no_grad():
                source_inputs, source_labels, _ = source
                sourcefeature, source_outputs, source_domain_output = cnn_lstm_model(
                    source_inputs, 0, 0, 0)
                t = source_inputs.size(2)
                sourceframe_feature = F.interpolate(
                    sourcefeature.squeeze(3).squeeze(3), t,
                    mode='linear')  #.squeeze(1)
                sourceframe_feature = sourceframe_feature.view(
                    sourceframe_feature.shape[0] *
                    sourceframe_feature.shape[2], -1)  #.squeeze()
                source_labels = source_labels.view(
                    source_labels.shape[0] * source_labels.shape[1], -1)
                features.append(sourceframe_feature.detach().cpu().numpy())
                labels.append(source_labels.detach().cpu().numpy())

        features = np.concatenate(features, 0)
        labels = np.concatenate(labels, 0)

        mean_face = np.mean(features, axis=0)
        source_mc_features = features - mean_face
        target_train_features.append(source_mc_features)
        target_train_labels.append(labels)
        print(mean_face.shape)
        #subject_neutral_features.append(mean_face)
        dictionary[subject_id] = mean_face

    #np.save('target_train_features.npy', target_train_features)
    #np.save('target_train_labels.npy', target_train_labels)
    print(dictionary[subject_id])
    f = open(flag + "_neutral_frames.pkl", "wb")
    pickle.dump(dictionary, f)
    f.close()
    sys.exit()
Example #28
0
def test_CRUD_dataset(capsys):
    datasets.create_dataset(service_account_json, project_id, cloud_region,
                            dataset_id)

    datasets.get_dataset(service_account_json, project_id, cloud_region,
                         dataset_id)

    datasets.list_datasets(service_account_json, project_id, cloud_region)

    # Test and also clean up
    datasets.delete_dataset(service_account_json, project_id, cloud_region,
                            dataset_id)

    out, _ = capsys.readouterr()

    # Check that create/get/list/delete worked
    assert 'Created dataset' in out
    assert 'Time zone' in out
    assert 'Dataset' in out
    assert 'Deleted dataset' in out
Example #29
0
def create_model():
    width, height = get_dataset_image_size()
    dataset = datasets.create_dataset()

    model = models.create_model(num_char_classes=dataset.num_char_classes,
                                max_seq_len=dataset.max_seq_len,
                                null_code=dataset.null_code)

    images_placeholder = tf.placeholder(tf.float32, shape=[1, height, width, 3])
    images = images_placeholder
    endpoints = model.create_base(images, is_training=False)
    return images_placeholder, endpoints
Example #30
0
File: test.py Project: cycle13/TCLNet
def test(config):
    config['num_threads'] = 1  # only num_threads = 1 is supported in testing_usr mode
    config['flip'] = False  # not allowed to flip image
    config['status'] = 'test'
    config['crop_scale'] = 1.0

    dataset = create_dataset(config)
    model = create_model(config)
    model.setup(config)

    result_root_path = os.path.join(config['checkpoints_dir'], config['name'],
                                    'evaluation')
    util.mkdir(result_root_path)
    util.mkdir(os.path.join(result_root_path, 'prediction_distance'))
    util.mkdir(os.path.join(result_root_path, 'prediction_heatmap'))
    print(" create evaluate folder: " + result_root_path)

    # set module to testing_usr mode
    model.eval()

    save_npy = np.ndarray(shape=(len(dataset) + 1, 2), dtype=float)
    save_npy[0][0], save_npy[0][1] = -1, -1

    for i, data in enumerate(dataset):
        model.set_input(data)  # feed the test data to the model
        model.test()  # run the model forward

        datapoints = (model.test_result[0][1]).cpu().data.numpy()
        index = data["PATH"].cpu().data.numpy()[0]
        save_npy[index][0], save_npy[index][1] = datapoints[0][0], datapoints[0][1]

        dist_img = model.test_result[1][1]
        util.save_image(
            util.tensor2im(dist_img),
            os.path.join(result_root_path, 'prediction_distance',
                         str(index) + ".png"))

        heatmap_img = model.test_result[2][1]
        util.save_image(
            util.tensor2im(heatmap_img),
            os.path.join(result_root_path, 'prediction_heatmap',
                         str(index) + ".png"))

        print("Evaluate forward-- complete:" + str(i + 1) + "  total:" +
              str(dataset.__len__()))

    np.save(os.path.join(result_root_path, 'regression.npy'), save_npy)
    l2_dist, easy_dist, hard_dist = evaluation.evaluate_detailed(save_npy)
    print("Testing npy result have been saved! Evaluation distance: " +
          str(round(l2_dist)) + "(" + str(round(easy_dist)) + "," +
          str(round(hard_dist)) + ")")
Example #31
0
    def init_fn(self):
        # create training dataset
        self.train_ds = create_dataset(self.options.dataset, self.options)

        # create Mesh object
        self.mesh = Mesh()
        self.faces = self.mesh.faces.to(self.device)

        # create GraphCNN
        self.graph_cnn = GraphCNN(self.mesh.adjmat,
                                  self.mesh.ref_vertices.t(),
                                  num_channels=self.options.num_channels,
                                  num_layers=self.options.num_layers).to(
                                      self.device)

        # SMPL Parameter regressor
        self.smpl_param_regressor = SMPLParamRegressor().to(self.device)

        # Setup a joint optimizer for the 2 models
        self.optimizer = torch.optim.Adam(
            params=list(self.graph_cnn.parameters()) +
            list(self.smpl_param_regressor.parameters()),
            lr=self.options.lr,
            betas=(self.options.adam_beta1, 0.999),
            weight_decay=self.options.wd)

        # SMPL model
        self.smpl = SMPL().to(self.device)

        # Create loss functions
        self.criterion_shape = nn.L1Loss().to(self.device)
        self.criterion_keypoints = nn.MSELoss(reduction='none').to(self.device)
        self.criterion_regr = nn.MSELoss().to(self.device)

        # Pack models and optimizers in a dict - necessary for checkpointing
        self.models_dict = {
            'graph_cnn': self.graph_cnn,
            'smpl_param_regressor': self.smpl_param_regressor
        }
        self.optimizers_dict = {'optimizer': self.optimizer}

        # Renderer for visualization
        self.renderer = Renderer(faces=self.smpl.faces.cpu().numpy())

        # LSP indices from full list of keypoints
        self.to_lsp = list(range(14))

        # Optionally start training from a pretrained checkpoint
        # Note that this is different from resuming training
        # For the latter use --resume
        if self.options.pretrained_checkpoint is not None:
            self.load_pretrained(
                checkpoint_file=self.options.pretrained_checkpoint)
Example #32
def test_patch_dataset(capsys):
    datasets.create_dataset(
        service_account_json,
        project_id,
        cloud_region,
        dataset_id)

    datasets.patch_dataset(
        service_account_json,
        project_id,
        cloud_region,
        dataset_id,
        time_zone)

    # Clean up
    datasets.delete_dataset(
        service_account_json, project_id, cloud_region, dataset_id)

    out, _ = capsys.readouterr()

    # Check that the patch to the time zone worked
    assert 'UTC' in out
Example #33
def test_dataset():
    dataset = datasets.create_dataset(
        service_account_json,
        project_id,
        cloud_region,
        dataset_id)

    yield dataset

    # Clean up
    datasets.delete_dataset(
        service_account_json,
        project_id,
        cloud_region,
        dataset_id)
Example #34
0
def main(_):
    print_flags()
    import logging
    import sys
    from tensorflow.python.platform import tf_logging

    logging.basicConfig(level=logging.DEBUG,
                        stream=sys.stderr,
                        format='%(levelname)s '
                        '%(asctime)s.%(msecs)06d: '
                        '%(filename)s: '
                        '%(lineno)d '
                        '%(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    tf_logger = tf_logging._get_logger()
    tf_logger.propagate = False

    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpus

    prepare_training_dir()
    dataset = datasets.create_dataset()

    model = models.create_model(
        num_char_classes=dataset.num_char_classes,  # represents `num_labels + 1` classes
        max_seq_len=dataset.max_seq_len,
        null_code=dataset.null_code)
    hparams = get_training_hparams()

    # If ps_tasks is zero, the local device is used. When using multiple
    # (non-local) replicas, the ReplicaDeviceSetter distributes the variables
    # across the different devices.

    device_setter = tf.train.replica_device_setter(FLAGS.ps_tasks,
                                                   merge_devices=True)
    with tf.device(device_setter):
        data = data_loader.get_data(dataset)

        endpoints = model.create_base(data.images, is_training=True)

        total_loss = model.create_loss(data, endpoints)
        init_fn = model.create_init_fn(FLAGS.checkpoint)

        # print(tf.trainable_variables('CRNN'))
        if FLAGS.show_graph_stats:
            logging.info('Total number of weights in the graph: %s',
                         profile_graph())
        train(total_loss, init_fn, hparams)
Example #35
0
    def __get_input(self):
        preprocessor = COCOPreprocessor(
            batch_size=self.args.batch_size,
            output_shapes=[[self.args.batch_size, IMAGE_SIZE, IMAGE_SIZE, 3]],
            num_splits=1,
            dtype=tf.float32,
            train=False,
            distortions=True,
            resize_method=None,
            shift_ratio=0)

        class params:
            datasets_repeat_cached_sample = False

        self.params = params()
        self.dataset = datasets.create_dataset(self.args.data_location, 'coco')

        return preprocessor.minibatch(self.dataset,
                                      subset='validation',
                                      params=self.params,
                                      shift_ratio=0)
Example #36
0
def validate(config_file):
    print('Reading config file...')
    configuration = parse_configuration(config_file)

    print('Initializing dataset...')
    val_dataset = create_dataset(configuration['val_dataset_params'])
    val_dataset_size = len(val_dataset)
    print('The number of validation samples = {0}'.format(val_dataset_size))

    print('Initializing model...')
    model = create_model(configuration['model_params'])
    model.setup()
    model.eval()

    model.pre_epoch_callback(configuration['model_params']['load_checkpoint'])

    for i, data in enumerate(val_dataset):
        model.set_input(data)  # unpack data from data loader
        model.test()  # run inference

    model.post_epoch_callback(configuration['model_params']['load_checkpoint'])
Example #37
0
from argparse import ArgumentParser
from scipy.io.arff import loadarff
from datasets import create_dataset
from utils import exp_incl_float_range, name_to_classifier_object
from generate_data_for_config import generate_datum
from datetime import datetime
import matplotlib.pyplot as plt

FIG_DIR = '/Users/jan/Dropbox/mphil_project/repo/figs/'

# The dataset for which score data will be plotted
dataset = create_dataset({'n_samples': 7500, 'n_features': 120, 'n_classes': 6, 'n_informative': 40})

# The learning algorithms
algorithms = [
    {'name': 'rnd_forest', 'parameters': {'n_estimators': 50}, 'time': [], 'score': []},
    {'name': 'log_reg', 'parameters': {}, 'time': [], 'score': []}
]

# Percentage of data values
data_range = exp_incl_float_range(0.1, 10, 1, 1.5)

def draw(ax, plt):
    ax.cla()
    ax.plot(data_range[:len(algorithms[0]['score'])], algorithms[0]['score'], 'r-')
    ax.plot(data_range[:len(algorithms[1]['score'])], algorithms[1]['score'], 'b-')
    ax.set_xlabel('% data')
    ax.set_ylabel('Score')
    plt.draw()

plt.ion()
fig, ax = plt.subplots(1,1)
# Argument parser
parser = ArgumentParser(description='Collect data')
parser.add_argument('-a', '--algorithm', type=str, required=True, default='rnd_forest', help='The learning algorithm, one of [rnd_forest, log_reg, svm, naive_bayes]')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-s', '--synthetic', type=str, help='Create a synthetic dataset with the given parameters')
group.add_argument('-l', '--load-arff', type=str, help='Load dataset from arff file with the given name')
parser.add_argument('-d', '--percentage-data', type=float, required=True, help='The percentage of data used')
parser.add_argument('parameter', metavar='parameter', nargs='*', help='Parameters to the algorithm in the form <param_name>:<int|float>:<number>')

args = parser.parse_args()

classifier = name_to_classifier_object(args.algorithm)

if args.synthetic:
    dataset = create_dataset(eval(args.synthetic))
elif args.load_arff:
    dataset = load_dataset({'name': args.load_arff})

param_names = []
param_values = []

for param_str in args.parameter:
    name, type_, value_str = param_str.split(':')
    param_names.append(name)
    if type_ == 'int':
        param_values.append(int(value_str))
    elif type_ == 'float':
        param_values.append(float(value_str))

elapsed_time, avg_score = generate_datum(dataset, classifier, args.percentage_data, dict(zip(param_names, param_values)))
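Each positional parameter follows the <param_name>:<int|float>:<number> format parsed by the loop above. A small illustration with a hypothetical value (not taken from the source):

# 'n_estimators:int:50' splits into a name, a type tag, and a value string,
# so param_values ends up holding the integer 50.
example = 'n_estimators:int:50'
name, type_, value_str = example.split(':')
assert (name, type_, int(value_str)) == ('n_estimators', 'int', 50)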