Example #1
    def __init__(self,
                 model_conf,
                 conf_path,
                 cuda,
                 class_idx,
                 save_segmentations_path=None,
                 skip_empty_images=False):
        super(SegmentationScore, self).__init__()
        self.cuda = cuda
        self.model = construct_model(model_conf, model_conf.name)
        self.class_idxs = class_idx
        if not isinstance(self.class_idxs, list):
            self.class_idxs = [self.class_idxs]
        self.skip_empty_images = skip_empty_images

        initialize_pretrained_model(model_conf, self.model, cuda, conf_path)

        if cuda != '':
            self.model = self.model.cuda()

        self.model.eval()

        self.save_segmentations_path = save_segmentations_path
        if save_segmentations_path is not None:
            parent_dir = os.path.dirname(save_segmentations_path)
            assert os.path.isdir(parent_dir), \
                'Did not find path {}'.format(parent_dir)
            if not os.path.isdir(save_segmentations_path):
                os.mkdir(save_segmentations_path)
            self.num_saved_segmns = 0
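A hypothetical instantiation of the metric above; the config object, paths, and class indices are placeholders, not part of the original snippet:

# Hypothetical usage; model_conf, 'conf.json' and the paths are placeholders.
score = SegmentationScore(model_conf,
                          conf_path='conf.json',
                          cuda='0',
                          class_idx=[1, 2],
                          save_segmentations_path='outputs/segmentations',
                          skip_empty_images=True)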
Example #2
def main(args):
    device = torch.device('cuda')

    config = json.load(open(args.config))
    config['imgsize'] = (args.imgsize, args.imgsize)

    exp_dir = os.path.dirname(args.config)
    modelpath = exp_dir + '/best.pth'

    class_names = load_class_names()
    num_classes = len(class_names)
    v2_info = separate_class(class_names)
    num_makes = len(v2_info['make'].unique())
    num_types = len(v2_info['model_type'].unique())

    model = construct_model(config, num_classes, num_makes, num_types)
    load_weight(model, modelpath, device)
    model = model.to(device)

    train_loader, test_loader = prepare_loader(config)

    if config['version'] == 1:
        test_fn = test_v1
    else:
        test_fn = test_v2

    test_fn(model, test_loader, device, config)
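A hypothetical command-line entry point for this script; the flag names mirror how args is used above and the default image size is an assumption:

if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--config', required=True,
                        help='path to the experiment config JSON')
    parser.add_argument('--imgsize', type=int, default=224,
                        help='input image size (default is an assumption)')
    args = parser.parse_args()
    main(args)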
Example #3
def main(args):
    config = json.load(open(args.config))

    class_names = load_class_names()
    num_classes = len(class_names)
    v2_info = separate_class(class_names)
    num_makes = len(v2_info['make'].unique())
    num_types = len(v2_info['model_type'].unique())

    model = construct_model(config, num_classes, num_makes, num_types)
    count = 0
    for p in list(model.parameters()) + list(model.buffers()):
        count += p.data.view(-1).size(0)

    print(f'Number of parameters for {config["arch"]}: {count}')
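For reference, the same count can be computed more compactly with Tensor.numel(); a minimal equivalent sketch:

# Equivalent count via numel(); `model` is any torch.nn.Module.
count = sum(t.numel() for t in list(model.parameters()) + list(model.buffers()))
print(f'Number of parameters: {count}')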
Example #4
def main(args):
    device = torch.device('cuda')
    config = json.load(open(args.config))

    exp_dir = os.path.dirname(args.config)
    modelpath = exp_dir + '/best.pth'

    class_names = load_class_names()
    num_classes = len(class_names)
    v2_info = separate_class(class_names)
    num_makes = len(v2_info['make'].unique())
    num_types = len(v2_info['model_type'].unique())
    train_loader, test_loader = prepare_loader(config)
    model = construct_model(config, num_classes, num_makes, num_types)

    def _prune(model, rate, save=True):
        print(f'Pruning rate: {rate:.2f}')
        load_weight(model, modelpath, device)
        model = model.to(device)

        if config['version'] == 1:
            test_fn = test_v1
        else:
            test_fn = test_v2

        prune(model, rate)

        res = test_fn(model, test_loader, device, config)

        if args.savefn is not None and save:
            savefndir = os.path.dirname(args.savefn)
            os.makedirs(savefndir, exist_ok=True)

            torch.save(model.state_dict(), args.savefn)

        return res

    hist = []

    if args.prune_all:
        for rate in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
            hist.append(_prune(model, rate * 100, save=False))
    else:
        hist.append(_prune(model, args.prune_rate * 100))

    hist = pd.DataFrame(hist)
    hist.to_csv(exp_dir + '/prune.csv')
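The prune() helper called above is not shown in the example. A hypothetical sketch of a global L1 magnitude-pruning implementation built on torch.nn.utils.prune, assuming the rate argument is a percentage (as the rate * 100 calls suggest):

import torch
import torch.nn.utils.prune as prune_utils

def prune(model, rate):
    # Remove the smallest `rate` percent of weights globally (by L1 magnitude)
    # across all Conv2d and Linear layers.
    params = [(m, 'weight') for m in model.modules()
              if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear))]
    prune_utils.global_unstructured(params,
                                    pruning_method=prune_utils.L1Unstructured,
                                    amount=rate / 100.0)
    # Fold the masks back into the weights so state_dict() holds plain tensors.
    for module, name in params:
        prune_utils.remove(module, name)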
Example #5
def main(args):
    args.device = "cuda" if torch.cuda.is_available() else "cpu"
    train_dl, val_dl, test_dl = get_dataset(args.batch_size, args.dataset)
    model = construct_model(args.model_type)
    train(model, train_dl, val_dl, test_dl, args)
Example #6
def main():
    ############ arguments setup #############
    args = ArgumentsTest().parse_args()
    print('***********************Arguments***********************')
    print(args)

    ############ get configuration info #############
    config = get_config(args)
    print('***********************Configurations***********************')
    print(config)

    ########### get model setup ############
    model = construct_model(args, config)
    print('***********************Model************************')
    print(model)
    if args.gpu_ids is not None:
        model.cuda(args.gpu_ids[0])

    ########### restore the model weights #########
    if args.gpu_ids is None:
        checkpoint = torch.load(args.restore_file)
    else:
        # Map model to be loaded to specified single gpu.
        loc = 'cuda:{}'.format(args.gpu_ids[0])
        checkpoint = torch.load(args.restore_file, map_location=loc)

    print('==> Resume checkpoint {}'.format(args.restore_file))
    if 'state_dict' in checkpoint:
        checkpoint = checkpoint['state_dict']
    if 'transfer' in args.version:
        checkpoint = {
            key: val
            for key, val in checkpoint.items() if 'classifier' not in key
        }
        msg = model.load_state_dict(checkpoint, strict=False)
        print(msg)
    else:
        model.load_state_dict(checkpoint)

    ############ dataset setup #############
    query_iterator, gallery_iterator = construct_dataset(args, config)

    ############ start testing #############
    torch.backends.cudnn.benchmark = True
    model.eval()

    if 'neck-fs' in args.version or 'external' in args.model_name:
        feature_extractor = model
    else:
        feature_extractor = model.feature_extractor

    store_fs = 'store-fs' in args.version

    if 'test-external' in args.version:
        test_method = 'external'
    elif 'test-org' in args.version:
        test_method = 'euclidean'
    elif 'test-bnneck' in args.version:
        test_method = 'euclidean-normal'
    else:
        test_method = 'cosine'

    flips = 'flips' in args.version
    reranking = 'reranking' in args.version

    result = test(feature_extractor,
                  query_iterator,
                  gallery_iterator,
                  args.gpu_ids,
                  store_fs=store_fs,
                  method=test_method,
                  flips=flips,
                  reranking=reranking)

    ############ print result #############
    print('*******************Test Results************************')
    for key in result:
        print('{}: {}'.format(key, result[key]))
Example #7
def build_runner(conf, cuda, mode, resume=False):
  gen_model_conf = Configuration.from_dict(conf.generator_model)
  gen_model = construct_model(gen_model_conf, gen_model_conf.name)

  val_metric_transform = get_output_transform(conf, conf.application, 'test')
  val_metric_fns = {name: get_metric_fn(name)
                    for name in conf.get_attr('validation_metrics',
                                              default=[])}
  output_transform = get_output_transform(conf, conf.application, 'output')

  if mode == 'train':
    disc_model_conf = Configuration.from_dict(conf.discriminator_model)
    disc_model = construct_model(disc_model_conf, disc_model_conf.name)

    gen_adv_criteria = {loss_name: get_criterion(conf, loss_name, cuda, 'gen')
                        for loss_name in conf.generator_adversarial_losses}
    gen_criteria = {loss_name: get_criterion(conf, loss_name, cuda)
                    for loss_name in conf.generator_losses}
    disc_adv_criteria = {loss_name: get_criterion(conf, loss_name, cuda,
                                                  'disc')
                         for loss_name in conf.discriminator_losses}

    if cuda != '':
      utils.cudaify([gen_model, disc_model] +
                    list(gen_adv_criteria.values()) +
                    list(gen_criteria.values()) +
                    list(disc_adv_criteria.values()))

    # Important: construct optimizers after moving model to GPU!
    gen_opt_conf = Configuration.from_dict(conf.generator_optimizer)
    gen_optimizer = get_optimizer(gen_opt_conf, gen_opt_conf.name,
                                  gen_model.parameters())
    gen_lr_scheduler = None
    if gen_opt_conf.has_attr('lr_scheduler'):
      gen_lr_scheduler = get_lr_scheduler(gen_opt_conf,
                                          gen_opt_conf.lr_scheduler,
                                          gen_optimizer)

    disc_opt_conf = Configuration.from_dict(conf.discriminator_optimizer)
    disc_optimizer = get_optimizer(disc_opt_conf, disc_opt_conf.name,
                                   disc_model.parameters())
    disc_lr_scheduler = None
    if disc_opt_conf.has_attr('lr_scheduler'):
      disc_lr_scheduler = get_lr_scheduler(disc_opt_conf,
                                           disc_opt_conf.lr_scheduler,
                                           disc_optimizer)

    train_disc_metrics = conf.get_attr('train_discriminator_metrics',
                                       default=[])
    train_disc_metric_fns = {name: get_metric_fn(name)
                             for name in train_disc_metrics}

    train_gen_metric_transform = get_output_transform(conf, conf.application,
                                                      'train')
    train_gen_metrics = conf.get_attr('train_generator_metrics', default=[])
    train_gen_metric_fns = {name: get_metric_fn(name)
                            for name in train_gen_metrics}

    input_method = disc_model_conf.get_attr('input_method',
                                            default=DEFAULT_INPUT_METHOD)

    runner = AdversarialRunner(gen_model, disc_model,
                               gen_optimizer, disc_optimizer,
                               gen_lr_scheduler, disc_lr_scheduler,
                               gen_adv_criteria, gen_criteria,
                               disc_adv_criteria,
                               conf.get_attr('generator_loss_weights', {}),
                               conf.get_attr('discriminator_loss_weights', {}),
                               cuda,
                               train_gen_metric_fns,
                               train_gen_metric_transform,
                               train_disc_metric_fns,
                               val_metric_fns,
                               val_metric_transform,
                               output_transform,
                               input_method)
    if gen_model_conf.has_attr('pretrained_weights') and not resume:
      runner.initialize_pretrained_model(gen_model_conf, runner.gen,
                                         cuda, conf.file)

    if disc_model_conf.has_attr('pretrained_weights') and not resume:
      runner.initialize_pretrained_model(disc_model_conf, runner.disc,
                                         cuda, conf.file)
  else:
    if cuda != '':
      utils.cudaify(gen_model)
    runner = AdversarialRunner(gen_model,
                               cuda=cuda,
                               val_metric_fns=val_metric_fns,
                               val_metric_transform=val_metric_transform,
                               output_transform=output_transform)

  return runner
Example #8
def build_runner(conf, cuda, mode='train', resume=False):
    model_conf = Configuration.from_dict(conf.model)

    model = construct_model(model_conf, model_conf.name)

    val_metric_transform = get_output_transform(conf, conf.application, 'test')
    val_metric_fns = {
        name: get_metric_fn(name)
        for name in conf.get_attr('validation_metrics', default=[])
    }
    output_transform = get_output_transform(conf, conf.application, 'output')

    if mode == 'train':
        criteria = {}
        if conf.has_attr('loss_name'):
            criteria[conf.loss_name] = get_criterion(conf, conf.loss_name,
                                                     cuda)
        else:
            for loss_name in conf.losses:
                criteria[loss_name] = get_criterion(conf, loss_name, cuda)

        assert len(criteria) > 0, \
            'Need at least one loss to optimize something!'

        if cuda != '':
            utils.cudaify([model] + list(criteria.values()))

        # Important: construct optimizer after moving model to GPU!
        opt_conf = Configuration.from_dict(conf.optimizer)
        optimizer = get_optimizer(opt_conf, opt_conf.name, model.parameters())

        lr_scheduler = None
        if opt_conf.has_attr('lr_scheduler'):
            lr_scheduler = get_lr_scheduler(opt_conf, opt_conf.lr_scheduler,
                                            optimizer)

        train_metric_transform = get_output_transform(conf, conf.application,
                                                      'train')
        train_metric_fns = {
            name: get_metric_fn(name)
            for name in conf.get_attr('train_metrics', default=[])
        }

        runner = Runner(model, criteria, conf.get_attr('loss_weights', {}),
                        optimizer, lr_scheduler, cuda, train_metric_fns,
                        train_metric_transform, val_metric_fns,
                        val_metric_transform, output_transform)

        if model_conf.has_attr('pretrained_weights') and not resume:
            runner.initialize_pretrained_model(model_conf, runner.model, cuda,
                                               conf.file)
    else:
        if cuda != '':
            utils.cudaify(model)
        runner = Runner(model,
                        cuda=cuda,
                        val_metric_fns=val_metric_fns,
                        val_metric_transform=val_metric_transform,
                        output_transform=output_transform)

    return runner
Example #9
def train():
    """run training"""

    # get parameter values
    parser = ArgumentParser()
    parser = params.add_trainer_args(parser)
    param_dict = vars(parser.parse_args())
    if not os.path.exists(param_dict["train_chkpt_dir"]):
        os.makedirs(param_dict["train_chkpt_dir"])
    param_yml = os.path.join(param_dict["train_chkpt_dir"], 'train_params.yml')
    with open(param_yml, 'w') as outfile:
        yaml.dump(param_dict, outfile, default_flow_style=False)

    # load batch and make model predictions
    [param_dict, true_labels, logits, layers,
     meta_batch] = models.construct_model(param_dict, is_training=True)

    # create model summaries stats
    [
        pred_labels, correct_bool, prob_vec, entropy, true_prob_score,
        pred_prob_score
    ] = models.model_summarize(true_labels, logits,
                               param_dict['out_label_count'])

    # calculate losses
    true_labels_one_hot = tf.one_hot(true_labels,
                                     depth=param_dict['out_label_count'],
                                     on_value=1,
                                     off_value=0)
    classification_loss = tf.losses.softmax_cross_entropy(
        true_labels_one_hot, logits)
    weights = tf.trainable_variables()
    # l1
    l1_reg = slim.l1_regularizer(float(param_dict['reg_l1_scale']))
    l1_loss = slim.regularizers.apply_regularization(l1_reg,
                                                     weights_list=weights)
    # l2
    l2_reg = slim.l2_regularizer(float(param_dict['reg_l2_scale']))
    l2_loss = slim.regularizers.apply_regularization(l2_reg,
                                                     weights_list=weights)
    # KL
    global KL_SCALE
    global KL_SPARCE
    KL_SCALE = param_dict['reg_kl_scale']
    KL_SPARCE = param_dict['reg_kl_sparsity']
    print("train kl_params: " + str([KL_SCALE, KL_SPARCE]))
    kl_loss = slim.regularizers.apply_regularization(kl_regularizer,
                                                     weights_list=layers)
    total_loss = tf.losses.get_total_loss()
    tf.summary.scalar('batch_optimization/total_loss', total_loss)
    tf.summary.scalar('batch_optimization/classification_loss',
                      classification_loss)
    tf.summary.scalar('batch_optimization/kl_loss', kl_loss)
    tf.summary.scalar('batch_optimization/l1_loss', l1_loss)
    tf.summary.scalar('batch_optimization/l2_loss', l2_loss)

    # create optimizer
    if param_dict['train_optimizer_str'] == "Adam":
        optimizer = tf.train.AdamOptimizer(param_dict['train_learning_rate'])
    else:
        # MomentumOptimizer also requires a momentum value; 0.9 is a common
        # default (assumption, not taken from the original config).
        optimizer = tf.train.MomentumOptimizer(
            param_dict['train_learning_rate'], momentum=0.9)

    # create training op
    train_op = slim.learning.create_train_op(total_loss,
                                             optimizer=optimizer,
                                             summarize_gradients=False)

    # save training parameters
    with open(param_yml, 'w') as outfile:
        yaml.dump(param_dict, outfile, default_flow_style=False)

    # print model parameter stats
    param_stats = tf.contrib.tfprof.model_analyzer.print_model_analysis(
        tf.get_default_graph(),
        tfprof_options=tf.contrib.tfprof.model_analyzer.
        TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
    print('model total_params: %d\n' % param_stats.total_parameters)

    # run training
    error = slim.learning.train(
        train_op,
        param_dict['train_chkpt_dir'],
        number_of_steps=param_dict['train_max_steps'],
        save_summaries_secs=param_dict['train_save_summ_secs'],
        save_interval_secs=param_dict['train_save_ckpt_secs'],
        session_config=tf.ConfigProto(
            gpu_options=tf.GPUOptions(allow_growth=True),
            log_device_placement=False,
            allow_soft_placement=True))
    print("train error: " + str(error))
Example #10
def main():
    ############ arguments setup #############
    args = ArgumentsTrainVal().parse_args()
    print('***********************Arguments***********************')
    print(args)

    ############ get configuration info #############
    config = get_config(args)
    print('***********************Configurations***********************')
    print(config)

    ############ checkpoints and logs directory setup##############
    checkpoint_dir = os.path.join('checkpoints', args.check_log_dir)
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    args.checkpoint_dir = checkpoint_dir

    log_dir = os.path.join('logs', args.check_log_dir)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    args.log_dir = log_dir

    ########### get model setup ############
    model = construct_model(args, config)
    print('***********************Model************************')
    print(model)
    # prepare the model before restoring and calling the optimizer constructor
    if args.gpu_ids is not None:
        model.cuda(args.gpu_ids[0])

    ############ optimizer setup ###########
    optimizer = config.optimizer_func(model)

    ########### serialization of the running ###########
    if args.restore_file is None:
        # move initialization into the model constructor __init__
        # config.initialization_func(model)
        pass
    else:
        if args.gpu_ids is None:
            checkpoint = torch.load(args.restore_file)
        else:
            # Map model to be loaded to specified single gpu.
            loc = 'cuda:{}'.format(args.gpu_ids[0])
            checkpoint = torch.load(args.restore_file, map_location=loc)

        if 'new-optim' in args.version:
            print('==> Reload weights from {}'.format(args.restore_file))
            ckpt = checkpoint
            if 'state_dict' in checkpoint:
                ckpt = checkpoint['state_dict']
            model.load_state_dict(ckpt)
        else:
            if args.resume_iteration == 0:
                print('==> Transfer model weights from {}'.format(
                    args.restore_file))
                if 'external-bnneck' in args.model_name:
                    feature_extractor = model.base
                else:
                    feature_extractor = model.feature_extractor
                msg = feature_extractor.load_state_dict(checkpoint,
                                                        strict=False)
                print(msg)
            else:
                print('==> Resume checkpoint {}'.format(args.restore_file))
                model.load_state_dict(checkpoint['state_dict'])
                optimizer.load_state_dict(checkpoint['optimizer'])
                for group in optimizer.param_groups:
                    group['initial_lr'] = args.learning_rate
                    group['lr'] = args.learning_rate

    ############ dataset setup #############
    if 'id' in args.version:
        train_iterator, val_iterator, query_iterator, gallary_iterator, id_iterator = construct_dataset(
            args, config)
    else:
        train_iterator, val_iterator, query_iterator, gallary_iterator = construct_dataset(
            args, config)
        id_iterator = None

    ############ learning rate scheduler setup ############
    # TODO: add lr_scheduler state_dict
    if config.lr_scheduler_func:
        lr_scheduler = config.lr_scheduler_func(optimizer,
                                                **config.lr_scheduler_params)
        lr_scheduler_iter = None
    else:
        lr_scheduler = None
        lr_scheduler_iter = config.lr_scheduler_iter_func(
            len(train_iterator), optimizer)

    ############ engine setup ##############
    engine_args = dict(
        gpu_ids=args.gpu_ids,
        network=model,
        criterion=config.loss_func,
        train_iterator=train_iterator,
        validate_iterator=val_iterator,
        optimizer=optimizer,
    )
    engine = construct_engine(engine_args,
                              log_freq=args.log_freq,
                              log_dir=args.log_dir,
                              checkpoint_dir=checkpoint_dir,
                              checkpoint_freq=args.checkpoint_freq,
                              lr_scheduler=lr_scheduler,
                              lr_scheduler_iter=lr_scheduler_iter,
                              metric_dict=config.metric_dict,
                              query_iterator=query_iterator,
                              gallary_iterator=gallary_iterator,
                              id_feature_params=config.id_feature_params,
                              id_iterator=id_iterator,
                              test_params=config.test_params)

    engine.resume(args.maxepoch, args.resume_epoch, args.resume_iteration)
Example #11
def build_runner(conf, cuda, mode):
    gen_model_conf = Configuration.from_dict(conf.generator_model, conf)
    gen_model = construct_model(gen_model_conf, gen_model_conf.name, cuda)

    val_metric_fns = {
        name: get_metric_fn(conf, name, cuda, 'test')
        for name in conf.get_attr('validation_metrics', default=[])
    }
    output_transform = get_output_transform(conf, conf.application,
                                            'inference')
    test_input_batch_transform = get_input_batch_transform(
        conf, conf.application, 'test')

    if mode == 'train':
        disc_model_conf = Configuration.from_dict(conf.discriminator_model,
                                                  conf)
        disc_model = construct_model(disc_model_conf, disc_model_conf.name,
                                     cuda)

        gen_adv_criteria = {
            loss_name: get_criterion(conf, loss_name, cuda, loss_type='gen')
            for loss_name in conf.generator_adversarial_losses
        }
        gen_criteria = {
            loss_name: get_criterion(conf, loss_name, cuda)
            for loss_name in conf.generator_losses
        }
        disc_adv_criteria = {
            loss_name: get_criterion(conf, loss_name, cuda, loss_type='disc')
            for loss_name in conf.discriminator_losses
        }

        if cuda != '':
            # Potentially split models over GPUs
            gen_model, disc_model = utils.cudaify([gen_model, disc_model],
                                                  cuda)
            utils.cudaify(
                list(gen_adv_criteria.values()) + list(gen_criteria.values()) +
                list(disc_adv_criteria.values()))

        # Important: construct optimizers after moving model to GPU!
        gen_opt_conf = Configuration.from_dict(conf.generator_optimizer, conf)
        gen_optimizer = get_optimizer(gen_opt_conf, gen_opt_conf.name,
                                      gen_model.parameters())
        gen_lr_scheduler = None
        if gen_opt_conf.has_attr('lr_scheduler'):
            gen_lr_scheduler = get_lr_scheduler(gen_opt_conf,
                                                gen_opt_conf.lr_scheduler,
                                                gen_optimizer)

        disc_opt_conf = Configuration.from_dict(conf.discriminator_optimizer,
                                                conf)
        disc_optimizer = get_optimizer(disc_opt_conf, disc_opt_conf.name,
                                       disc_model.parameters())
        disc_lr_scheduler = None
        if disc_opt_conf.has_attr('lr_scheduler'):
            disc_lr_scheduler = get_lr_scheduler(disc_opt_conf,
                                                 disc_opt_conf.lr_scheduler,
                                                 disc_optimizer)

        train_input_batch_transform = get_input_batch_transform(
            conf, conf.application, 'train')
        train_disc_metrics = conf.get_attr('train_discriminator_metrics',
                                           default=[])
        train_disc_metric_fns = {
            name: get_metric_fn(conf, name, cuda, 'train')
            for name in train_disc_metrics
        }
        val_disc_metric_key = 'validation_discriminator_metrics'
        val_disc_metric_fns = {
            name: get_metric_fn(conf, name, cuda, 'test')
            for name in conf.get_attr(val_disc_metric_key, default=[])
        }

        train_gen_metrics = conf.get_attr('train_generator_metrics',
                                          default=[])
        train_gen_metric_fns = {
            name: get_metric_fn(conf, name, cuda, 'train')
            for name in train_gen_metrics
        }

        disc_input_fn = get_discriminator_input_fn(conf, disc_model_conf)
        val_disc_input_fn = get_discriminator_input_fn(conf,
                                                       disc_model_conf,
                                                       no_pool=True)

        pretr_generator_epochs = conf.get_attr('pretrain_generator_epochs')
        pretr_discriminator_epochs = conf.get_attr(
            'pretrain_discriminator_epochs')

        runner = AdversarialRunner(
            gen_model, disc_model, gen_optimizer, disc_optimizer,
            gen_lr_scheduler, disc_lr_scheduler, gen_adv_criteria,
            gen_criteria, disc_adv_criteria,
            conf.get_attr('generator_loss_weights', {}),
            conf.get_attr('discriminator_loss_weights', {}), cuda,
            train_gen_metric_fns, train_disc_metric_fns, val_metric_fns,
            val_disc_metric_fns, output_transform, train_input_batch_transform,
            test_input_batch_transform,
            gen_opt_conf.get_attr('updates_per_step', 1),
            disc_opt_conf.get_attr('updates_per_step',
                                   1), disc_input_fn, val_disc_input_fn,
            pretr_generator_epochs, pretr_discriminator_epochs)
        if gen_model_conf.has_attr('pretrained_weights'):
            initialize_pretrained_model(gen_model_conf, runner.gen, cuda,
                                        conf.file)

        if disc_model_conf.has_attr('pretrained_weights'):
            initialize_pretrained_model(disc_model_conf, runner.disc, cuda,
                                        conf.file)
    else:
        if cuda != '':
            utils.cudaify(gen_model)
        runner = AdversarialRunner(
            gen_model,
            cuda=cuda,
            val_metric_fns=val_metric_fns,
            output_transform=output_transform,
            test_input_batch_transform=test_input_batch_transform)

    return runner
Example #12
def main(args):
    device = torch.device('cuda')

    config = {
        'batch_size': args.batch_size,
        'test_batch_size': args.batch_size,
        'lr': args.lr,
        'weight_decay': args.weight_decay,
        'momentum': args.momentum,
        'epochs': args.epochs,
        'imgsize': (args.imgsize, args.imgsize),
        'arch': args.arch,
        'version': args.version,
        'make_loss': args.make_loss,
        'type_loss': args.type_loss,
        'finetune': args.finetune,
        'path': args.path
    }

    exp_dir = get_exp_dir(config)

    class_names = load_class_names()
    num_classes = len(class_names)
    v2_info = separate_class(class_names)
    num_makes = len(v2_info['make'].unique())
    num_types = len(v2_info['model_type'].unique())

    model = construct_model(config, num_classes, num_makes, num_types)

    if config['finetune']:
        load_weight(model, config['path'], device)

    model = model.to(device)

    optimizer = optim.SGD(model.parameters(),
                          lr=config['lr'],
                          momentum=config['momentum'],
                          weight_decay=config['weight_decay'])

    lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                  [100, 150],
                                                  gamma=0.1)

    train_loader, test_loader = prepare_loader(config)

    best_acc = 0
    res = []

    if config['version'] == 1:
        train_fn = train_v1
        test_fn = test_v1
    else:  # 2 and 3
        train_fn = train_v2
        test_fn = test_v2

    for ep in range(1, config['epochs'] + 1):
        trainres = train_fn(ep, model, optimizer, lr_scheduler, train_loader, device, config)
        valres = test_fn(model, test_loader, device, config)
        trainres.update(valres)

        if best_acc < valres['val_acc']:
            best_acc = valres['val_acc']
            torch.save(model.state_dict(), exp_dir + '/best.pth')

        res.append(trainres)

    print(f'Best accuracy: {best_acc:.4f}')
    res = pd.DataFrame(res)
    res.to_csv(exp_dir + '/history.csv')
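A hypothetical follow-up for inspecting the saved history; the file layout and the val_acc column match what main() writes above:

import pandas as pd

# exp_dir as produced by get_exp_dir(config) in main() above
hist = pd.read_csv(exp_dir + '/history.csv', index_col=0)
print('Best val_acc: {:.4f}'.format(hist['val_acc'].max()))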