Example #1
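These examples train a ladder network on the Blocks/Fuel/Theano stack together with a handful of project-local helpers. The import block below is only a sketch of what they appear to assume; the module paths of the local helpers (prepare_dir, setup_data, setup_model, make_datastream, the monitoring and saving extensions, and so on) are guesses, not part of the examples themselves.

import os
import logging
from collections import OrderedDict

import numpy
import theano
from pandas import DataFrame

from blocks.algorithms import GradientDescent, Adam
from blocks.graph import ComputationGraph
from blocks.model import Model
from blocks.main_loop import MainLoop
from blocks.extensions import FinishAfter, Printing
from blocks.extensions.monitoring import TrainingDataMonitoring
from fuel.schemes import ShuffledScheme

# Project-local helpers; the module names here are placeholders.
from utils import prepare_dir, load_and_log_params, setup_data, setup_model, make_datastream
from nn import (ApproxTestMonitoring, FinalTestMonitoring, TestMonitoring,
                ShortPrinting, SaveParams, SaveExpParams, SaveLog, LRDecay,
                SentinelWhenFinish)

logger = logging.getLogger('main')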
def train(cli_params):
    cli_params['save_dir'] = prepare_dir(cli_params['save_to'])
    logfile = os.path.join(cli_params['save_dir'], 'log.txt')

    # Also log DEBUG messages to a file
    fh = logging.FileHandler(filename=logfile)
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)

    logger.info('Logging into %s' % logfile)

    p, loaded = load_and_log_params(cli_params)
    in_dim, data = setup_data(p, test_set=False)
    if not loaded:
        # Set the zero layer to match input dimensions
        p.encoder_layers = (in_dim, ) + p.encoder_layers

    ladder = setup_model(p)

    # Training
    all_params = ComputationGraph([ladder.costs.total]).parameters
    logger.info('Found the following parameters: %s' % str(all_params))

    # Fetch all batch normalization updates. They are in the clean path.
    bn_updates = ComputationGraph([ladder.costs.class_clean]).updates
    assert 'counter' in [u.name for u in list(bn_updates.keys())], \
        'No batch norm params in graph - the graph has been cut?'

    training_algorithm = GradientDescent(
        cost=ladder.costs.total,
        params=all_params,
        step_rule=Adam(learning_rate=ladder.lr))
    # In addition to actual training, also do BN variable approximations
    training_algorithm.add_updates(bn_updates)
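    # (GradientDescent.add_updates appends these Theano updates to every
    # gradient step, so the BN running means and variances are refreshed
    # alongside the model parameters.)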

    short_prints = {
        "train": {
            'T_C_class': ladder.costs.class_corr,
            'T_C_de': list(ladder.costs.denois.values()),
        },
        "valid_approx":
        OrderedDict([
            ('V_C_class', ladder.costs.class_clean),
            ('V_E', ladder.error.clean),
            ('V_C_de', list(ladder.costs.denois.values())),
        ]),
        "valid_final":
        OrderedDict([
            ('VF_C_class', ladder.costs.class_clean),
            ('VF_E', ladder.error.clean),
            ('VF_C_de', list(ladder.costs.denois.values())),
        ]),
    }

    main_loop = MainLoop(
        training_algorithm,
        # Datastream used for training
        make_datastream(data.train,
                        data.train_ind,
                        p.batch_size,
                        n_labeled=p.labeled_samples,
                        n_unlabeled=p.unlabeled_samples),
        model=Model(theano.tensor.cast(ladder.costs.total, "float32")),
        extensions=[
            FinishAfter(after_n_epochs=p.num_epochs),

            # This will estimate the validation error using
            # running average estimates of the batch normalization
            # parameters, mean and variance
            ApproxTestMonitoring(
                [ladder.costs.class_clean, ladder.error.clean] +
                list(ladder.costs.denois.values()),
                make_datastream(data.valid,
                                data.valid_ind,
                                p.valid_batch_size,
                                scheme=ShuffledScheme),
                prefix="valid_approx"),
            TrainingDataMonitoring([
                ladder.costs.total, ladder.costs.class_corr,
                training_algorithm.total_gradient_norm
            ] + list(ladder.costs.denois.values()),
                                   prefix="train",
                                   after_epoch=True),
            SaveParams(None, all_params, p.save_dir, after_epoch=True),
            SaveExpParams(p, p.save_dir, before_training=True),
            ShortPrinting(short_prints),
            LRDecay(ladder.lr,
                    p.num_epochs * p.lrate_decay,
                    p.num_epochs,
                    after_epoch=True),
        ])
    main_loop.run()

    # Get results
    df = main_loop.log.to_dataframe()
    col = 'valid_final_error_rate_clean'
    logger.info('%s %g' % (col, df[col].iloc[-1]))

    if main_loop.log.status['epoch_interrupt_received']:
        return None
    return df
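A minimal sketch of calling this train(): the hyperparameter names mirror the ones read from cli_params and p above (save_to, num_epochs, batch_size, labeled_samples, ...), but the concrete values and any further required keys depend on load_and_log_params, so the dict below is illustrative only.

cli_params = {
    'save_to': 'results/mnist_100',        # run directory, created via prepare_dir
    'num_epochs': 150,
    'batch_size': 100,
    'valid_batch_size': 100,
    'labeled_samples': 100,
    'unlabeled_samples': 60000,
    'lrate_decay': 0.67,
    'encoder_layers': (1000, 500, 250, 250, 250, 10),
}
df = train(cli_params)                     # returns None if an epoch interrupt was received
if df is not None:
    print(df.filter(like='error_rate').tail(1))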
Example #2
def train_ladder(cli_params, dataset=None, save_to='results/ova_all_full'):
    cli_params['save_dir'] = prepare_dir(save_to)
    logfile = os.path.join(cli_params['save_dir'], 'log.txt')

    # Also log DEBUG messages to a file
    fh = logging.FileHandler(filename=logfile)
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)

    logger.info('Logging into %s' % logfile)

    p, loaded = load_and_log_params(cli_params)

    ladder = setup_model(p)

    # Training
    all_params = ComputationGraph([ladder.costs.total]).parameters
    logger.info('Found the following parameters: %s' % str(all_params))

    # Fetch all batch normalization updates. They are in the clean path.
    bn_updates = ComputationGraph([ladder.costs.class_clean]).updates
    assert 'counter' in [u.name for u in bn_updates.keys()], \
        'No batch norm params in graph - the graph has been cut?'

    training_algorithm = GradientDescent(
        cost=ladder.costs.total,
        params=all_params,
        step_rule=Adam(learning_rate=ladder.lr))
    # In addition to actual training, also do BN variable approximations
    training_algorithm.add_updates(bn_updates)

    short_prints = {
        "train": {
            'T_C_class': ladder.costs.class_corr,
            'T_C_de': list(ladder.costs.denois.values()),
        },
        "valid_approx":
        OrderedDict([
            ('V_C_class', ladder.costs.class_clean),
            ('V_E', ladder.error.clean),
            ('V_C_de', list(ladder.costs.denois.values())),
        ]),
        "valid_final":
        OrderedDict([
            ('VF_C_class', ladder.costs.class_clean),
            ('VF_E', ladder.error.clean),
            ('VF_C_de', list(ladder.costs.denois.values())),
        ]),
    }

    ovadataset = dataset['ovadataset']
    train_indexes = dataset['train_indexes']
    val_indexes = dataset['val_indexes']

    main_loop = MainLoop(
        training_algorithm,
        # Datastream used for training
        make_datastream(ovadataset,
                        train_indexes,
                        p.batch_size,
                        scheme=ShuffledScheme),
        model=Model(ladder.costs.total),
        extensions=[
            FinishAfter(after_n_epochs=p.num_epochs),

            # This will estimate the validation error using
            # running average estimates of the batch normalization
            # parameters, mean and variance
            ApproxTestMonitoring(
                [ladder.costs.class_clean, ladder.error.clean] +
                list(ladder.costs.denois.values()),
                make_datastream(ovadataset, val_indexes, p.batch_size),
                prefix="valid_approx"),

            # This Monitor is slower, but more accurate since it will first
            # estimate batch normalization parameters from training data and
            # then do another pass to calculate the validation error.
            FinalTestMonitoring(
                [ladder.costs.class_clean, ladder.error.clean_mc] +
                list(ladder.costs.denois.values()),
                make_datastream(ovadataset, train_indexes, p.batch_size),
                make_datastream(ovadataset, val_indexes, p.batch_size),
                prefix="valid_final",
                after_n_epochs=p.num_epochs),
            TrainingDataMonitoring([
                ladder.costs.total, ladder.costs.class_corr,
                training_algorithm.total_gradient_norm
            ] + list(ladder.costs.denois.values()),
                                   prefix="train",
                                   after_epoch=True),
            ShortPrinting(short_prints),
            LRDecay(ladder.lr,
                    p.num_epochs * p.lrate_decay,
                    p.num_epochs,
                    after_epoch=True),
        ])
    main_loop.run()

    # Get results
    df = main_loop.log.to_dataframe()
    col = 'valid_final_error_matrix_cost'
    logger.info('%s %g' % (col, df[col].iloc[-1]))

    ds = make_datastream(ovadataset, val_indexes, p.batch_size)
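    # The next few lines take the clean-path activations of the top layer for
    # the labeled inputs, pass them through the project's TestMonitoring
    # batch-norm parameter handling, and compile a Theano function over them.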
    outputs = ladder.act.clean.labeled.h[len(ladder.layers) - 1]
    outputreplacer = TestMonitoring()
    _, _, outputs = outputreplacer._get_bn_params(outputs)

    cg = ComputationGraph(outputs)
    f = cg.get_theano_function()

    it = ds.get_epoch_iterator(as_dict=True)
    res = []
    inputs = {
        'features_labeled': [],
        'targets_labeled': [],
        'features_unlabeled': []
    }
    # Loop over one epoch
    for d in it:
        # Store all inputs
        for k, v in d.items():
            inputs[k] += [v]
        # Store outputs
        res += [f(*[d[str(inp)] for inp in cg.inputs])]

    # Concatenate all minibatches
    res = [numpy.vstack(minibatches) for minibatches in zip(*res)]
    inputs = {k: numpy.vstack(v) for k, v in inputs.items()}

    if main_loop.log.status['epoch_interrupt_received']:
        return None
    return res[0], inputs
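Example #2 expects its dataset argument to be a dict with exactly the three keys read above. A hedged sketch of calling it, with cli_params as in Example #1; the one-vs-all Fuel dataset itself is built elsewhere in the project, so ovadataset is a placeholder here:

dataset = {
    'ovadataset': ovadataset,              # a Fuel dataset with labeled/unlabeled sources
    'train_indexes': numpy.arange(0, 50000),
    'val_indexes': numpy.arange(50000, 60000),
}
result = train_ladder(cli_params, dataset=dataset, save_to='results/ova_run')
if result is not None:                     # None means an epoch interrupt was received
    outputs, collected_inputs = result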
Example #3
def train(cli_params):
    fn = 'noname'
    if 'save_to' in nodefaultargs or not cli_params.get('load_from'):
        fn = cli_params['save_to']
    cli_params['save_dir'] = prepare_dir(fn)
    nodefaultargs.append('save_dir')

    logfile = os.path.join(cli_params['save_dir'], 'log.txt')

    # Also log DEBUG messages to a file
    fh = logging.FileHandler(filename=logfile)
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)

    logger.info('Logging into %s' % logfile)

    p, loaded = load_and_log_params(cli_params)

    in_dim, data, whiten, cnorm = setup_data(p, test_set=False)
    if not loaded:
        # Set the zero layer to match input dimensions
        p.encoder_layers = (in_dim, ) + p.encoder_layers

    ladder = setup_model(p)

    # Training
    all_params = ComputationGraph([ladder.costs.total]).parameters
    logger.info('Found the following parameters: %s' % str(all_params))

    # Fetch all batch normalization updates. They are in the clean path.
    # you can turn off BN by setting is_normalizing = False in ladder.py
    bn_updates = ComputationGraph([ladder.costs.class_clean]).updates
    assert not bn_updates or 'counter' in [u.name for u in bn_updates.keys()], \
        'No batch norm params in graph - the graph has been cut?'

    training_algorithm = GradientDescent(
        cost=ladder.costs.total,
        parameters=all_params,
        step_rule=Adam(learning_rate=ladder.lr))
    # In addition to actual training, also do BN variable approximations
    if bn_updates:
        training_algorithm.add_updates(bn_updates)

    short_prints = {
        "train":
        OrderedDict([
            ('T_E', ladder.error.clean),
            ('T_O', ladder.oos.clean),
            ('T_C_class', ladder.costs.class_corr),
            ('T_C_de', list(ladder.costs.denois.values())),
            ('T_T', ladder.costs.total),
        ]),
        "valid_approx":
        OrderedDict([
            ('V_C_class', ladder.costs.class_clean),
            ('V_E', ladder.error.clean),
            ('V_O', ladder.oos.clean),
            ('V_C_de', list(ladder.costs.denois.values())),
            ('V_T', ladder.costs.total),
        ]),
        "valid_final":
        OrderedDict([
            ('VF_C_class', ladder.costs.class_clean),
            ('VF_E', ladder.error.clean),
            ('VF_O', ladder.oos.clean),
            ('VF_C_de', list(ladder.costs.denois.values())),
            ('VF_T', ladder.costs.total),
        ]),
    }

    if len(data.valid_ind):
        main_loop = MainLoop(
            training_algorithm,
            # Datastream used for training
            make_datastream(data.train,
                            data.train_ind,
                            p.batch_size,
                            n_labeled=p.labeled_samples,
                            n_unlabeled=p.unlabeled_samples,
                            whiten=whiten,
                            cnorm=cnorm,
                            balanced_classes=p.balanced_classes,
                            dseed=p.dseed),
            model=Model(ladder.costs.total),
            extensions=[
                FinishAfter(after_n_epochs=p.num_epochs),

                # This will estimate the validation error using
                # running average estimates of the batch normalization
                # parameters, mean and variance
                ApproxTestMonitoring([
                    ladder.costs.class_clean, ladder.error.clean,
                    ladder.oos.clean, ladder.costs.total
                ] + list(ladder.costs.denois.values()),
                                     make_datastream(
                                         data.valid,
                                         data.valid_ind,
                                         p.valid_batch_size,
                                         whiten=whiten,
                                         cnorm=cnorm,
                                         balanced_classes=p.balanced_classes,
                                         scheme=ShuffledScheme),
                                     prefix="valid_approx"),

                # This Monitor is slower, but more accurate since it will first
                # estimate batch normalization parameters from training data and
                # then do another pass to calculate the validation error.
                FinalTestMonitoring(
                    [
                        ladder.costs.class_clean, ladder.error.clean,
                        ladder.oos.clean, ladder.costs.total
                    ] + list(ladder.costs.denois.values()),
                    make_datastream(data.train,
                                    data.train_ind,
                                    p.batch_size,
                                    n_labeled=p.labeled_samples,
                                    whiten=whiten,
                                    cnorm=cnorm,
                                    balanced_classes=p.balanced_classes,
                                    scheme=ShuffledScheme),
                    make_datastream(data.valid,
                                    data.valid_ind,
                                    p.valid_batch_size,
                                    n_labeled=len(data.valid_ind),
                                    whiten=whiten,
                                    cnorm=cnorm,
                                    balanced_classes=p.balanced_classes,
                                    scheme=ShuffledScheme),
                    prefix="valid_final",
                    after_n_epochs=p.num_epochs,
                    after_training=True),
                TrainingDataMonitoring([
                    ladder.error.clean, ladder.oos.clean, ladder.costs.total,
                    ladder.costs.class_corr,
                    training_algorithm.total_gradient_norm
                ] + list(ladder.costs.denois.values()),
                                       prefix="train",
                                       after_epoch=True),
                # Save the model whenever we have the best validation result so far;
                # alternative criteria: ladder.costs.class_clean or ('train', ladder.costs.total).
                SaveParams(('valid_approx', ladder.error.clean),
                           all_params,
                           p.save_dir,
                           after_epoch=True),
                SaveExpParams(p, p.save_dir, before_training=True),
                SaveLog(p.save_dir, after_training=True),
                ShortPrinting(short_prints),
                LRDecay(ladder.lr,
                        p.num_epochs * p.lrate_decay,
                        p.num_epochs,
                        lrmin=p.lrmin,
                        after_epoch=True),
            ])
    else:
        main_loop = MainLoop(
            training_algorithm,
            # Datastream used for training
            make_datastream(data.train,
                            data.train_ind,
                            p.batch_size,
                            n_labeled=p.labeled_samples,
                            n_unlabeled=p.unlabeled_samples,
                            whiten=whiten,
                            cnorm=cnorm,
                            balanced_classes=p.balanced_classes,
                            dseed=p.dseed),
            model=Model(ladder.costs.total),
            extensions=[
                FinishAfter(after_n_epochs=p.num_epochs),
                TrainingDataMonitoring([
                    ladder.error.clean, ladder.oos.clean, ladder.costs.total,
                    ladder.costs.class_corr,
                    training_algorithm.total_gradient_norm
                ] + list(ladder.costs.denois.values()),
                                       prefix="train",
                                       after_epoch=True),
                # Save the model whenever we have the best training result so far;
                # alternative criteria: ladder.costs.class_clean or ('train', ladder.costs.total).
                SaveParams(('train', ladder.error.clean),
                           all_params,
                           p.save_dir,
                           after_epoch=True),
                SaveExpParams(p, p.save_dir, before_training=True),
                SaveLog(p.save_dir, after_training=True),
                ShortPrinting(short_prints),
                LRDecay(ladder.lr,
                        p.num_epochs * p.lrate_decay,
                        p.num_epochs,
                        lrmin=p.lrmin,
                        after_epoch=True),
            ])
    main_loop.run()

    # Get results
    if len(data.valid_ind) == 0:
        return None

    df = DataFrame.from_dict(main_loop.log, orient='index')
    col = 'valid_final_error_rate_clean'
    logger.info('%s %g' % (col, df[col].iloc[-1]))

    if main_loop.log.status['epoch_interrupt_received']:
        return None
    return df
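This variant returns a pandas DataFrame built from the Blocks log when a validation split exists, and None otherwise (or when training was interrupted). A small sketch of consuming it, using the same column name the function itself logs:

df = train(cli_params)
if df is not None:
    final_error = df['valid_final_error_rate_clean'].dropna().iloc[-1]
    print('final validation error: %g' % final_error)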
Example #4
def train(cli_params):
    cli_params['save_dir'] = prepare_dir(cli_params['save_to'])
    logfile = os.path.join(cli_params['save_dir'], 'log.txt')

    # Also log DEBUG messages to a file
    fh = logging.FileHandler(filename=logfile)
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)

    logger.info('Logging into %s' % logfile)

    p, loaded = load_and_log_params(cli_params)
    in_dim, data, whiten, cnorm = setup_data(p, test_set=False)

    if not loaded:
        # Set the zero layer to match input dimensions
        p.encoder_layers = (in_dim, ) + p.encoder_layers

    ladder = setup_model(p)

    # Training
    all_params = ComputationGraph([ladder.costs.total]).parameters
    logger.info('Found the following parameters: %s' % str(all_params))

    # Fetch all batch normalization updates. They are in the clean path.
    bn_updates = ComputationGraph([ladder.costs.class_clean]).updates
    assert 'counter' in [u.name for u in bn_updates.keys()], \
        'No batch norm params in graph - the graph has been cut?'

    training_algorithm = GradientDescent(
        cost=ladder.costs.total,
        params=all_params,
        step_rule=Adam(learning_rate=ladder.lr))
    # In addition to actual training, also do BN variable approximations
    training_algorithm.add_updates(bn_updates)

    model = Model(ladder.costs.total)

    monitored_variables = [
        ladder.costs.class_corr,
        ladder.costs.class_clean,
        ladder.error,
        # training_algorithm.total_gradient_norm,
        ladder.costs.total,
    ]  # optionally: monitored_variables += list(ladder.costs.denois.values())

    # Global history recorder: SaveLog updates it after every epoch with all
    # relevant monitoring variables, so a summary can be written to the
    # sentinel file at the end of training.
    global_history = {}

    main_loop = MainLoop(
        training_algorithm,
        # Datastream used for training
        make_datastream(data.train,
                        data.train_ind,
                        p.batch_size,
                        n_labeled=p.labeled_samples,
                        n_unlabeled=p.unlabeled_samples,
                        whiten=whiten,
                        cnorm=cnorm),
        model=model,
        extensions=[
            FinishAfter(after_n_epochs=p.num_epochs),

            # write out to sentinel file for experiment automator to work
            SentinelWhenFinish(save_dir=p.save_dir,
                               global_history=global_history),

            # This will estimate the validation error using
            # running average estimates of the batch normalization
            # parameters, mean and variance
            ApproxTestMonitoring(monitored_variables,
                                 make_datastream(data.valid,
                                                 data.valid_ind,
                                                 p.valid_batch_size,
                                                 whiten=whiten,
                                                 cnorm=cnorm,
                                                 scheme=ShuffledScheme),
                                 prefix="valid_approx"),

            # This Monitor is slower, but more accurate since it will first
            # estimate batch normalization parameters from training data and
            # then do another pass to calculate the validation error.
            FinalTestMonitoring(monitored_variables,
                                make_datastream(data.train,
                                                data.train_ind,
                                                p.batch_size,
                                                n_labeled=p.labeled_samples,
                                                whiten=whiten,
                                                cnorm=cnorm,
                                                scheme=ShuffledScheme),
                                make_datastream(data.valid,
                                                data.valid_ind,
                                                p.valid_batch_size,
                                                n_labeled=len(data.valid_ind),
                                                whiten=whiten,
                                                cnorm=cnorm,
                                                scheme=ShuffledScheme),
                                prefix="valid_final",
                                after_n_epochs=p.num_epochs),
            TrainingDataMonitoring(variables=monitored_variables,
                                   prefix="train",
                                   after_epoch=True),
            SaveParams('valid_approx_cost_class_corr', model, p.save_dir),
            #             SaveParams(None, all_params, p.save_dir, after_epoch=True),
            SaveExpParams(p, p.save_dir, before_training=True),
            SaveLog(save_dir=p.save_dir,
                    after_epoch=True,
                    global_history=global_history),
            Printing(),
            #             ShortPrinting(short_prints),
            LRDecay(ladder.lr,
                    p.num_epochs * p.lrate_decay,
                    p.num_epochs,
                    after_epoch=True),
        ])
    main_loop.run()

    # Get results
    df = main_loop.log.to_dataframe()
    col = 'valid_final_error_rate'
    logger.info('%s %g' % (col, df[col].iloc[-1]))

    if main_loop.log.status['epoch_interrupt_received']:
        return None
    return df
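SentinelWhenFinish and the global_history dict are project-local pieces: SaveLog fills global_history after every epoch, and the sentinel extension writes a completion marker that an external experiment runner can poll. Below is a minimal sketch of such an extension on top of Blocks' SimpleExtension, assuming global_history maps variable names to lists of values; this is not the project's implementation.

import json
import os

from blocks.extensions import SimpleExtension


class WriteSentinel(SimpleExtension):
    """Write a small 'done' file with a summary of the recorded history."""

    def __init__(self, save_dir, global_history, **kwargs):
        kwargs.setdefault('after_training', True)
        super(WriteSentinel, self).__init__(**kwargs)
        self.save_dir = save_dir
        self.global_history = global_history

    def do(self, which_callback, *args):
        # Keep only the latest value of each recorded variable.
        summary = {k: v[-1] for k, v in self.global_history.items() if v}
        with open(os.path.join(self.save_dir, 'sentinel.json'), 'w') as f:
            json.dump(summary, f, default=str)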