Example #1
def train(cli_params):
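    # Choose a directory name for the results: use cli_params['save_to'] when it
    # was given explicitly (nodefaultargs is assumed to be a module-level list of
    # CLI arguments the user actually set) or when we are not resuming via
    # 'load_from'; otherwise fall back to 'noname'.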
    fn = 'noname'
    if 'save_to' in nodefaultargs or not cli_params.get('load_from'):
        fn = cli_params['save_to']
    cli_params['save_dir'] = prepare_dir(fn)
    nodefaultargs.append('save_dir')

    logfile = os.path.join(cli_params['save_dir'], 'log.txt')

    # Also log DEBUG messages to a file
    fh = logging.FileHandler(filename=logfile)
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)

    logger.info('Logging into %s' % logfile)

    p, loaded = load_and_log_params(cli_params)

    in_dim, data, whiten, cnorm = setup_data(p, test_set=False)
    if not loaded:
        # Set the zero layer to match input dimensions
        p.encoder_layers = (in_dim, ) + p.encoder_layers

    ladder = setup_model(p)

    # Training
    all_params = ComputationGraph([ladder.costs.total]).parameters
    logger.info('Found the following parameters: %s' % str(all_params))

    # Fetch all batch normalization updates. They are in the clean path.
    # You can turn off BN by setting is_normalizing = False in ladder.py.
    # (A standalone sketch of this running-average trick follows this example.)
    bn_updates = ComputationGraph([ladder.costs.class_clean]).updates
    assert not bn_updates or 'counter' in [u.name for u in bn_updates.keys()], \
        'No batch norm params in graph - the graph has been cut?'

    training_algorithm = GradientDescent(
        cost=ladder.costs.total,
        parameters=all_params,
        step_rule=Adam(learning_rate=ladder.lr))
    # In addition to actual training, also do BN variable approximations
    if bn_updates:
        training_algorithm.add_updates(bn_updates)

    short_prints = {
        "train":
        OrderedDict([
            ('T_E', ladder.error.clean),
            ('T_O', ladder.oos.clean),
            ('T_C_class', ladder.costs.class_corr),
            ('T_C_de', ladder.costs.denois.values()),
            ('T_T', ladder.costs.total),
        ]),
        "valid_approx":
        OrderedDict([
            ('V_C_class', ladder.costs.class_clean),
            ('V_E', ladder.error.clean),
            ('V_O', ladder.oos.clean),
            ('V_C_de', ladder.costs.denois.values()),
            ('V_T', ladder.costs.total),
        ]),
        "valid_final":
        OrderedDict([
            ('VF_C_class', ladder.costs.class_clean),
            ('VF_E', ladder.error.clean),
            ('VF_O', ladder.oos.clean),
            ('VF_C_de', ladder.costs.denois.values()),
            ('VF_T', ladder.costs.total),
        ]),
    }

    if len(data.valid_ind):
        main_loop = MainLoop(
            training_algorithm,
            # Datastream used for training
            make_datastream(data.train,
                            data.train_ind,
                            p.batch_size,
                            n_labeled=p.labeled_samples,
                            n_unlabeled=p.unlabeled_samples,
                            whiten=whiten,
                            cnorm=cnorm,
                            balanced_classes=p.balanced_classes,
                            dseed=p.dseed),
            model=Model(ladder.costs.total),
            extensions=[
                FinishAfter(after_n_epochs=p.num_epochs),

                # This will estimate the validation error using
                # running average estimates of the batch normalization
                # parameters, mean and variance
                ApproxTestMonitoring([
                    ladder.costs.class_clean, ladder.error.clean,
                    ladder.oos.clean, ladder.costs.total
                ] + ladder.costs.denois.values(),
                                     make_datastream(
                                         data.valid,
                                         data.valid_ind,
                                         p.valid_batch_size,
                                         whiten=whiten,
                                         cnorm=cnorm,
                                         balanced_classes=p.balanced_classes,
                                         scheme=ShuffledScheme),
                                     prefix="valid_approx"),

                # This Monitor is slower, but more accurate since it will first
                # estimate batch normalization parameters from training data and
                # then do another pass to calculate the validation error.
                FinalTestMonitoring(
                    [
                        ladder.costs.class_clean, ladder.error.clean,
                        ladder.oos.clean, ladder.costs.total
                    ] + ladder.costs.denois.values(),
                    make_datastream(data.train,
                                    data.train_ind,
                                    p.batch_size,
                                    n_labeled=p.labeled_samples,
                                    whiten=whiten,
                                    cnorm=cnorm,
                                    balanced_classes=p.balanced_classes,
                                    scheme=ShuffledScheme),
                    make_datastream(data.valid,
                                    data.valid_ind,
                                    p.valid_batch_size,
                                    n_labeled=len(data.valid_ind),
                                    whiten=whiten,
                                    cnorm=cnorm,
                                    balanced_classes=p.balanced_classes,
                                    scheme=ShuffledScheme),
                    prefix="valid_final",
                    after_n_epochs=p.num_epochs,
                    after_training=True),
                TrainingDataMonitoring([
                    ladder.error.clean, ladder.oos.clean, ladder.costs.total,
                    ladder.costs.class_corr,
                    training_algorithm.total_gradient_norm
                ] + ladder.costs.denois.values(),
                                       prefix="train",
                                       after_epoch=True),
                # Save the model whenever we get the best validation result;
                # ladder.costs.class_clean or ('train', ladder.costs.total) are alternative criteria.
                SaveParams(('valid_approx', ladder.error.clean),
                           all_params,
                           p.save_dir,
                           after_epoch=True),
                SaveExpParams(p, p.save_dir, before_training=True),
                SaveLog(p.save_dir, after_training=True),
                ShortPrinting(short_prints),
                LRDecay(ladder.lr,
                        p.num_epochs * p.lrate_decay,
                        p.num_epochs,
                        lrmin=p.lrmin,
                        after_epoch=True),
            ])
    else:
        main_loop = MainLoop(
            training_algorithm,
            # Datastream used for training
            make_datastream(data.train,
                            data.train_ind,
                            p.batch_size,
                            n_labeled=p.labeled_samples,
                            n_unlabeled=p.unlabeled_samples,
                            whiten=whiten,
                            cnorm=cnorm,
                            balanced_classes=p.balanced_classes,
                            dseed=p.dseed),
            model=Model(ladder.costs.total),
            extensions=[
                FinishAfter(after_n_epochs=p.num_epochs),
                TrainingDataMonitoring([
                    ladder.error.clean, ladder.oos.clean, ladder.costs.total,
                    ladder.costs.class_corr,
                    training_algorithm.total_gradient_norm
                ] + ladder.costs.denois.values(),
                                       prefix="train",
                                       after_epoch=True),
                # No validation set here, so save whenever the training error improves;
                # ('train', ladder.costs.total) is an alternative criterion.
                SaveParams(('train', ladder.error.clean),
                           all_params,
                           p.save_dir,
                           after_epoch=True),
                SaveExpParams(p, p.save_dir, before_training=True),
                SaveLog(p.save_dir, after_training=True),
                ShortPrinting(short_prints),
                LRDecay(ladder.lr,
                        p.num_epochs * p.lrate_decay,
                        p.num_epochs,
                        lrmin=p.lrmin,
                        after_epoch=True),
            ])
    main_loop.run()

    # Get results
    if len(data.valid_ind) == 0:
        return None

    df = DataFrame.from_dict(main_loop.log, orient='index')
    col = 'valid_final_error_rate_clean'
    logger.info('%s %g' % (col, df[col].iloc[-1]))

    if main_loop.log.status['epoch_interrupt_received']:
        return None
    return df
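
The two BN-related spots above (collecting bn_updates from the clean path and attaching them with training_algorithm.add_updates) piggyback the running-average estimation of batch-normalization statistics onto the ordinary training step. Below is a minimal, hypothetical sketch of that trick in plain Theano; the cumulative-average rule and running_mean are illustrative assumptions, and only the 'counter' name comes from the assert in the code above.

import numpy as np
import theano
import theano.tensor as T

floatX = theano.config.floatX

x = T.matrix('x')                                   # a mini-batch of activations
counter = theano.shared(np.asarray(1.0, dtype=floatX), name='counter')
running_mean = theano.shared(np.zeros(3, dtype=floatX), name='running_mean')

batch_mean = x.mean(axis=0)
# Cumulative average: new = old + (batch - old) / counter
bn_updates = [(running_mean, running_mean + (batch_mean - running_mean) / counter),
              (counter, counter + 1.0)]

# GradientDescent.add_updates() appends update pairs like these to the compiled
# training function; here they are compiled on their own for illustration.
step = theano.function([x], batch_mean, updates=bn_updates)
for _ in range(5):
    step(np.random.randn(8, 3).astype(floatX))
print(running_mean.get_value())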
Example #2
def train(cli_params):
    cli_params['save_dir'] = prepare_dir(cli_params['save_to'])
    logfile = os.path.join(cli_params['save_dir'], 'log.txt')

    # Also log DEBUG messages to a file
    fh = logging.FileHandler(filename=logfile)
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)

    logger.info('Logging into %s' % logfile)

    p, loaded = load_and_log_params(cli_params)
    in_dim, data = setup_data(p, test_set=False)
    if not loaded:
        # Set the zero layer to match input dimensions
        p.encoder_layers = (in_dim, ) + p.encoder_layers

    ladder = setup_model(p)

    # Training
    all_params = ComputationGraph([ladder.costs.total]).parameters
    logger.info('Found the following parameters: %s' % str(all_params))

    # Fetch all batch normalization updates. They are in the clean path.
    bn_updates = ComputationGraph([ladder.costs.class_clean]).updates
    assert 'counter' in [u.name for u in list(bn_updates.keys())], \
        'No batch norm params in graph - the graph has been cut?'

    training_algorithm = GradientDescent(
        cost=ladder.costs.total,
        params=all_params,
        step_rule=Adam(learning_rate=ladder.lr))
    # In addition to actual training, also do BN variable approximations
    training_algorithm.add_updates(bn_updates)

    short_prints = {
        "train": {
            'T_C_class': ladder.costs.class_corr,
            'T_C_de': list(ladder.costs.denois.values()),
        },
        "valid_approx":
        OrderedDict([
            ('V_C_class', ladder.costs.class_clean),
            ('V_E', ladder.error.clean),
            ('V_C_de', list(ladder.costs.denois.values())),
        ]),
        "valid_final":
        OrderedDict([
            ('VF_C_class', ladder.costs.class_clean),
            ('VF_E', ladder.error.clean),
            ('VF_C_de', list(ladder.costs.denois.values())),
        ]),
    }

    main_loop = MainLoop(
        training_algorithm,
        # Datastream used for training
        make_datastream(data.train,
                        data.train_ind,
                        p.batch_size,
                        n_labeled=p.labeled_samples,
                        n_unlabeled=p.unlabeled_samples),
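        # The total cost is cast to float32 here, presumably to keep the Model's
        # output in float32 regardless of theano.config.floatX.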
        model=Model(theano.tensor.cast(ladder.costs.total, "float32")),
        extensions=[
            FinishAfter(after_n_epochs=p.num_epochs),

            # This will estimate the validation error using
            # running average estimates of the batch normalization
            # parameters, mean and variance
            ApproxTestMonitoring(
                [ladder.costs.class_clean, ladder.error.clean] +
                list(ladder.costs.denois.values()),
                make_datastream(data.valid,
                                data.valid_ind,
                                p.valid_batch_size,
                                scheme=ShuffledScheme),
                prefix="valid_approx"),
            TrainingDataMonitoring([
                ladder.costs.total, ladder.costs.class_corr,
                training_algorithm.total_gradient_norm
            ] + list(ladder.costs.denois.values()),
                                   prefix="train",
                                   after_epoch=True),
            SaveParams(None, all_params, p.save_dir, after_epoch=True),
            SaveExpParams(p, p.save_dir, before_training=True),
            ShortPrinting(short_prints),
            LRDecay(ladder.lr,
                    p.num_epochs * p.lrate_decay,
                    p.num_epochs,
                    after_epoch=True),
        ])
    main_loop.run()

    # Get results
    df = main_loop.log.to_dataframe()
    col = 'valid_approx_error_rate_clean'  # only the approximate validation error is monitored in this variant
    logger.info('%s %g' % (col, df[col].iloc[-1]))

    if main_loop.log.status['epoch_interrupt_received']:
        return None
    return df
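
Both examples finish by reading one monitoring channel out of the training log through a pandas DataFrame (DataFrame.from_dict(main_loop.log, orient='index') in Example #1, main_loop.log.to_dataframe() here). A small self-contained illustration of that step, using a made-up fake_log in place of the real log filled in by the monitoring extensions:

from pandas import DataFrame

fake_log = {
    0:   {'valid_approx_error_rate_clean': None},
    100: {'valid_approx_error_rate_clean': 0.123},
    200: {'valid_approx_error_rate_clean': 0.098},
}
df = DataFrame.from_dict(fake_log, orient='index')   # rows indexed by iteration
col = 'valid_approx_error_rate_clean'
print('%s %g' % (col, df[col].dropna().iloc[-1]))    # prints the last recorded value, 0.098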
Example #3
    def train(self):
        """ Setup and train the model """
        to_train = ComputationGraph([self.tagger.total_cost]).parameters
        logger.info('Found the following parameters: %s' % str(to_train))

        step_rule = Adam(learning_rate=self.p.lr)
        training_algorithm = GradientDescent(
            cost=self.tagger.total_cost, parameters=to_train, step_rule=step_rule,
            on_unused_sources='warn',
            theano_func_kwargs={'on_unused_input': 'warn'}
        )

        # TRACKED GRAPH NODES
        train_params = {
            'Train_Denoising_Cost': self.tagger.corr.denoising_cost,
        }
        if self.p.class_cost_x > 0:
            train_params['Train_Classification_Cost'] = self.tagger.corr.class_cost
            train_params['Train_Classification_Error'] = self.tagger.clean.class_error

        valid_params = {
            'Validation_Denoising_Cost': self.tagger.corr.denoising_cost,
        }
        if self.p.class_cost_x > 0:
            valid_params['Validation_Classification_Cost'] = self.tagger.corr.class_cost
            valid_params['Validation_Classification_Error'] = self.tagger.clean.class_error

        test_params = {
            'Test_AMI_Score': self.tagger.clean.ami_score,
            'Test_Denoising_Cost': self.tagger.corr.denoising_cost,
        }
        if self.p.class_cost_x > 0:
            test_params['Test_Classification_Cost'] = self.tagger.corr.class_cost
            test_params['Test_Classification_Error'] = self.tagger.clean.class_error

        short_prints = {
            "train": train_params,
            "valid": valid_params,
            "test": test_params,
        }

        main_loop = MainLoop(
            training_algorithm,
            # Datastream used for training
            self.streams['train'],
            model=Model(self.tagger.total_cost),
            extensions=[
                FinishAfter(after_n_epochs=self.p.num_epochs),
                SaveParams(self.p.get('save_freq', 0), self.tagger, self.save_dir, before_epoch=True),
                DataStreamMonitoring(
                    valid_params.values(),
                    self.streams['valid'],
                    prefix="valid"
                ),
                FinalTestMonitoring(
                    test_params.values(),
                    self.streams['train'],
                    {'valid': self.streams['valid'], 'test': self.streams['test']},
                    after_training=True
                ),
                TrainingDataMonitoring(
                    train_params.values(),
                    prefix="train", after_epoch=True
                ),
                SaveExpParams(self.p, self.save_dir, before_training=True),
                Timing(after_epoch=True),
                ShortPrinting(short_prints, after_epoch=True),
            ])
        logger.info('Running the main loop')
        main_loop.run()
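
All three examples share the same Blocks/Fuel skeleton: build a Theano cost, collect its parameters with ComputationGraph, wrap the gradient step in GradientDescent with an Adam step rule, and drive training from a MainLoop with monitoring extensions. The following is a minimal, self-contained sketch of that skeleton on a toy least-squares cost, assuming a Blocks version with the newer `parameters=` keyword; the 'features'/'targets' source names, shapes and hyperparameters are all illustrative.

from collections import OrderedDict

import numpy
import theano
import theano.tensor as T
from blocks.algorithms import Adam, GradientDescent
from blocks.extensions import FinishAfter, Printing
from blocks.extensions.monitoring import TrainingDataMonitoring
from blocks.main_loop import MainLoop
from blocks.model import Model
from fuel.datasets import IndexableDataset
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream

floatX = theano.config.floatX

# Toy model: fit w to minimise ||x.w - y||^2
x = T.matrix('features')
y = T.vector('targets')
w = theano.shared(numpy.zeros(3, dtype=floatX), name='w')
cost = T.sqr(T.dot(x, w) - y).mean()
cost.name = 'cost'

algorithm = GradientDescent(cost=cost, parameters=[w],
                            step_rule=Adam(learning_rate=0.01))

dataset = IndexableDataset(OrderedDict([
    ('features', numpy.random.randn(64, 3).astype(floatX)),
    ('targets', numpy.random.randn(64).astype(floatX)),
]))
stream = DataStream(dataset, iteration_scheme=SequentialScheme(64, batch_size=16))

main_loop = MainLoop(
    algorithm,
    stream,
    model=Model(cost),
    extensions=[
        TrainingDataMonitoring([cost], prefix='train', after_epoch=True),
        FinishAfter(after_n_epochs=2),
        Printing(),
    ])
main_loop.run()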