Example #1
def run():
    if 'COMET_KEY' not in os.environ:
        print('Please set the COMET_KEY environment variable')
        print('e.g. by running with: `COMET_KEY="your-key" python example.py`')
        return
    api_key = os.environ['COMET_KEY']
    # Create an experiment
    experiment = Experiment(api_key=api_key,
                            project_name="tabular_q",
                            workspace="irl-benchmark")

    metrics_logger = MetricsLogger()

    def metric_listener(reward):
        experiment.log_metric('reward', reward)
        metrics_logger.log_metric('reward', reward)

    duration = 5
    hyper_params = {'gamma': 0.95, 'alpha_start': 0.8}

    # Report hparams
    experiment.log_multiple_params(hyper_params)

    env = gym.make('FrozenLake-v0')
    agent = TabularQ(env, **hyper_params)
    rewards = agent.train(duration, metric_listener)

    experiment.log_metric('MA(100) reward', np.mean(rewards[-100:]))

    metrics_logger.save('data/example_tabular_q_metrics.json')
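
Most examples in this collection follow the same skeleton as Example #1: create a comet_ml Experiment with an API key, log the hyperparameters once, then log metrics as training progresses. A minimal, self-contained sketch of that skeleton (project, workspace and values are placeholders; like Example #1 it reads the key from the COMET_KEY environment variable and uses the older log_multiple_params call these examples rely on):

import os

from comet_ml import Experiment

api_key = os.environ.get('COMET_KEY')
experiment = Experiment(api_key=api_key,
                        project_name='tabular_q',
                        workspace='irl-benchmark')

# log hyperparameters once, then metrics as they are produced
experiment.log_multiple_params({'gamma': 0.95, 'alpha_start': 0.8})
for step in range(3):
    experiment.log_metric('reward', float(step))
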
Example #2
def run(**hyper_params):
    if 'COMET_KEY' not in os.environ:
        print('Please set the COMET_KEY environment variable')
        print('e.g. by running with: `COMET_KEY="your-key" python example.py`')
        sys.exit()
    api_key = os.environ['COMET_KEY']
    # Create an experiment
    experiment = Experiment(
        api_key=api_key, project_name="tabular_q", workspace="irl-benchmark")

    def metric_listener(reward):
        experiment.log_metric('reward', reward)

    duration = 5

    # Report hparams
    experiment.log_multiple_params(hyper_params)

    env = gym.make('FrozenLake-v0')
    agent = TabularQ(env, **hyper_params)

    rewards = agent.train(duration, metric_listener)
    ma_100 = np.mean(rewards[-100:])
    experiment.log_metric('MA(100) reward', ma_100)

    print('Params:', hyper_params, 'got ma_100 reward', ma_100)
    return rewards
Example #3
def main():
    config = load_config()
    if config['random_seed']:
        random.seed(config['random_seed'])
        torch.manual_seed(config['random_seed'])
    disable_cometml = not config['use_cometml']
    experiment = Experiment(api_key=config['comet_key'], project_name=config['project_name'], disabled=disable_cometml)

    model_config = config['model']
    experiment.log_multiple_params(model_config)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    train_dataset = StockDataset(config, device=device, mode='train')
    train_data_loader = DataLoader(train_dataset, batch_size=config['model']['batch_size'], shuffle=True)

    dev_dataset = StockDataset(config, device=device, mode='dev')
    dev_data_loader = DataLoader(dev_dataset, batch_size=config['model']['batch_size'], shuffle=False)

    model = Model(input_size=model_config['input_size'],
                  hidden_size=model_config['hidden_size'],
                  output_size=model_config['output_size'],
                  num_layers=model_config['num_layers'],
                  dropout=model_config['dropout'],
                  device=device)
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=model_config['learning_rate'])

    train_and_evaluate(model=model,
                       criterion=criterion,
                       optimizer=optimizer,
                       train_data_loader=train_data_loader,
                       dev_data_loader=dev_data_loader,
                       epochs_count=model_config['epochs_count'],
                       experiment=experiment)
Example #4
    def init_callbacks(self):
        self.callbacks.append(
            ModelCheckpoint(
                filepath=os.path.join(
                    self.config.callbacks.checkpoint_dir,
                    '%s-{epoch:02d}-{val_loss:.2f}.hdf5' %
                    self.config.exp.name),
                monitor=self.config.callbacks.checkpoint_monitor,
                mode=self.config.callbacks.checkpoint_mode,
                save_best_only=self.config.callbacks.checkpoint_save_best_only,
                save_weights_only=self.config.callbacks.checkpoint_save_weights_only,
                verbose=self.config.callbacks.checkpoint_verbose,
            ))

        self.callbacks.append(
            TensorBoard(
                log_dir=self.config.callbacks.tensorboard_log_dir,
                write_graph=self.config.callbacks.tensorboard_write_graph,
            ))

        if hasattr(self.config, "comet_api_key"):
            from comet_ml import Experiment
            experiment = Experiment(api_key=self.config.comet_api_key,
                                    project_name=self.config.exp_name)
            experiment.disable_mp()
            experiment.log_multiple_params(self.config)
            self.callbacks.append(experiment.get_keras_callback())
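
The callback returned by experiment.get_keras_callback() above is collected in self.callbacks, which the surrounding trainer presumably passes to Keras's fit(). A hedged, self-contained sketch of that hand-off (the tiny model, random data and API key are placeholders; comet_ml is imported before keras here):

import numpy as np
from comet_ml import Experiment
from keras.models import Sequential
from keras.layers import Dense

experiment = Experiment(api_key='YOUR-API-KEY', project_name='keras-demo')

# a throwaway model and dataset, just to show where the callback goes
model = Sequential([Dense(8, activation='relu', input_shape=(4,)),
                    Dense(1, activation='sigmoid')])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

x = np.random.rand(64, 4)
y = np.random.randint(0, 2, size=(64, 1))

# Comet's Keras callback streams the epoch metrics to the experiment
model.fit(x, y, epochs=2, batch_size=16,
          callbacks=[experiment.get_keras_callback()])
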
Example #5
    def init_callbacks(self):
        self.callbacks.append(
            ModelCheckpoint(
                filepath=os.path.join(
                    self.checkpoint_dir, '%s-{epoch:02d}-{val_loss:.2f}.hdf5' %
                    self.config['exp_name']),
                monitor=self.config['checkpoint_monitor'],
                mode=self.config['checkpoint_mode'],
                save_best_only=self.config['checkpoint_save_best_only'],
                save_weights_only=self.config['checkpoint_save_weights_only'],
                verbose=self.config['checkpoint_verbose'],
            ))

        self.callbacks.append(
            TensorBoard(
                log_dir=self.tensorboard_log_dir,
                write_graph=self.config['tensorboard_write_graph'],
                histogram_freq=0,  # don't compute histograms
                write_images=False  # don't write model weights to visualize as images in TensorBoard
            ))

        if "comet_api_key" in self.config:  # config is a plain dict here, so test key membership
            from comet_ml import Experiment
            experiment = Experiment(api_key=self.config['comet_api_key'],
                                    project_name=self.config['exp_name'])
            experiment.disable_mp()
            experiment.log_multiple_params(self.config)
            self.callbacks.append(experiment.get_keras_callback())
Example #6
def train_autofeature_model(data_path,
                            embedding_dimension,
                            batch_size):
    training_data = load_data(data_path)
    X = training_data.drop(labels=['SK_ID_CURR'], axis=1)

    experiment = Experiment(
        api_key=API_KEY, project_name="home-credit")
    experiment.set_name(
        'home-credit-autofeature-selection')

    model = build_model(X.shape[1], int(embedding_dimension))
    model.summary()
    model.compile(optimizer='adam', loss='mean_squared_logarithmic_error')

    model.fit(X, X,
              epochs=5,
              batch_size=int(batch_size))

    experiment.log_multiple_params(
        {"embedding_dimension": embedding_dimension,
         "batch_size": batch_size})

    model.save(
        'home-credit-encoder-{}-{}.hdf5'.format(
            embedding_dimension, batch_size))
Example #7
    def init_callbacks(self):
        self.callbacks.append(
            ModelCheckpoint(
                filepath=os.path.join(self.config.checkpoint_dir, '%s-{epoch:03d}-{val_nme:.5f}.hdf5' % self.config.exp_name),
                monitor=self.config.checkpoint_monitor,
                mode=self.config.checkpoint_mode,
                save_best_only=self.config.checkpoint_save_best_only,
                save_weights_only=self.config.checkpoint_save_weights_only,
                verbose=self.config.checkpoint_verbose,
            )
        )

        self.callbacks.append(
                TensorBoard(
                    log_dir=self.config.tensorboard_log_dir,
                    write_graph=self.config.tensorboard_write_graph,
                )
            )

        # self.callbacks.append(
        #     LearningRateScheduler(self.lr_scheduler)
        # )

        if hasattr(self.config, "comet_api_key"):
            from comet_ml import Experiment
            experiment = Experiment(api_key=self.config.comet_api_key, project_name=self.config.exp_name)
            experiment.disable_mp()
            experiment.log_multiple_params(self.config)
            self.callbacks.append(experiment.get_keras_callback())
Example #8
def train(path):
    name = os.path.splitext(os.path.basename(path))[0]
    print('Processing: ', name)
    features = pd.read_csv(path, index_col=None)
    selected_features_names = [name for name, desc in selected_features]
    features = features[selected_features_names]
    split_idx = 1200
    features = features.drop(['sound.files'], axis=1)
    noise_only_df, df = features.iloc[:split_idx], features.iloc[split_idx:]
    y = df.pop('petrel')
    X = df.values
    y_noise = noise_only_df.pop('petrel')
    X_noise = noise_only_df.values
    X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.25, random_state=42, stratify=y)
    hyperparams = {
        'n_estimators': [100, 300, 500, 1000],
        'learning_rate': [0.1],
        'gamma': [0.0, 0.5],
        'max_depth': [2, 3, 4],
        'min_child_weight': [1, 2],
        'subsample': [1.0, 0.8],
        'reg_alpha': [0.0, 0.1],
        'reg_lambda': [1, 2, 3]
    }
    #
    # hyperparams = {
    #     'n_estimators': [100],
    #     'learning_rate': [0.1],
    #     'gamma': [0.0],
    #     'max_depth': [2],
    #     'min_child_weight': [1],
    #     'subsample': [1.0],
    #     'reg_alpha': [0.0],
    #     'reg_lambda': [1]
    # }

    clf = model_selection.GridSearchCV(estimator=xg.XGBClassifier(objective='binary:logistic', n_jobs=-1),
                                       param_grid=hyperparams,
                                       cv=4)
    fit_params = clf.fit(X_train, y_train)
    estimator = fit_params.best_estimator_
    joblib.dump(estimator, name + '_model.pkl')

    test_pred = estimator.predict(X_test)
    metrics = calculate_metrics(test_pred, y_test)

    noise_pred = estimator.predict(X_noise)
    noise_detection_accuracy = accuracy_score(y_noise, noise_pred)

    experiment = Experiment(api_key="4PdGdUZmGf6P8QsMa5F2zB4Ui",
                            project_name="storm petrels",
                            workspace="tracewsl")
    experiment.set_name(name)
    experiment.log_parameter('name', name)
    experiment.log_multiple_params(fit_params.best_params_)
    experiment.log_multiple_metrics(metrics)
    experiment.log_metric('Noise detection accuracy', noise_detection_accuracy)
    experiment.log_figure('Confusion matrix', get_confusion_matrix_figure(test_pred, y_test))
    experiment.log_figure('Feature importance', get_feature_importance_figure(estimator, list(df.columns.values)))
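
Example #8 above also logs matplotlib figures with experiment.log_figure(name, figure). A hedged sketch of just that call (the API key and the plot are placeholders):

import matplotlib.pyplot as plt
from comet_ml import Experiment

experiment = Experiment(api_key='YOUR-API-KEY', project_name='demo')

fig = plt.figure()
plt.plot([0, 1, 2], [0, 1, 4])  # placeholder plot
experiment.log_figure('Example figure', fig)
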
Example #9
class Logger:
    def __init__(self, sess, config):
        self.sess = sess
        self.config = config
        self.summary_placeholders = {}
        self.summary_ops = {}
        self.train_summary_writer = tf.summary.FileWriter(os.path.join(self.config.summary_dir, "train"),
                                                          self.sess.graph)
        self.test_summary_writer = tf.summary.FileWriter(
            os.path.join(self.config.summary_dir, "test"))

        if "comet_api_key" in config:
            from comet_ml import Experiment
            self.experiment = Experiment(
                api_key=config['comet_api_key'], project_name=config['exp_name'])
            self.experiment.disable_mp()
            self.experiment.log_multiple_params(config)

    # it can summarize scalars and images.
    def summarize(self, step, summarizer="train", scope="", summaries_dict=None):
        """
        :param step: the step of the summary
        :param summarizer: use the train summary writer or the test one
        :param scope: variable scope
        :param summaries_dict: the dict of the summaries values (tag,value)
        :return:
        """
        summary_writer = self.train_summary_writer if summarizer == "train" else self.test_summary_writer
        with tf.variable_scope(scope):

            if summaries_dict is not None:
                summary_list = []
                for tag, value in summaries_dict.items():
                    if tag not in self.summary_ops:
                        if len(value.shape) <= 1:
                            self.summary_placeholders[tag] = tf.placeholder(
                                'float32', value.shape, name=tag)
                        else:
                            self.summary_placeholders[tag] = tf.placeholder('float32', [None] + list(value.shape[1:]),
                                                                            name=tag)
                        if len(value.shape) <= 1:
                            self.summary_ops[tag] = tf.summary.scalar(
                                tag, self.summary_placeholders[tag])
                        else:
                            self.summary_ops[tag] = tf.summary.image(
                                tag, self.summary_placeholders[tag])

                    summary_list.append(self.sess.run(self.summary_ops[tag], {
                                        self.summary_placeholders[tag]: value}))

                for summary in summary_list:
                    summary_writer.add_summary(summary, step)

                if hasattr(self, 'experiment') and self.experiment is not None:
                    self.experiment.log_multiple_metrics(
                        summaries_dict, step=step)

                summary_writer.flush()
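
The Logger above mirrors every scalar it writes to TensorBoard into Comet with experiment.log_multiple_metrics(summaries_dict, step=step). A hedged minimal sketch of that call on its own (API key and values are placeholders):

from comet_ml import Experiment

experiment = Experiment(api_key='YOUR-API-KEY', project_name='demo')
for step in range(3):
    metrics = {'loss': 1.0 / (step + 1), 'acc': 0.5 + 0.1 * step}
    experiment.log_multiple_metrics(metrics, step=step)
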
Example #10
def train():
    x_lines = [
        *toolz.take(LIMIT,
                    open('data/x.txt').read().lower().split('\n'))
    ]
    y_lines = [
        *toolz.take(LIMIT,
                    open('data/y.txt').read().lower().split('\n'))
    ]

    encoder = encoder_for_lines(S2S_PARAMS, x_lines + y_lines)

    try:
        start_idx = encoder.word_vocab[S2S_PARAMS.start_token]
        pad_idx = encoder.word_vocab[PAD_TOKEN]
    except AttributeError:
        start_idx = int(encoder.vocabulary_[S2S_PARAMS.start_token])
        pad_idx = encoder.vocabulary_[PAD_TOKEN]

    reverse_enc = {idx: word for word, idx in encoder.vocabulary_.items()}
    model = build_model(S2S_PARAMS, start_idx, pad_idx)

    x = encode_data(encoder, x_lines, is_input=True)
    y = encode_data(encoder, y_lines, is_input=False)

    print(x.shape, y.shape)

    x = x[:S2S_PARAMS.batch_size * int(len(x) / S2S_PARAMS.batch_size)]
    y = y[:S2S_PARAMS.batch_size * int(len(y) / S2S_PARAMS.batch_size)]

    test_x = x[:S2S_PARAMS.batch_size]
    losses = []

    if USE_COMET:
        experiment = Experiment(api_key="DQqhNiimkjP0gK6c8iGz9orzL",
                                log_code=True)
        experiment.log_multiple_params(S2S_PARAMS._asdict())
        for idx in range(1000):
            print("Shuffling data...")
            random_idx = random.sample([*range(len(x))], len(x))
            x = x[random_idx]
            y = y[random_idx]
            print("Training in epoch " + str(idx))
            losses.append(model.train_epoch(x, y, experiment=experiment))
            experiment.log_epoch_end(idx)
            print('Loss history: {}'.format(', '.join(
                ['{:.4f}'.format(loss) for loss in losses])))
            test_y = model.predict(test_x)
            for i in range(min([3, S2S_PARAMS.batch_size])):
                print('> ' + ' '.join(
                    reverse_enc.get(idx, '<unk/>') for idx in list(test_y[i])))
    else:
        for idx in range(1000):
            print("Training in epoch " + str(idx))
            model.train_epoch(x, y)
Example #11
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--MIN_LENGTH', type=int, default=1, help='Min Length of sequence (Input side)')
    parser.add_argument('--MAX_LENGTH', type=int, default=40, help='Max Length of sequence (Input side)')
    parser.add_argument('--MIN_LENGTH_TARGET', type=int, default=1, help='Min Length of sequence (Output side)')
    parser.add_argument('--MAX_LENGTH_TARGET', type=int, default=40, help='Max Length of sequence (Output side)')
    parser.add_argument('--lang1', type=str, default="en", help='Input Language')
    parser.add_argument('--lang2', type=str, default="fr", help='Target Language')
    parser.add_argument('--USE_CUDA', action='store_true', help='IF USE CUDA (Default == False)')
    parser.add_argument('--attention', type=str, default='Bahdanau', help='attention type: either Bahdanau or Luong')
    # parser.add_argument('--teacher_forcing_ratio', type=float, default=1, help='Teacher forcing ratio for encoder')
    parser.add_argument('--hidden_size', type=int, default=1024, help='Size of hidden layer')
    parser.add_argument('--n_epochs', type=int, default=50000, help='Number of single iterations through the data')
    parser.add_argument('--learning_rate', type=float, default=0.0001, help='Learning rate (for both, encoder and decoder)')
    parser.add_argument('--n_layers', type=int, default=2, help='Number of layers (for both, encoder and decoder)')
    parser.add_argument('--dropout', type=float, default=0.1, help='Dropout (%) in the decoder')
    parser.add_argument('--model_type', type=str, default="seq2seq", help='Model type (and ending of files)')
    parser.add_argument('--main_data_dir', type=str, default="/scratch/eff254/NLP/Data/Model_ready", help='Directory where data is saved (in folders train/dev/test)')
    parser.add_argument('--out_dir', type=str, default="./checkpoints", help="Directory to save the model state dicts")
    parser.add_argument('--eval_dir', type=str, default="/scratch/eff254/NLP/Evaluation", help="Directory to save predictions - MUST CONTAIN PERL SCRIPT")
    parser.add_argument('--optimizer', type=str, default="Adam", help="Optimizer (Adam vs SGD). Default: Adam")
    parser.add_argument('--kmax', type=int, default=10, help="Beam search Topk to search")
    parser.add_argument('--clip', type=int, default=1, help="Clipping the gradients")
    parser.add_argument('--batch_size', type=int, default=128, help="Size of a batch")
    parser.add_argument('--min_count_trim_output', type=int, default=2, help="trim infrequent output words")
    parser.add_argument('--min_count_trim_input', type=int, default=2, help="trim infrequent input words")
    parser.add_argument('--save_every', type=int, default=100, help='Checkpoint model after number of iters')
    parser.add_argument('--print_every', type=int, default=10, help='Print training loss after number of iters')
    parser.add_argument('--eval_every', type=int, default=100, help='Evaluate translation on one dev pair after number of iters')
    parser.add_argument('--bleu_every', type=int, default=200, help='Get bleu score number of iters')
    parser.add_argument('--scheduled_sampling_k', type=int, default=3000, help='scheduled sampling parameter for teacher forcing, \
        based on inverse sigmoid decay')
    parser.add_argument('--experiment', type=str, default="MICA", help='experiment name')

    opt = parser.parse_args()
    print(opt)

    if opt.experiment is None:
        opt.experiment = 'MICA_experiment'

    # flag for character encoding
    target_char = (opt.model_type == 'bpe2char')
    if target_char:
        opt.MAX_LENGTH_TARGET = 200

    ######## Comet ML ########
    #experiment = comet_mirror("Experiment2")
    experiment = Experiment(api_key="00Z9vIf4wOLZ0yrqzdwHqttv4", log_code=True)
    hyper_params = vars(opt)
    experiment.log_multiple_params(hyper_params)


    return opt, target_char, experiment
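
Example #11 logs every command-line argument as a hyperparameter by converting the argparse Namespace with vars(opt). A hedged, self-contained sketch of that pattern (the two arguments, the API key and the empty argv list are placeholders so the sketch runs without a command line):

import argparse

from comet_ml import Experiment

parser = argparse.ArgumentParser()
parser.add_argument('--hidden_size', type=int, default=1024)
parser.add_argument('--learning_rate', type=float, default=0.0001)
opt = parser.parse_args([])  # empty list: use the defaults instead of sys.argv

experiment = Experiment(api_key='YOUR-API-KEY', log_code=True)
experiment.log_multiple_params(vars(opt))
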
Example #12
def init_comet(params, trainer):
    if params['comet_key'] is not None:
        from comet_ml import Experiment
        from trainer.plugins import CometPlugin
        experiment = Experiment(api_key=params['comet_key'], log_code=False)
        hyperparams = {
            name: param_to_string(params[name])
            for name in tag_params
        }
        experiment.log_multiple_params(hyperparams)
        trainer.register_plugin(
            CometPlugin(experiment, [('training_loss', 'epoch_mean'),
                                     'validation_loss', 'test_loss']))
Example #13
def init_comet(params, trainer):
    if params['comet_key']:
        from comet_ml import Experiment
        experiment = Experiment(api_key=params['comet_key'],
                                project_name=params['comet_project_name'],
                                log_code=False)
        hyperparams = {name: str(params[name]) for name in params}
        experiment.log_multiple_params(hyperparams)
        trainer.register_plugin(
            CometPlugin(experiment, [
                'G_loss.epoch_mean', 'D_loss.epoch_mean', 'D_real.epoch_mean',
                'D_fake.epoch_mean', 'sec.kimg', 'sec.tick', 'kimg_stat'
            ] + (['depth', 'alpha'] if params['progressive_growing'] else [])))
    else:
        print('Comet_ml logging disabled.')
Example #14
def train(hyper_params):
    mnist = get_data()

    # Get graph definition, tensors and ops
    train_step, cross_entropy, accuracy, x, y, y_ = build_model_graph(
        hyper_params)

    # log parameters to Comet.ml
    import os
    # Setting the API key (saved as environment variable)
    exp = Experiment(
        #api_key="YOUR API KEY",
        # or
        api_key=os.environ.get("COMET_API_KEY"),
        project_name='comet-examples')
    exp.log_multiple_params(hyper_params)
    exp.log_dataset_hash(mnist)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        exp.set_model_graph(sess.graph)

        for i in range(hyper_params["steps"]):
            batch = mnist.train.next_batch(hyper_params["batch_size"])
            exp.set_step(i)
            # Compute train accuracy every 10 steps
            if i % 10 == 0:
                train_accuracy = accuracy.eval(feed_dict={
                    x: batch[0],
                    y_: batch[1]
                })
                print('step %d, training accuracy %g' % (i, train_accuracy))
                exp.log_metric("acc", train_accuracy)

            # Update weights (back propagation); Operation.run() returns None,
            # so fetch cross_entropy in the same call to obtain the loss value
            _, loss = sess.run([train_step, cross_entropy],
                               feed_dict={x: batch[0], y_: batch[1]})
            exp.log_metric("loss", loss)

        ### Finished Training ###

        # Compute test accuracy
        acc = accuracy.eval(feed_dict={
            x: mnist.test.images,
            y_: mnist.test.labels
        })

        print('test accuracy %g' % acc)
Example #15
class Logger(object):
    def __init__(self, dataset_name, model_name):
        self.model_name = model_name
        self.project_name = "%s-%s" % (dataset_name, self.model_name)
        self.logdir = os.path.join(hp.logdir, self.project_name)
        self.writer = SummaryWriter(log_dir=self.logdir)

        self.experiment = None  # Experiment(api_key="luY5eUQDsBynS168WxJiRPJmJ", project_name=self.project_name, log_code=False)
        if hp.comet_ml_api_key is not None:
            self.experiment = Experiment(api_key=hp.comet_ml_api_key,
                                         project_name=self.project_name,
                                         log_code=False)
            self.experiment.log_multiple_params(
                dict((name, getattr(hp, name)) for name in dir(hp)
                     if not name.startswith('__')))

    def log_step(self, phase, step, loss_dict, image_dict):
        if phase == 'train':
            if step % 50 == 0:
                if self.experiment is not None:
                    with self.experiment.train():
                        self.experiment.log_multiple_metrics(loss_dict,
                                                             step=step)

                # self.writer.add_scalar('lr', get_lr(), step)
                # self.writer.add_scalar('%s-step/loss' % phase, loss, step)
                for key in sorted(loss_dict):
                    self.writer.add_scalar('%s-step/%s' % (phase, key),
                                           loss_dict[key], step)

            if step % 1000 == 0:
                for key in sorted(image_dict):
                    self.writer.add_image('%s/%s' % (self.model_name, key),
                                          image_dict[key], step)

    def log_epoch(self, phase, step, loss_dict):
        for key in sorted(loss_dict):
            self.writer.add_scalar('%s/%s' % (phase, key), loss_dict[key],
                                   step)

        if phase == 'valid':
            if self.experiment is not None:
                with self.experiment.validate():
                    self.experiment.log_multiple_metrics(loss_dict, step=step)
Example #16
    def train(self, **kwargs):
        self._convert_annotation_library_to_tfrecords()

        luminoth_config, hyperparams = self._generate_train_config(**kwargs)

        experiment = Experiment(
            api_key=config["credentials"]["comet_ml"]["api_key"],
            project_name=config["credentials"]["comet_ml"]["project_name"],
            log_code=False,
            log_graph=False,
            auto_param_logging=False,
            auto_metric_logging=False,
            auto_output_logging=None,
            log_env_details=False,
            log_git_metadata=False
        )

        experiment.log_multiple_params(hyperparams)

        luminoth_train(luminoth_config, environment="local", experiment=experiment)
Example #17
def init_comet(params, trainer):
    if params['comet_key']:
        from comet_ml import Experiment
        experiment = Experiment(api_key=params['comet_key'], project_name=params['comet_project_name'], log_code=False)
        hyperparams = {
            name: str(params[name]) for name in params
        }
        experiment.log_multiple_params(hyperparams)
        trainer.register_plugin(CometPlugin(
            experiment, [
                'G_loss.epoch_mean',
                'D_loss.epoch_mean',
                'D_real.epoch_mean',
                'D_fake.epoch_mean',
                'sec.kimg',
                'sec.tick',
                'kimg_stat'
            ] + (['depth', 'alpha'] if params['progressive_growing'] else [])
        ))
    else:
        print('Comet_ml logging disabled.')
Example #18
def train(hyper_params):
    mnist = get_data()

    # Get graph definition, tensors and ops
    train_step, cross_entropy, accuracy, x, y, y_ = build_model_graph(
        hyper_params)

    experiment = Experiment(api_key="XXXX", project_name='my project')
    experiment.log_multiple_params(hyper_params)
    experiment.log_dataset_hash(mnist)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        experiment.set_model_graph(sess.graph)

        for i in range(hyper_params["steps"]):
            batch = mnist.train.next_batch(hyper_params["batch_size"])
            experiment.set_step(i)
            # Compute train accuracy every 10 steps
            if i % 10 == 0:
                train_accuracy = accuracy.eval(feed_dict={
                    x: batch[0],
                    y_: batch[1]
                })
                print('step %d, training accuracy %g' % (i, train_accuracy))
                experiment.log_metric("acc", train_accuracy)

            # Update weights (back propagation); Operation.run() returns None,
            # so fetch cross_entropy in the same call to obtain the loss value
            _, loss = sess.run([train_step, cross_entropy],
                               feed_dict={x: batch[0], y_: batch[1]})
            experiment.log_metric("loss", loss)

        ### Finished Training ###

        # Compute test accuracy
        acc = accuracy.eval(feed_dict={
            x: mnist.test.images,
            y_: mnist.test.labels
        })

        print('test accuracy %g' % acc)
Example #19
    def init_callbacks(self):
        self.callbacks.append(
            ModelCheckpoint(
                filepath=os.path.join(
                    self.config.callbacks.checkpoint_dir,
                    '%s-{epoch:02d}-{val_loss:.2f}.hdf5' %
                    self.config.exp.name),
                monitor=self.config.callbacks.checkpoint_monitor,
                mode=self.config.callbacks.checkpoint_mode,
                save_best_only=self.config.callbacks.checkpoint_save_best_only,
                save_weights_only=self.config.callbacks.checkpoint_save_weights_only,
                verbose=self.config.callbacks.checkpoint_verbose,
            ))

        self.callbacks.append(
            TensorBoard(
                log_dir=self.config.callbacks.tensorboard_log_dir,
                write_graph=self.config.callbacks.tensorboard_write_graph,
            ))

        # if the config has the debug flag on, turn on tfdbg (TODO: make it work)
        if hasattr(self.config, "debug"):
            if (self.config.debug == True):
                import keras.backend
                from tensorflow.python import debug as tf_debug
                print("#=========== DEBUG MODE ===========#")
                sess = keras.backend.get_session()
                sess = tf_debug.LocalCLIDebugWrapperSession(sess)
                keras.backend.set_session(sess)

        # if the config file has a comet_ml key, log on comet
        if hasattr(self.config, "comet_api_key"):
            from comet_ml import Experiment  # PUT the import in main
            experiment = Experiment(api_key=self.config.exp.comet_api_key,
                                    project_name=self.config.exp.name)
            experiment.disable_mp()
            experiment.log_multiple_params(self.config)
            self.callbacks.append(experiment.get_keras_callback())
Example #20
def run_lightgbm(train_df, validation_df):
    train_data = lgb.Dataset(data=train_df.drop(columns=['TARGET']),
                             label=train_df['TARGET'])
    validation_data = lgb.Dataset(data=validation_df.drop(columns=['TARGET']),
                                  label=validation_df['TARGET'])
    num_round = 10

    params = """
    num_leaves integer [31, 51] [31]
    num_trees integer [50, 100] [50]
    """
    optimizer = Optimizer(API_KEY)
    optimizer.set_params(params)

    while True:
        suggestion = optimizer.get_suggestion()
        experiment = Experiment(api_key=API_KEY, project_name='home-credit')
        experiment.set_name('lightgbm')

        _param = {
            'num_leaves': suggestion['num_leaves'],
            'num_trees': suggestion['num_trees'],
            'objective': 'binary',
            'metric': 'auc'
        }

        experiment.log_multiple_params(_param)
        experiment.log_dataset_hash(
            pd.concat([train_df, validation_df], axis=0))
        bst = lgb.train(_param,
                        train_data,
                        num_round,
                        valid_sets=[validation_data])
        y_pred = bst.predict(validation_df.drop(columns=['TARGET']))

        auc_score = roc_auc_score(validation_df['TARGET'], y_pred)
        experiment.log_metric(name='auc_score', value=auc_score)
        suggestion.report_score("auc_score", auc_score)
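
Example #20 drives hyperparameter search with the legacy Comet Optimizer: declare the search space as a PCS string, pull suggestions in a loop, train, then report the score back. A hedged sketch of that loop with a toy objective (the API key, the parameter x and the objective are placeholders, and it assumes Optimizer is importable from comet_ml as in that legacy API; like the example above, the loop keeps asking for suggestions indefinitely):

from comet_ml import Experiment, Optimizer

optimizer = Optimizer('YOUR-API-KEY')
optimizer.set_params("""
x integer [1, 10] [1]
""")

while True:
    suggestion = optimizer.get_suggestion()
    experiment = Experiment(api_key='YOUR-API-KEY', project_name='demo')

    score = float(suggestion['x']) ** 2  # placeholder objective
    experiment.log_metric('score', score)
    suggestion.report_score('score', score)  # feed the result back to the optimizer
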
Example #21
def main(_):
    print("Model Architecture: {}".format(FLAGS.model_architecture))

    # Adjust some parameters
    if FLAGS.debug:
        FLAGS.small_label_set = False
        print("RUNNING IN DEBUG MODE")

    FLAGS.num_classes = utils.get_num_classes(FLAGS)

    X_train, y_train = data_utils.load_dataset_tf(FLAGS, mode="train")
    X_val, y_val = data_utils.load_dataset_tf(FLAGS, mode="val")

    # comet_ml experiment logging (https://www.comet.ml/)
    experiment = Experiment(api_key="J55UNlgtffTDmziKUlszSMW2w",
                            log_code=False)
    experiment.log_multiple_params(utils.gather_params(FLAGS))
    experiment.set_num_of_epocs(FLAGS.epochs)
    experiment.log_dataset_hash(X_train)

    tf.logging.set_verbosity(tf.logging.INFO)

    # Start a new, DEFAULT TensorFlow session.
    sess = tf.InteractiveSession()

    utils.set_seeds()  # Get deterministic behavior?

    model = models.create_model(FLAGS)
    fw = framework.Framework(sess, model, experiment, FLAGS)

    num_params = int(utils.get_number_of_params())
    model_size = num_params * 4
    print("\nNumber of trainable parameters: {}".format(num_params))
    print("Model is ~ {} bytes out of max 5000000 bytes\n".format(model_size))
    experiment.log_parameter("num_params", num_params)
    experiment.log_parameter("approx_model_size", model_size)

    fw.optimize(X_train, y_train, X_val, y_val)
Example #22
def train(args):

    # So I don't frigging forget what caused working models
    save_args(args)

    if args["use_tf_debug"]:
        hooks = [tf_debug.LocalCLIDebugHook()]
    else:
        hooks = []

    if args["use_comet"]:
        # Add the following code anywhere in your machine learning file
        experiment = Experiment(api_key="bRptcjkrwOuba29GcyiNaGDbj",
                                project_name="macgraph",
                                workspace="davidhughhenrymack")
        experiment.log_multiple_params(args)

        if len(args["tag"]) > 0:
            experiment.add_tags(args["tag"])

    train_size = sum(
        1 for _ in tf.python_io.tf_record_iterator(args["train_input_path"]))
    logger.info(f"Training on {train_size} records")

    # ----------------------------------------------------------------------------------

    estimator = get_estimator(args)

    train_spec = tf.estimator.TrainSpec(
        input_fn=gen_input_fn(args, "train"),
        max_steps=args["train_max_steps"] *
        1000 if args["train_max_steps"] is not None else None,
        hooks=hooks)

    eval_spec = tf.estimator.EvalSpec(input_fn=gen_input_fn(args, "eval"),
                                      throttle_secs=args["eval_every"])

    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
Example #23
class CometMLLogger(Logger):
    def __init__(self, logger_kwargs=None):
        super().__init__(logger_kwargs=logger_kwargs)

        self.experiment = Experiment(api_key=self.config["api_key"],
                                     project_name=self.config["project_name"],
                                     log_code=False,
                                     log_graph=False,
                                     auto_param_logging=False,
                                     auto_metric_logging=False,
                                     auto_output_logging=None,
                                     log_env_details=False,
                                     log_git_metadata=False)

        if "reward_func" in self.config:
            self.experiment.set_code(
                inspect.getsource(self.config["reward_func"]))

    def log_hyperparams(self, hyperparams, step=0):
        self.experiment.log_multiple_params(hyperparams, step=step)

    def log_metric(self, key, value, step=0):
        self.experiment.log_metric(key, value, step=step)
Example #24
    'VOCAB_SIZE': VOCAB_SIZE,
    'BATCH_SIZE': BATCH_SIZE,
    'EMBED_SIZE': EMBED_SIZE,
    'SKIP_WINDOW': SKIP_WINDOW,
    'NUM_SAMPLED': NUM_SAMPLED,
    'LEARNING_RATE': LEARNING_RATE,
    'NUM_TRAIN_STEPS': NUM_TRAIN_STEPS,
    'VISUAL_FLD': VISUAL_FLD,
    'SKIP_STEP': SKIP_STEP,
    'DOWNLOAD_URL': DOWNLOAD_URL,
    'EXPECTED_BYTES': EXPECTED_BYTES,
    'NUM_VISUALIZE': NUM_VISUALIZE
}

exp = Experiment(api_key="YOUR-API-KEY", project_name='word2vec')
exp.log_multiple_params(params)


def word2vec(dataset):
    """ Build the graph for word2vec model and train it """
    # Step 1: get input, output from the dataset
    with tf.name_scope('data'):
        iterator = dataset.make_initializable_iterator()
        center_words, target_words = iterator.get_next()
    """ Step 2 + 3: define weights and embedding lookup.
    In word2vec, it's actually the weights that we care about
    """
    with tf.name_scope('embed'):
        embed_matrix = tf.get_variable(
            'embed_matrix',
            shape=[VOCAB_SIZE, EMBED_SIZE],
Example #25
           padding='same',
           activation=params['activation']))
model.add(Dropout(params['dropout']))

model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer=params['optimizer'],
              metrics=['accuracy'])
# print model.summary() so it is captured automatically in Comet's `Output` tab
print(model.summary())
params.update({'total_number_of_parameters': model.count_params()})

#will log metrics with the prefix 'train_'
with experiment.train():
    model.fit(X_train,
              y_train,
              epochs=params['epochs'],
              batch_size=params['batch_size'],
              verbose=1,
              validation_data=(X_test, y_test))

#will log metrics with the prefix 'test_'
with experiment.test():
    loss, accuracy = model.evaluate(X_test, y_test)
    metrics = {'loss': loss, 'accuracy': accuracy}
    experiment.log_multiple_metrics(metrics)

experiment.log_multiple_params(params)
experiment.log_dataset_hash(X_train)  #creates and logs a hash of your data
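
Example #25 wraps fitting and evaluation in experiment.train() and experiment.test() so that, as its comments note, metrics logged inside them get the 'train_' and 'test_' prefixes. A hedged minimal sketch of just those context managers (API key and values are placeholders):

from comet_ml import Experiment

experiment = Experiment(api_key='YOUR-API-KEY', project_name='demo')

with experiment.train():
    experiment.log_metric('accuracy', 0.91)  # recorded with the 'train_' prefix

with experiment.test():
    experiment.log_multiple_metrics({'loss': 0.35, 'accuracy': 0.88})
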
Example #26
def main():

    args = get_args()
    hyperparams = vars(args)

    if not args.no_comet:
        experiment = Experiment(api_key="5yzCYxgDmFnt1fhJWTRQIkETT",
                                log_code=True)
        experiment.log_multiple_params(hyperparams)

    text_field = data.Field(tokenize=custom_tokenizer,
                            fix_length=args.sentence_len,
                            unk_token='<**UNK**>')
    label_field = data.Field(sequential=False, unk_token=None)
    pair_field = data.RawField()

    if args.dataset == 'multinli':
        print('Loading MultiNLI Dataset')
        train = get_dataset(text_field, label_field, pair_field, 'train')
        val = get_dataset(text_field, label_field, pair_field, args.val_set)
    elif args.dataset == 'snli':
        print('Loading SNLI Dataset')
        train, val, test = datasets.SNLI.splits(text_field, label_field)
        del test
    else:
        raise Exception('Incorrect Dataset Specified')

    text_field.build_vocab(train, max_size=args.max_vocab_size)
    label_field.build_vocab(train, val)

    if args.word_vectors:
        text_field.vocab.load_vectors(args.word_vectors)

    device = -1
    if args.cuda:
        device = None

    print('Generating Iterators')
    train_iter, val_iter = data.BucketIterator.splits(
        (train, val),
        batch_size=args.batch_size,
        shuffle=True,
        sort_key=sort_key,
        device=device)
    train_iter.repeat = False

    args.n_embed = len(text_field.vocab)
    args.d_out = len(label_field.vocab)
    args.n_cells = args.n_layers
    if args.bidir:
        args.n_cells *= 2
    print(args)

    if args.load_model:
        model = torch.load(args.load_model)
    else:
        model = MODELS[args.model_type](args)
        print('Loading Word Embeddings')
        model.embed.weight.data = text_field.vocab.vectors

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                        model.parameters()),
                                 lr=args.lr)

    if args.cuda:
        model = model.cuda()
        criterion = criterion.cuda()
    print(model)

    print('Training Model')

    best_val_acc = 0.0
    val_acc_history = []

    for epoch in range(1, args.n_epochs + 1):

        if (args.model_type == 'DA') and (best_val_acc >= args.DA_embed_train):
            model.embed.weight.requires_grad = True

        train_iter.init_epoch()
        for batch_ind, batch in enumerate(train_iter):
            model.train()
            optimizer.zero_grad()
            out = model(batch)
            loss = criterion(out, batch.label)
            loss.backward()
            clip_grad_norm(
                filter(lambda p: p.requires_grad, model.parameters()), 10)
            optimizer.step()

            if (batch_ind != 0) and (batch_ind % args.dev_every == 0):
                val_correct, val_loss = evaluate(val_iter, model, criterion)
                val_accuracy = 100 * val_correct / len(val)

                print('    Batch Step {}/{}, Val Loss: {:.4f}, Val Accuracy: {:.4f}'.\
                            format(batch_ind,
                                   len(train) // args.batch_size,
                                   val_loss,
                                   val_accuracy))

        train_correct, train_loss = evaluate(train_iter, model, criterion)
        val_correct, val_loss = evaluate(val_iter, model, criterion)
        val_accuracy = 100 * val_correct / len(val)
        val_acc_history.append(val_accuracy)

        stop_training = early_stop(val_acc_history)

        if not args.no_comet:
            experiment.log_metric("Train loss", train_loss)
            experiment.log_metric("Val loss", val_loss)
            experiment.log_metric("Accuracy (val)", val_accuracy)
            experiment.log_metric("Accuracy (train)",
                                  100 * train_correct / len(train))

        if args.save_model and (val_accuracy > best_val_acc):
            best_val_acc = val_accuracy
            if best_val_acc > 60:
                snapshot_path = '../saved_models/Model_{}_acc_{:.4f}_epoch_{}_model.pt'.format(
                    args.model_type, val_accuracy, epoch)

                if args.cuda:
                    torch.save(model.cpu(), snapshot_path)
                    model = model.cuda()
                else:
                    torch.save(model, snapshot_path)

        print('Epoch: {}, Train Loss: {:.4f}, Val Loss: {:.4f}, Train Acc: {:.2f}, Val Acc: {:.2f}, Best Val Acc: {:.2f}'.\
                format(epoch,
                       train_loss,
                       val_loss,
                       100 * train_correct / len(train),
                       val_accuracy,
                       best_val_acc))

        if stop_training:
            print('Early stop triggered.')
            break
Example #27
        'l0': 1365
    },
    1: {
        'nb_merge': 0,
        'l0': 1014
    },
    2: {
        'nb_merge': 0,
        'l0': 717
    }
}

hyper_params = {'learning_rate': 0.0001, 'alphabet_size': 94}
hyper_params.update(dict_hyper_params[exp_index])

comet.log_multiple_params(hyper_params)

nb_classes = 2
batch_size = 128

num_epochs = 10
save_model_path = '/home/ecd353/NLP_project/experiments/save/models/amazon/exp' + str(
    exp_index) + '_best.pth.tar'
save_pred_path = '/home/ecd353/NLP_project/experiments/save/predictions/amazon/exp' + str(
    exp_index) + '.txt'

## generate dataset
print("generating dataset")
data_path = '/home/ecd353/NLP_project/data/amazon/'
training_set, validation_set = dataGenerator(data_path + 'train.txt',
                                             max_length=hyper_params['l0'])
Example #28
def test_autoencoder():
    (x_train, _), (x_test, _) = mnist.load_data()

    x_train = x_train.astype('float32') / 255.
    x_test = x_test.astype('float32') / 255.
    x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
    x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))

    config = {
        "encoder": [{
            "kwargs": {
                "activation": "relu",
                "units": 256
            },
            "name": "hidden1",
            "type": "Dense"
        }, {
            "name": "batchnorm",
            "type": "BatchNormalization"
        }, {
            "kwargs": {
                "rate": 0
            },
            "name": "dropout",
            "type": "Dropout"
        }, {
            "kwargs": {
                "activation": "sigmoid",
            },
            "name": "latent",
            "regularizer": {
                "type": "l1",
                "value": 0
            },
            "type": "Dense"
        }]
    }

    latent_dim = 32
    latent_shape = (latent_dim, )
    input_shape = (x_train.shape[1], )

    print(latent_shape)
    print(input_shape)

    ae = Autoencoder(config["encoder"],
                     None,
                     input_shape=input_shape,
                     latent_shape=latent_shape,
                     loss="mean_squared_error",
                     optimizer_params=None)

    #experiment = Experiment(api_key="ac4P1dtMEjJf1d9hIo9CIuSXC", project_name="mnist-autoencode")
    experiment = Experiment(project_name="MNIST test",
                            api_key="50kNmWUHJrWHz3FlgtpITIsB1")
    experiment.log_parameter("Experiment name", "Testing ae")
    experiment.log_multiple_params(config)
    experiment.log_parameter("Latent dim", latent_shape[0])

    ae.fit(x_train, batch_size=1000, epochs=5, validation_data=x_test)

    predictions = ae.predict(x_test)

    scores = np.sqrt(((predictions - x_test)**2).mean())
    experiment.log_other("scores", scores)
    print(scores)

    print(predictions.shape)
    pred_imgs = predictions.reshape(-1, 28, 28)
    fig = plt.figure()
    for i, img in enumerate(pred_imgs[:5]):
        fig.add_subplot(2, 5, i + 1)
        plt.imshow(img)
        plt.axis('off')
        fig.add_subplot(2, 5, i + 6)
        plt.imshow(x_test[i].reshape(28, 28))
        plt.axis('off')
    plt.show()
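
Example #28 mixes three logging calls: log_multiple_params for the whole config dict, log_parameter for single named values, and log_other for auxiliary results such as the reconstruction score. A hedged sketch of those calls in isolation (API key and values are placeholders):

from comet_ml import Experiment

experiment = Experiment(api_key='YOUR-API-KEY', project_name='demo')
experiment.log_multiple_params({'latent_dim': 32, 'batch_size': 1000})
experiment.log_parameter('Experiment name', 'Testing ae')
experiment.log_other('scores', 0.042)
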
Example #29
def test_shared_embedding_autoencoder():
    (x_train, _), (x_test, _) = mnist.load_data()

    x_train = x_train.astype('float32') / 255.
    x_test = x_test.astype('float32') / 255.
    x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
    x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))

    f_idx = np.random.permutation(x_test.shape[1])
    n = int(len(f_idx) / 3)
    print(n)
    print(x_train[f_idx[n * 0:n * 0 + n]].shape)
    x_train_list = [x_train[:, f_idx[n * i:n * i + n]] for i in range(3)]
    x_test_list = [x_test[:, f_idx[n * i:n * i + n]] for i in range(3)]

    input_shapes = [(d.shape[1], ) for d in x_train_list]

    config = {
        "encoder": [{
            "kwargs": {
                "activation": "relu",
                "units": 256
            },
            "name": "hidden1",
            "type": "Dense"
        }, {
            "name": "batchnorm",
            "type": "BatchNormalization"
        }, {
            "kwargs": {
                "rate": 0
            },
            "name": "dropout",
            "type": "Dropout"
        }, {
            "kwargs": {
                "activation": "sigmoid",
            },
            "name": "latent",
            "regularizer": {
                "type": "l1",
                "value": 0
            },
            "type": "Dense"
        }]
    }

    latent_dim = 32
    latent_shape = (latent_dim, )

    print(latent_shape)
    print(input_shapes)

    ae = SharedEmbeddingAutoencoder(config["encoder"],
                                    None,
                                    input_shapes=input_shapes,
                                    latent_shape=latent_shape,
                                    loss="mean_squared_error",
                                    optimizer_params=None)

    #experiment = Experiment(api_key="ac4P1dtMEjJf1d9hIo9CIuSXC", project_name="mnist-autoencode")
    experiment = Experiment(project_name="MNIST test",
                            api_key="50kNmWUHJrWHz3FlgtpITIsB1")
    experiment.log_parameter("Experiment name", "Testing mae")
    experiment.log_multiple_params(config)
    experiment.log_parameter("Latent dim", latent_shape[0])

    ae.fit(x_train_list,
           batch_size=1000,
           epochs=10,
           validation_data=x_test_list)

    predictions = ae.predict(x_test_list)
    print([p.shape for p in predictions])
    #scores = [np.sqrt(((p- x)**2).mean()) for p,x in zip(predictions, x_test_list)]
    #experiment.log_other("scores", scores)
    #print(scores)

    #x_test_list = [x_test[f_idx[n*i:n*i+n]] for i in range(2)]

    predictions_combined = np.empty_like(x_test)
    for i in range(3):
        predictions_combined[:, f_idx[n * i:n * i + n]] = predictions[i]

    pred_imgs = predictions_combined.reshape(-1, 28, 28)
    print(pred_imgs.shape)
    fig = plt.figure()
    for i, img in enumerate(pred_imgs[:5]):
        fig.add_subplot(2, 5, i + 1)
        plt.imshow(img)
        plt.axis('off')
        fig.add_subplot(2, 5, i + 6)
        plt.imshow(x_test[i].reshape(28, 28))
        plt.axis('off')
    plt.show()
Example #30
def main():

    args = get_args()
    hyperparams = vars(args)

    if not args.no_comet:
        experiment = Experiment(api_key="5yzCYxgDmFnt1fhJWTRQIkETT",
                                log_code=True)
        experiment.log_multiple_params(hyperparams)

    text_field = data.Field(tokenize=custom_tokenizer,
                            fix_length=args.sentence_len,
                            unk_token='<**UNK**>')
    label_field = data.Field(sequential=False, unk_token=None)
    pair_field = data.RawField()

    print('Loading MultiNLI Dataset')
    train = get_dataset(text_field, label_field, pair_field, 'train')
    val = get_dataset(text_field, label_field, pair_field, 'val_matched')

    text_field.build_vocab(train, max_size=args.max_vocab_size)
    label_field.build_vocab(train, val)

    if args.word_vectors:
        text_field.vocab.load_vectors(args.word_vectors)

    device = -1
    if args.cuda:
        device = None

    print('Generating Iterators')
    train_iter, val_iter = data.BucketIterator.splits(
        (train, val),
        batch_size=args.batch_size,
        shuffle=True,
        sort_key=sort_key,
        device=device)
    train_iter.repeat = False

    args.n_embed = len(text_field.vocab)
    args.d_out = len(label_field.vocab)
    args.n_cells = args.n_layers
    if args.bidir:
        args.n_cells *= 2
    print(args)

    if args.load_model:
        model = torch.load(args.load_model)
    else:
        model = MODELS[args.model_type](args)
        print('Loading Word Embeddings')
        model.embed.weight.data = text_field.vocab.vectors

    criterion = nn.CrossEntropyLoss()

    if args.cuda:
        model = model.cuda()
        criterion = criterion.cuda()
    print(model)

    val_correct, val_loss = evaluate(val_iter, model, criterion)
    val_accuracy = 100 * val_correct / len(val)
    print(val_accuracy)
Example #31
            experiment.log_parameter("Experiment name",
                                     "Testing different layers")
            experiment.log_parameter("Latent dim", latent_shape[0])

            groups = data_reader.groups
            all_scores = []

            for i in range(3):
                ae = Autoencoder(config[i]["encoder"],
                                 config[i]["decoder"],
                                 input_shape=input_shapes[i],
                                 latent_shape=latent_shape,
                                 loss="mean_squared_error",
                                 optimizer_params=None)

                experiment.log_multiple_params(config[i])

                scores = ae.cross_validate(data[i],
                                           groups,
                                           experiment=experiment,
                                           epochs=10000,
                                           n_splits=4,
                                           log_prefix=f"dataset_{i}_")

                all_scores.append(scores)

                mean_scores = np.mean(scores)

                experiment.log_metric(f"mean_scores_{i}", mean_scores)

                experiment.log_other(f"scores_{i}", scores)
Example #32
class Trainer:
    def __init__(self):
        # Simple training script for training a RetinaNet network.

        # Dataset type, must be one of csv or coco.
        self.dataset = 'coco'

        # Path to COCO directory
        self.coco_path = './data'

        # Path to file containing training annotations (see readme)
        self.csv_train = None

        # Path to file containing class list (see readme)
        self.csv_classes = None

        # Path to file containing validation annotations (optional, see readme)
        self.csv_val = None

        # Resnet depth, must be one of 18, 34, 50, 101, 152
        self.depth = 50

        # batch_size
        self.bs = 8

        # learning rate
        self.lr = 1e-5

        # Number of epochs
        self.epochs = 10

        # set device
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')

        # set focal loss
        self.focal_loss = losses.FocalLoss()

        # module calculating NMS (non-maximum suppression)
        self.nms = NMS(BBoxTransform, ClipBoxes)

        # index of the saving model
        self.save_name = 2

        # use comet_ml
        self.cml = True

        # classification_loss
        self.cls_loss_meter = AverageMeter()

        # regression_loss
        self.rgrs_loss_meter = AverageMeter()

        self.set_comet_ml()

    def set_comet_ml(self):
        params = {
            'epochs': self.epochs,
            'batch_size': self.bs,
            'lr': self.lr,
            'resnet_depth': self.depth,
            'save_name': self.save_name,
        }

        if self.cml:
            self.experiment = Experiment(api_key="xK18bJy5xiPuPf9Dptr43ZuMk",
                                         project_name="retinanet-coco",
                                         workspace="tanimutomo")
        else:
            self.experiment = None

        if self.cml:
            self.experiment.log_multiple_params(params)

    def set_dataset(self):
        # Create the data loaders
        if self.dataset == 'coco':

            if self.coco_path is None:
                raise ValueError(
                    'Must provide --coco_path when training on COCO,')

            dataset_train = CocoDataset(self.coco_path,
                                        set_name='train2017',
                                        transform=transforms.Compose([
                                            Normalizer(),
                                            Augmenter(),
                                            Resizer()
                                        ]))
            dataset_val = CocoDataset(self.coco_path,
                                      set_name='val2017',
                                      transform=transforms.Compose(
                                          [Normalizer(),
                                           Resizer()]))

        elif self.dataset == 'csv':

            if self.csv_train is None:
                raise ValueError(
                    'Must provide --csv_train when training on csv,')

            if self.csv_classes is None:
                raise ValueError(
                    'Must provide --csv_classes when training on csv,')

            dataset_train = CSVDataset(train_file=self.csv_train,
                                       class_list=self.csv_classes,
                                       transform=transforms.Compose([
                                           Normalizer(),
                                           Augmenter(),
                                           Resizer()
                                       ]))

            if self.csv_val is None:
                dataset_val = None
                print('No validation annotations provided.')
            else:
                dataset_val = CSVDataset(train_file=self.csv_val,
                                         class_list=self.csv_classes,
                                         transform=transforms.Compose(
                                             [Normalizer(),
                                              Resizer()]))

        else:
            raise ValueError(
                'Dataset type not understood (must be csv or coco), exiting.')

        return dataset_train, dataset_val

    def set_models(self, dataset_train):
        # Create the model
        if self.depth == 18:
            retinanet = model.resnet18(num_classes=dataset_train.num_classes(),
                                       pretrained=True)
        elif self.depth == 34:
            retinanet = model.resnet34(num_classes=dataset_train.num_classes(),
                                       pretrained=True)
        elif self.depth == 50:
            retinanet = model.resnet50(num_classes=dataset_train.num_classes(),
                                       pretrained=True)
        elif self.depth == 101:
            retinanet = model.resnet101(
                num_classes=dataset_train.num_classes(), pretrained=True)
        elif self.depth == 152:
            retinanet = model.resnet152(
                num_classes=dataset_train.num_classes(), pretrained=True)
        else:
            raise ValueError(
                'Unsupported model depth, must be one of 18, 34, 50, 101, 152')

        if torch.cuda.device_count() > 1:
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
            retinanet = nn.DataParallel(retinanet)

        self.retinanet = retinanet.to(self.device)
        self.retinanet.training = True
        self.optimizer = optim.Adam(self.retinanet.parameters(), lr=self.lr)

        # This lr_scheduler reduces the learning rate when the tracked loss plateaus
        # (the mean training loss of each epoch is passed to scheduler.step below).
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer,
                                                               patience=3,
                                                               verbose=True)

        self.loss_hist = collections.deque(maxlen=500)

        # self.retinanet.train()
        # self.retinanet.freeze_bn()

    def iterate(self):
        dataset_train, dataset_val = self.set_dataset()
        sampler = AspectRatioBasedSampler(dataset_train,
                                          batch_size=self.bs,
                                          drop_last=False)
        dataloader_train = DataLoader(dataset_train,
                                      num_workers=0,
                                      collate_fn=collater,
                                      batch_sampler=sampler)

        # if dataset_val is not None:
        #     sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False)
        #     dataloader_val = DataLoader(dataset_val, num_workers=0, collate_fn=collater, batch_sampler=sampler_val)
        print('Num training images: {}'.format(len(dataset_train)))

        self.set_models(dataset_train)

        for epoch_num in range(self.epochs):
            epoch_loss = []

            metrics = {
                'classification_loss': self.cls_loss_meter.avg,
                'regression_loss': self.rgrs_loss_meter.avg,
                'entire_loss':
                self.cls_loss_meter.avg + self.rgrs_loss_meter.avg
            }
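            # Note: the loss meters are only updated inside train(), so the values logged
            # here reflect the previous epoch (or the meters' initial state on epoch 0).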

            if self.experiment is not None:
                self.experiment.log_multiple_metrics(metrics, step=epoch_num)

            self.retinanet.train()
            # .module only exists when the model is wrapped in nn.DataParallel
            net = self.retinanet.module if isinstance(
                self.retinanet, nn.DataParallel) else self.retinanet
            net.freeze_bn()

            epoch_loss = self.train(epoch_num, epoch_loss, dataloader_train)

            self.retinanet.eval()

            self.evaluate(epoch_num, dataset_val)

            torch.save(
                self.retinanet.state_dict(),
                os.path.join(
                    './saved_models',
                    'model{}_final_{}.pth'.format(self.save_name, epoch_num)))
            # torch.save(self.retinanet.module, '{}_self.retinanet_{}.pt'.format(self.dataset, epoch_num))

            # self.retinanet.load_state_dict(torch.load("./saved_models/model_final_0.pth"))

            self.scheduler.step(np.mean(epoch_loss))
            self.retinanet.eval()

    def train(self, epoch_num, epoch_loss, dataloader_train):
        for iter_num, data in enumerate(dataloader_train):
            try:
                self.optimizer.zero_grad()

                input = data['img'].to(self.device).float()
                annot = data['annot'].to(self.device)

                regression, classification, anchors = self.retinanet(input)

                classification_loss, regression_loss = self.focal_loss.calcurate(
                    classification, regression, anchors, annot)

                classification_loss = classification_loss.mean()
                regression_loss = regression_loss.mean()
                self.cls_loss_meter.update(classification_loss)
                self.rgrs_loss_meter.update(regression_loss)

                loss = classification_loss + regression_loss

                if bool(loss == 0):
                    continue

                loss.backward()

                torch.nn.utils.clip_grad_norm_(self.retinanet.parameters(),
                                               0.1)

                self.optimizer.step()

                self.loss_hist.append(float(loss.item()))

                epoch_loss.append(float(loss.item()))

                print(
                    'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'
                    .format(epoch_num, iter_num, float(classification_loss),
                            float(regression_loss), np.mean(self.loss_hist)))

                del classification_loss
                del regression_loss
            except Exception as e:
                print(e)
                continue

            # if iter_num == 10:
            #     break

        return epoch_loss

    def evaluate(self, epoch_num, dataset_val):
        if self.dataset == 'coco':

            print('Evaluating dataset')

            coco_eval.evaluate_coco(dataset_val, self.retinanet, self.nms,
                                    self.device)

        elif self.dataset == 'csv' and self.csv_val is not None:

            print('Evaluating dataset')

            mAP = csv_eval.evaluate(dataset_val, self.retinanet)
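            # A minimal sketch (not in the original): the per-class AP values could also
            # be pushed to the Comet experiment so that evaluation results appear next to
            # the training losses. This assumes csv_eval.evaluate returns a dict mapping
            # each class label to an (average_precision, num_annotations) pair, as in
            # common RetinaNet ports.
            if self.experiment is not None:
                for label, (average_precision, _) in mAP.items():
                    self.experiment.log_metric(
                        'AP_class_{}'.format(label),
                        average_precision,
                        step=epoch_num)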
Ejemplo n.º 33
0
def main(unused_args):

    hparams = {
        'datasource': FLAGS.datasource,
        'num_classes_train': FLAGS.num_classes,
        'num_classes_val': FLAGS.num_classes,
        'num_classes_test': FLAGS.num_classes_test,
        'num_shot_train': FLAGS.num_shot_train,
        'num_shot_test': FLAGS.num_shot_test,
        'steps': FLAGS.steps,
        'meta_batch_size': FLAGS.meta_batch_size,
        'meta_lr': FLAGS.meta_lr,
        'notes': FLAGS.notes,
    }
    hparams = check_default_config(hparams)
    if FLAGS.train and not FLAGS.load:
        hparams['mode'] = 'train'
        save_string = [
            hparams['datasource'],
            str(hparams['num_classes_train']) + 'way',
            str(hparams['num_shot_train']) + 'shot',
            strftime('%y%m%d_%H%M'),
        ]
        save_folder = '_'.join(map(str, save_string)) + '/'
        os.makedirs(FLAGS.savepath + save_folder)
        hparams['savepath'] = FLAGS.savepath + save_folder
        save_config(hparams, FLAGS.savepath + save_folder)
    # elif FLAGS.test:
    # 	hparams = load_config(FLAGS.savepath + 'config.json', test=True, notes=FLAGS.notes)

    if FLAGS.comet:
        experiment = Experiment(api_key=os.environ['COMETML_API_KEY'],
                                project_name='meta')
        experiment.log_multiple_params(hparams)

    if FLAGS.train and FLAGS.datasource in [
            'omniglot', 'miniimagenet', 'cifar'
    ]:

        num_shot_train = FLAGS.num_shot_train or 1
        num_shot_test = FLAGS.num_shot_test or 1

        data_generator = DataGenerator(
            datasource=FLAGS.datasource,
            num_classes=FLAGS.num_classes,
            num_samples_per_class=num_shot_train + num_shot_test,
            batch_size=FLAGS.meta_batch_size,
            test_set=False,
        )

        # Tensorflow queue for metatraining dataset
        # metatrain_image_tensor - (batch_size, num_classes * num_samples_per_class, 28 * 28)
        # metatrain_label_tensor - (batch_size, num_classes * num_samples_per_class, num_classes)
        metatrain_image_tensor, metatrain_label_tensor = data_generator.make_data_tensor(
            train=True, load=True, savepath='test.pkl')
        train_inputs = tf.slice(metatrain_image_tensor, [0, 0, 0],
                                [-1, FLAGS.num_classes * num_shot_train, -1])
        test_inputs = tf.slice(metatrain_image_tensor,
                               [0, FLAGS.num_classes * num_shot_train, 0],
                               [-1, -1, -1])
        train_labels = tf.slice(metatrain_label_tensor, [0, 0, 0],
                                [-1, FLAGS.num_classes * num_shot_train, -1])
        test_labels = tf.slice(metatrain_label_tensor,
                               [0, FLAGS.num_classes * num_shot_train, 0],
                               [-1, -1, -1])
        metatrain_input_tensors = {
            'train_inputs':
            train_inputs,  # batch_size, num_classes * (num_samples_per_class - update_batch_size), 28 * 28
            'train_labels':
            train_labels,  # batch_size, num_classes * (num_samples_per_class - update_batch_size), num_classes
            'test_inputs':
            test_inputs,  # batch_size, num_classes * update_batch_size, 28 * 28
            'test_labels':
            test_labels,  # batch_size, num_classes * update_batch_size, num_classes
        }

        data_generator = DataGenerator(
            datasource=FLAGS.datasource,
            num_classes=hparams['num_classes_val'],
            num_samples_per_class=num_shot_train + num_shot_test,
            batch_size=16,
            test_set=False,
        )

        # Tensorflow queue for metavalidation dataset
        metaval_image_tensor, metaval_label_tensor = data_generator.make_data_tensor(
            train=False)
        train_inputs = tf.slice(
            metaval_image_tensor, [0, 0, 0],
            [-1, hparams['num_classes_val'] * num_shot_train, -1])
        test_inputs = tf.slice(
            metaval_image_tensor,
            [0, hparams['num_classes_val'] * num_shot_train, 0], [-1, -1, -1])
        train_labels = tf.slice(
            metaval_label_tensor, [0, 0, 0],
            [-1, hparams['num_classes_val'] * num_shot_train, -1])
        test_labels = tf.slice(
            metaval_label_tensor,
            [0, hparams['num_classes_val'] * num_shot_train, 0], [-1, -1, -1])
        metaval_input_tensors = {
            'train_inputs':
            train_inputs,  # batch_size, num_classes * (num_samples_per_class - update_batch_size), 28 * 28
            'train_labels':
            train_labels,  # batch_size, num_classes * (num_samples_per_class - update_batch_size), num_classes
            'test_inputs':
            test_inputs,  # batch_size, num_classes * update_batch_size, 28 * 28
            'test_labels':
            test_labels,  # batch_size, num_classes * update_batch_size, num_classes
        }

        # Graph for metatraining
        # using scope reuse=tf.AUTO_REUSE, not sure if this is the best way to do it
        if FLAGS.datasource == 'miniimagenet':
            # model_metatrain = CNN_MiniImagenet('model', n_way=FLAGS.num_classes, layers=4, input_tensors=metatrain_input_tensors)
            model_metatrain = CNN_miniimagenet(
                'model',
                num_classes=FLAGS.num_classes,
                input_tensors=metatrain_input_tensors)
        elif FLAGS.datasource == 'cifar':
            model_metatrain = CNN_cifar('model',
                                        num_classes=FLAGS.num_classes,
                                        input_tensors=metatrain_input_tensors)
        else:
            model_metatrain = CNN_omniglot(
                'model',
                num_classes=FLAGS.num_classes,
                input_tensors=metatrain_input_tensors)
            # model_metatrain = CNN2('model', n_way=FLAGS.num_classes, layers=4, input_tensors=metatrain_input_tensors)

        # Graph for metavalidation
        if FLAGS.datasource == 'miniimagenet':
            # model_metaval = CNN_MiniImagenet('model', n_way=FLAGS.num_classes, layers=4, input_tensors=metaval_input_tensors)
            model_metaval = CNN_miniimagenet(
                'model',
                num_classes=hparams['num_classes_val'],
                input_tensors=metaval_input_tensors)
        elif FLAGS.datasource == 'cifar':
            model_metaval = CNN_cifar('model',
                                      num_classes=hparams['num_classes_val'],
                                      input_tensors=metaval_input_tensors)
        else:
            model_metaval = CNN_omniglot('model',
                                         num_classes=FLAGS.num_classes,
                                         input_tensors=metaval_input_tensors)
            # model_metaval = CNN2('model', n_way=FLAGS.num_classes, layers=4, input_tensors=metaval_input_tensors)

        sess = tf.InteractiveSession()
        tf.global_variables_initializer().run()
        if FLAGS.load:
            model_metatrain.load(sess, FLAGS.savepath, verbose=True)
            model_metaval.load(sess, FLAGS.savepath, verbose=True)
        tf.train.start_queue_runners()

        saved_metaval_loss = np.inf
        steps = FLAGS.steps or 40000
        try:
            for step in np.arange(steps):
                # metatrain_loss, metatrain_accuracy, _, _ = sess.run([model_metatrain.loss, model_metatrain.test_accuracy, model_metatrain.optimize, model_metatrain.ae_optimize], {model_metatrain.is_training: True})
                metatrain_loss, metatrain_accuracy, _ = sess.run(
                    [
                        model_metatrain.loss, model_metatrain.test_accuracy,
                        model_metatrain.optimize
                    ], {model_metatrain.is_training: True})
                if step > 0 and step % FLAGS.print_every == 0:
                    # model_metatrain.writer.add_summary(metatrain_summary, step)
                    print('Step #{} - Loss : {:.3f} - Acc : {:.3f}'.format(
                        step, metatrain_loss, metatrain_accuracy))
                    if FLAGS.comet:
                        experiment.log_metric("train_loss",
                                              metatrain_loss,
                                              step=step)
                        experiment.log_metric("train_accuracy",
                                              metatrain_accuracy,
                                              step=step)
                if step > 0 and (step % FLAGS.validate_every == 0
                                 or step == (steps - 1)):
                    if step == (steps - 1):
                        print('Training complete!')
                    metaval_loss, metaval_accuracy = sess.run(
                        [model_metaval.loss, model_metaval.test_accuracy],
                        {model_metaval.is_training: False})
                    # model_metaval.writer.add_summary(metaval_summary, step)
                    print('Validation Results - Loss : {:.3f} - Acc : {:.3f}'.
                          format(metaval_loss, metaval_accuracy))
                    if FLAGS.comet:
                        experiment.log_metric("val_loss",
                                              metaval_loss,
                                              step=step)
                        experiment.log_metric("val_accuracy",
                                              metaval_accuracy,
                                              step=step)
                    if metaval_loss < saved_metaval_loss:
                        saved_metaval_loss = metaval_loss
                        if not FLAGS.load:
                            model_metatrain.save(sess,
                                                 FLAGS.savepath + save_folder,
                                                 global_step=step,
                                                 verbose=True)
                        else:
                            model_metatrain.save(sess,
                                                 FLAGS.savepath,
                                                 global_step=step,
                                                 verbose=True)
        # Catch Ctrl-C event and allow save option
        except KeyboardInterrupt:
            response = raw_input(
                '\nSave latest model at Step #{}? (y/n)\n'.format(step))
            if response == 'y':
                model_metatrain.save(sess,
                                     FLAGS.savepath,
                                     global_step=step,
                                     verbose=True)
            else:
                print('Latest model not saved.')

    if FLAGS.test and FLAGS.datasource in [
            'omniglot', 'miniimagenet', 'cifar'
    ]:

        NUM_TEST_SAMPLES = 600

        num_classes_test = FLAGS.num_classes_test or FLAGS.num_classes

        num_shot_train = FLAGS.num_shot_train or 1
        num_shot_test = FLAGS.num_shot_test or 1

        data_generator = DataGenerator(
            datasource=FLAGS.datasource,
            num_classes=num_classes_test,
            num_samples_per_class=num_shot_train + num_shot_test,
            batch_size=1,  # use 1 for testing to calculate stdev and ci95
            test_set=True,
        )

        image_tensor, label_tensor = data_generator.make_data_tensor(
            train=False)
        train_inputs = tf.slice(image_tensor, [0, 0, 0],
                                [-1, num_classes_test * num_shot_train, -1])
        test_inputs = tf.slice(image_tensor,
                               [0, num_classes_test * num_shot_train, 0],
                               [-1, -1, -1])
        train_labels = tf.slice(label_tensor, [0, 0, 0],
                                [-1, num_classes_test * num_shot_train, -1])
        test_labels = tf.slice(label_tensor,
                               [0, num_classes_test * num_shot_train, 0],
                               [-1, -1, -1])
        input_tensors = {
            'train_inputs':
            train_inputs,  # batch_size, num_classes * (num_samples_per_class - update_batch_size), 28 * 28
            'train_labels':
            train_labels,  # batch_size, num_classes * (num_samples_per_class - update_batch_size), num_classes
            'test_inputs':
            test_inputs,  # batch_size, num_classes * update_batch_size, 28 * 28
            'test_labels':
            test_labels,  # batch_size, num_classes * update_batch_size, num_classes
        }

        if FLAGS.datasource == 'miniimagenet':
            # model = CNN_MiniImagenet('model', n_way=FLAGS.num_classes, layers=4, input_tensors=input_tensors)
            model = CNN_miniimagenet('model',
                                     num_classes=FLAGS.num_classes,
                                     input_tensors=input_tensors)
        elif FLAGS.datasource == 'cifar':
            model = CNN_cifar('model',
                              num_classes=FLAGS.num_classes,
                              input_tensors=input_tensors)
        else:
            model = CNN_omniglot('model',
                                 num_classes=FLAGS.num_classes,
                                 input_tensors=input_tensors)
            # model = CNN2('model', n_way=FLAGS.num_classes, layers=4, input_tensors=input_tensors)

        sess = tf.InteractiveSession()
        tf.global_variables_initializer().run()
        model.load(sess, FLAGS.savepath, verbose=True)
        tf.train.start_queue_runners()

        # BEGIN PLOT
        if FLAGS.plot:
            activations, labels = sess.run(
                [model.train_features, model.train_labels],
                {model.is_training: False})
            activations = activations.reshape(
                [num_shot_train * FLAGS.num_classes, -1])
            from sklearn.manifold import TSNE
            from sklearn.decomposition import PCA
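            # Compressing to 50 dimensions with PCA before t-SNE suppresses noise and
            # speeds up the embedding, as suggested in the scikit-learn TSNE docs.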
            pca = PCA(50)
            print('Compressing with PCA...')
            activations_50dim = pca.fit_transform(activations)
            tsne = TSNE()
            print('Compressing with tSNE...')
            activations_2dim = tsne.fit_transform(activations_50dim)
            labels = np.argmax(labels, axis=1)
            fig, ax = plt.subplots()
            for i in np.arange(FLAGS.num_classes):
                ax.scatter(activations_2dim[np.where(labels == i)][:, 0],
                           activations_2dim[np.where(labels == i)][:, 1],
                           s=5.)
            plt.show()
            quit()
        # END PLOT

        accuracy_list = []

        for task in np.arange(NUM_TEST_SAMPLES):
            accuracy = sess.run(model.test_accuracy,
                                {model.is_training: False})
            accuracy_list.append(accuracy)
            if task > 0 and task % 100 == 0:
                print('Metatested on {} tasks...'.format(task))

        avg = np.mean(accuracy_list)
        stdev = np.std(accuracy_list)
        ci95 = 1.96 * stdev / np.sqrt(NUM_TEST_SAMPLES)
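        # 1.96 is the two-sided z-score for a 95% confidence interval; dividing the
        # sample standard deviation by sqrt(NUM_TEST_SAMPLES) gives the standard error
        # of the mean accuracy over the sampled test tasks.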

        print('\nEnd of Test!')
        print('Accuracy                : {:.4f}'.format(avg))
        print('StdDev                  : {:.4f}'.format(stdev))
        print('95% Confidence Interval : {:.4f}'.format(ci95))

        if FLAGS.comet:
            experiment.log_metric("test_accuracy_mean", avg)
            experiment.log_metric("test_accuracy_stdev", stdev)
            experiment.log_metric("test_accuracy_ci95", ci95)

    if FLAGS.train and FLAGS.datasource in ['sinusoid', 'multimodal', 'step']:

        num_shot_train = FLAGS.num_shot_train or 10
        num_shot_test = FLAGS.num_shot_test or 10

        data_generator = DataGenerator(
            datasource=FLAGS.datasource,
            num_classes=None,
            num_samples_per_class=num_shot_train + num_shot_test,
            batch_size=FLAGS.meta_batch_size,
            test_set=None,
        )

        model = FFN('model')

        sess = tf.InteractiveSession()
        tf.global_variables_initializer().run()

        saved_loss = np.inf
        steps = FLAGS.steps or 50000
        try:
            for step in np.arange(steps):
                if FLAGS.datasource == 'multimodal':
                    batch_x, batch_y, amp, phase, slope, intercept, modes = data_generator.generate(
                    )
                    amp = amp * modes + (modes == False).astype(np.float32)
                elif FLAGS.datasource == 'step':
                    batch_x, batch_y, start_step = data_generator.generate()
                    amp = np.ones(batch_x.shape[0])
                else:
                    batch_x, batch_y, amp, phase = data_generator.generate()
                    amp = np.ones(batch_x.shape[0])
                train_inputs = batch_x[:, :num_shot_train, :]
                train_labels = batch_y[:, :num_shot_train, :]
                test_inputs = batch_x[:, num_shot_train:, :]
                test_labels = batch_y[:, num_shot_train:, :]
                feed_dict = {
                    model.train_inputs: train_inputs,
                    model.train_labels: train_labels,
                    model.test_inputs: test_inputs,
                    model.test_labels: test_labels,
                    model.amp: amp,  # use amplitude to scale loss
                }

                metatrain_postloss, _ = sess.run([model.loss, model.optimize],
                                                 feed_dict)
                if step > 0 and step % FLAGS.print_every == 0:
                    # model.writer.add_summary(metatrain_summary, step)
                    print('Step #{} - PreLoss : {:.3f} - PostLoss : {:.3f}'.
                          format(step, 0., metatrain_postloss))
                    if step == (steps - 1):
                        print('Training complete!')
                    if metatrain_postloss < saved_loss:
                        saved_loss = metatrain_postloss
                        model.save(sess,
                                   FLAGS.savepath + save_folder,
                                   global_step=step,
                                   verbose=True)
        # Catch Ctrl-C event and allow save option
        except KeyboardInterrupt:
            response = raw_input(
                '\nSave latest model at Step #{}? (y/n)\n'.format(step))
            if response == 'y':
                model.save(sess,
                           FLAGS.savepath,
                           global_step=step,
                           verbose=True)
            else:
                print('Latest model not saved.')

    if FLAGS.test and FLAGS.datasource in ['sinusoid', 'multimodal', 'step']:

        num_shot_train = FLAGS.num_shot_train or 10

        data_generator = DataGenerator(
            datasource=FLAGS.datasource,
            num_classes=None,
            num_samples_per_class=num_shot_train,
            batch_size=1,
            test_set=None,
        )

        model = FFN('model',
                    num_train_samples=num_shot_train,
                    num_test_samples=50)

        sess = tf.InteractiveSession()
        model.load(sess, FLAGS.savepath, verbose=True)

        if FLAGS.datasource == 'multimodal':
            train_inputs, train_labels, amp, phase, slope, intercept, modes = data_generator.generate(
            )
            amp = amp * modes + (modes == False).astype(np.float32)
            x = np.arange(-5., 5., 0.2)
            if modes[0] == 0:
                y = slope * x + intercept
            else:
                y = amp * np.sin(x - phase)
        elif FLAGS.datasource == 'step':
            train_inputs, train_labels, start_step = data_generator.generate()
            x = np.arange(-5., 5., 0.2)
            y = np.ones_like(x) - (x < start_step).astype(
                np.float32) - (x > (start_step + 2)).astype(np.float32)
        else:
            train_inputs, train_labels, amp, phase = data_generator.generate()
            amp = 5.
            phase = 0.
            x = np.arange(5., 15., 0.2).reshape(1, -1, 1)
            y = amp * np.sin(x - phase).reshape(1, -1, 1)
            train_inputs = np.arange(5., 10., .5).reshape(1, -1, 1)
            # train_inputs = np.arange(-5., 0., .5).reshape(1, -1, 1)
            train_labels = amp * np.sin(train_inputs - phase)

        feed_dict = {
            model.train_inputs: train_inputs,
            model.train_labels: train_labels,
            model.test_inputs: x.reshape(1, -1, 1),
            model.test_labels: y.reshape(1, -1, 1),
        }

        postprediction, postloss = sess.run(
            [model.predictions, model.plain_loss], feed_dict)

        print(postloss)

        fig, ax = plt.subplots()
        ax.plot(x.reshape(-1),
                y.reshape(-1),
                color='#2c3e50',
                linewidth=0.8,
                label='Truth')
        ax.scatter(train_inputs.reshape(-1),
                   train_labels.reshape(-1),
                   color='#2c3e50',
                   label='Training Set')
        ax.plot(x.reshape(-1),
                postprediction.reshape(-1),
                label='Prediction',
                color='#e74c3c',
                linestyle='--')
        ax.legend()
        ax.set_title(postloss)
        plt.show()
        if FLAGS.filename is not None:
            fig.savefig('figures/' + FLAGS.filename + '.png',
                        dpi=72,
                        bbox_inches='tight')
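        # A minimal sketch (not in the original script): when Comet logging is enabled,
        # the final test loss and the regression figure could be attached to the same
        # experiment, assuming `experiment` was created above under FLAGS.comet.
        if FLAGS.comet:
            experiment.log_metric('test_post_loss', float(postloss))
            experiment.log_figure(figure_name='regression_fit', figure=fig)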