Code example #1
import os
import pickle

def main(config):
    if config.prepare_data:
        print('==> Data preparation')
        save_data(config)
        return
    data_iterator = load_data_iterator(config)
    model = build_model(config)
    trainer = Trainer(config, model, data_iterator)
    # evaluator = Eval()
    if config.train:
        assert not config.eval_only
        print('Training Started')
        loss = []
        if config.load_model:
            trainer.load_checkpoint()
            path = os.path.join(config.exp_dir, 'loss.pkl')
            with open(path, 'rb') as f:
                loss = pickle.load(f)
        for epoch in range(config.num_epochs):
            train_losses = trainer.iter()
            valid_losses = trainer.eval(mode='valid')
            test_losses = trainer.eval(mode='test')
            loss.append((train_losses, valid_losses, test_losses))
            if config.save_periodic:
                if epoch % config.save_every == 0:
                    trainer.save_checkpoint(epoch)
                    print(f'Model saved for epoch = {epoch}')
        if config.dump_loss:
            path = os.path.join(config.exp_dir, 'loss.pkl')
            with open(path, 'wb') as f:
                pickle.dump(loss, f)

    elif config.eval_only:
        # evaluator
        print('Evaluation')
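Example #1 assumes a config namespace exposing flags such as prepare_data, train, eval_only, load_model, save_periodic, save_every, num_epochs, dump_loss, and exp_dir. Below is a minimal sketch of building such a namespace with argparse; every flag name is inferred from the usage above, not taken from the original project:

import argparse

def get_config():
    # Hypothetical parser mirroring the flags main() reads above.
    parser = argparse.ArgumentParser()
    parser.add_argument('--prepare_data', action='store_true')
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--eval_only', action='store_true')
    parser.add_argument('--load_model', action='store_true')
    parser.add_argument('--save_periodic', action='store_true')
    parser.add_argument('--save_every', type=int, default=10)
    parser.add_argument('--num_epochs', type=int, default=100)
    parser.add_argument('--dump_loss', action='store_true')
    parser.add_argument('--exp_dir', type=str, default='./exp')
    return parser.parse_args()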
Code example #2
import queue

import tensorflow as tf

# Excerpt: the constructor of an agent class (TF 1.x with the Keras
# TensorFlow backend); build_model() is defined elsewhere in the project.
def __init__(self):
    import keras.backend.tensorflow_backend as K
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # claim GPU memory on demand
    self.session = tf.Session(config=config)
    K.set_session(self.session)
    K.manual_variable_initialization(True)
    self.lp = K.learning_phase()

    self.model = build_model()
    self.model._make_predict_function()  # pre-build the predict function for thread safety
    self.model.summary()

    self.predict_model = build_model()
    self.predict_model._make_predict_function()

    self.graph = self._build_graph(self.model)
    self.popart = self.popart_ops()
    self.session.run(tf.global_variables_initializer())
    self.default_graph = tf.get_default_graph()
    self.sync_weight()
    self.tensorboard_setting()
    self.default_graph.finalize()  # freeze the graph against accidental additions
    self.taskqueue = queue.Queue(maxsize=16)
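The session setup above is TF 1.x API. Under TensorFlow 2 the same on-demand GPU memory behaviour is requested through tf.config instead; a short sketch, independent of the original class:

import tensorflow as tf

# TF 2.x equivalent of ConfigProto's allow_growth flag.
for gpu in tf.config.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)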
Code example #3
import os

from keras.applications.vgg19 import VGG19
from keras.models import Model

# build_model, predict_from_ae and img_input_shape come from the surrounding module.
def predict_from_weights(input_path, weight_path, limit=10):
    # VGG for the perceptual loss
    base_model = VGG19(weights="imagenet", include_top=False,
                       input_shape=img_input_shape)

    perceptual_model = Model(inputs=base_model.input,
                             outputs=[base_model.get_layer("block2_pool").output,
                                      base_model.get_layer("block5_pool").output],
                             name="VGG")

    autoencoder, _ = build_model(perceptual_model)

    if os.path.isfile(weight_path):
        print("loading weights from {}".format(weight_path))
        autoencoder.load_weights(weight_path)
    else:
        raise FileNotFoundError("weight path {} does not exist".format(weight_path))

    predict_from_ae(input_path, autoencoder, limit)
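A hypothetical invocation; both paths are placeholders, and img_input_shape must match the shape the weights were trained with:

if __name__ == '__main__':
    predict_from_weights('data/test_images', 'weights.hdf5', limit=5)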
Code example #4
File: main.py  Project: Mehrazin/Symbolic-Mathematics
import json

import torch

def main(args):
    """
    Runs the main training and evaluation procedure.

    args: a parser namespace holding the main configuration of the current experiment
    """
    config = Config(args)
    set_seed(config)
    logger = config.get_logger()
    env = EnvHndler(config)
    # Clear the CUDA cache
    torch.cuda.empty_cache()
    model = build_model(config)
    trainer = Trainer(config, env, model)
    evaluator = Evaluator(trainer)
    # evaluation
    if config.eval_only:
        scores = evaluator.run_all_evals()
        for k, v in scores.items():
            logger.info("%s -> %.6f" % (k, v))
        logger.info("__log__:%s" % json.dumps(scores))
        exit()

    # training
    for epoch in range(config.max_epoch):

        logger.info("============ Starting epoch %i ... ============" %
                    trainer.epoch)

        trainer.n_equations = 0
        torch.cuda.empty_cache()
        while trainer.n_equations < trainer.epoch_size:
            # training steps
            torch.cuda.empty_cache()
            trainer.enc_dec_step()
            trainer.iter()

        logger.info("============ End of epoch %i ============" %
                    trainer.epoch)

        # evaluate perplexity
        scores = evaluator.run_all_evals()

        # print / JSON log
        for k, v in scores.items():
            logger.info("%s -> %.6f" % (k, v))

        logger.info("__log__:%s" % json.dumps(scores))

        # end of epoch
        trainer.save_best_model(scores)
        trainer.save_periodic()
        trainer.end_epoch(scores)
        if epoch % 10 == 0:
            while True:
                t = input("Continue training? [y/n]")
                if t not in ['y', 'n']:
                    print('Invalid input')
                    continue
                elif t == 'y':
                    break
                else:
                    exit()
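The "__log__:" prefix makes the score lines machine-readable. A small sketch of recovering the score dicts from such a log file; the path argument is a placeholder:

import json

def parse_scores(log_path):
    # Collect every JSON payload logged with the "__log__:" prefix above.
    scores = []
    with open(log_path) as f:
        for line in f:
            if '__log__:' in line:
                scores.append(json.loads(line.split('__log__:', 1)[1]))
    return scores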
Code example #5
    "optimizer": optimizer_params[i],
    "earlystopping": earlystopping_params[j],
    "loss_weights": loss_params[k]
} for (i, j, k) in [
    x for x in itertools.product(optimizer_params, earlystopping_params,
                                 loss_params)
]]

for idx, exp in enumerate(experiment):
    print("starting experiment {} with {}".format(idx, exp))

    # create sub_experiment file
    sub_exp_path = os.path.join(exp_path, str(idx))
    os.mkdir(sub_exp_path)

    autoencoder, _ = build_model(perceptual_model)

    load_model = False
    if load_model:
        weight_path = "weights.hdf5"
        print("loading weights from {}".format(weight_path))
        autoencoder.load_weights(weight_path)

    optimizer = exp["optimizer"][0](**exp["optimizer"][1])
    loss_weights = exp["loss_weights"]

    autoencoder.compile(optimizer=optimizer,
                        loss={
                            "clipping_layer_1": loss,
                            "rounding_layer_1": entropy,
                            "VGG_block_2": perceptual_2,
Code example #6
# Excerpt: Preprocess, build_model and ticker are defined elsewhere in the project.
training_df, testing_df, training_matrix, testing_matrix = Preprocess.data_separation(
    '2010-01-01', '2017-01-01', '2017-01-01', '2018-01-01', ticker)
normalised_data = Preprocess.data_normalisation(ticker)
normalised_training_data = Preprocess.normalised_training(training_matrix, normalised_data)
normalised_testing_data = Preprocess.normalised_testing(testing_matrix, normalised_data)

# Parameters for Time Series Model
feature_length = 10
window_length = 20
batch_size = 256

# Create Training samples
x_train, y_train = Preprocess.create_samples(normalised_training_data, window_length, feature_length)

# Building Time-series model
model = build_model([batch_size, window_length, feature_length])

# Back-test output
x_test, y_test = Preprocess.create_samples(normalised_testing_data, window_length, feature_length)

import datetime  # used for the backtest date range

# Import the backtrader platform
import backtrader as bt
from Trading_Strategy import SentimentStrategy


if __name__ == '__main__':
    cerebro = bt.Cerebro()
    cerebro.addstrategy(SentimentStrategy)
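The example stops right after registering the strategy. A hypothetical continuation of the __main__ block, using standard backtrader calls (the CSV feed and starting cash are placeholders):

    # Hypothetical continuation: attach a data feed, fund the broker, run, plot.
    data = bt.feeds.YahooFinanceCSVData(
        dataname='ticker.csv',  # placeholder CSV exported beforehand
        fromdate=datetime.datetime(2017, 1, 1),
        todate=datetime.datetime(2018, 1, 1))
    cerebro.adddata(data)
    cerebro.broker.setcash(100000.0)
    cerebro.run()
    cerebro.plot()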
Code example #7
import pickle
import numpy as np
from Model import build_model
import json
import params

with open('preprocessing.pkl', 'rb') as f:
    preprocessing = pickle.load(f)

with open('test_data.pkl', 'rb') as f:
    test_data = pickle.load(f)

model = build_model(preprocessing.tokenizer)
model.load_weights('Models/model_aws.hdf5')

preprocessed_test_data = list()
for sample in test_data:
    # Keep samples whose (presumed) answer span fits within the context
    # window, plus no-answer samples flagged with -1.
    if sample[3] < sample[4] < params.CONTEXT_LEN or sample[3] == -1:
        preprocessed_test_data.append(preprocessing.text_to_seq_sample(sample))

result = {}
for sample in preprocessed_test_data:
    id_ = sample[6]
    pred = model.predict([sample[1].reshape(1, params.QUESTION_LEN), sample[0].reshape(1, params.CONTEXT_LEN)])

    p_start = int(np.argmax(pred[0]))

    p_end = int(np.argmax(pred[1]))
    is_noans = np.round(pred[2])
    result[id_] = preprocessing.target_to_words(sample[0].reshape(-1, ), p_start, p_end, is_noans)
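To turn result into a SQuAD-style predictions file, it can simply be dumped with the json module already imported above; the output filename is a placeholder:

with open('predictions.json', 'w') as f:
    json.dump(result, f)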
Code example #8
import sys

import gym
import matplotlib.pyplot as plt
from keras.models import load_model

# build_model and get_training_data come from the surrounding project.

train = False

if len(sys.argv) > 1 and sys.argv[1] == "train":
    train = True

env = gym.make("CartPole-v0")

# builds and trains the model
if train:

    x_data, y_data = get_training_data()

    model = build_model()
    history = model.fit(x_data, y_data, epochs=100)

    model.save('cartpolemodel.h5')

    # creates graph displaying loss rate
    plt.plot(history.history['mean_squared_error'])
    plt.title("Model Loss")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")

    plt.show()

# loads a previously trained model
else:
    model = load_model('cartpolemodel.h5')
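A short greedy rollout for the loaded model, using the classic gym step API; that the model maps a CartPole observation to per-action values is an inference from the surrounding code, not something the source states:

import numpy as np

# Roll out one episode greedily (old gym API: step() returns obs, reward, done, info).
obs = env.reset()
done, total_reward = False, 0.0
while not done:
    q_values = model.predict(obs.reshape(1, -1))
    action = int(np.argmax(q_values[0]))
    obs, reward, done, _ = env.step(action)
    total_reward += reward
print("episode reward:", total_reward)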
Code example #9
import json
import logging
import os

import torch

# senteval, params_senteval, batcher and build_model are assumed to be
# defined or imported elsewhere in this script.
class Bunch(object):
    # Expose the keys of a plain dict as attributes.
    def __init__(self, adict):
        self.__dict__.update(adict)


if __name__ == "__main__":
    logging.basicConfig(format='%(asctime)s : %(message)s',
                        level=logging.DEBUG)

    init_checkpoint = '/u/lupeng/Project/code/Discourse_summ/saved/octal14/transformer_cnndm_01'
    with open(os.path.join(init_checkpoint, 'config.json'), 'r') as fjson:
        argparse_dict = json.load(fjson)
    checkpoint = torch.load(os.path.join(init_checkpoint, 'checkpoint'))
    args = Bunch(argparse_dict)
    model = build_model(args, None)
    model.load_state_dict(checkpoint['model_state_dict'])
    params_senteval['encoder'] = model

    logging.info('start evaluating...')
    se = senteval.engine.SE(params_senteval, batcher)
    transfer_tasks = [
        'CR', 'MR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC', 'SNLI',
        'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
        'ImageCaptionRetrieval', 'STS12', 'STS13', 'STS14', 'STS15', 'STS16',
        'Length', 'WordContent', 'Depth', 'TopConstituents', 'BigramShift',
        'Tense', 'SubjNumber', 'ObjNumber', 'OddManOut',
        'CoordinationInversion'
    ]
    results = se.eval(transfer_tasks)
    logging.info('results: %s', results)
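For reference, a sketch of the two pieces this script assumes but never shows: the params_senteval dict and the batcher callback. The shapes follow SentEval's documented interface; the parameter values and the stub encoder are assumptions:

import numpy as np

# Minimal SentEval wiring; 'task_path' must point at SentEval's data directory.
params_senteval = {'task_path': 'SentEval/data', 'usepytorch': True, 'kfold': 5}

def batcher(params, batch):
    # SentEval calls this with a list of tokenized sentences and expects an
    # array of shape (len(batch), embedding_dim). This stub returns random
    # vectors; the real version would encode with params['encoder'].
    return np.random.rand(len(batch), 768)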