Ejemplo n.º 1
0
 def predict(self, input):
     """Run the model on *input* after fitting it to the fixed LSTM window.

     The model expects exactly ``lstmInputPoints`` time steps. Shorter
     sequences are zero-padded on the right; longer ones are truncated to
     the most recent ``lstmInputPoints`` samples.

     input: assumed to be a numpy array of shape (1, points, 2) — the
         original indexing (``input[0, i]``) requires ndarray semantics;
         TODO confirm against Predictor.validateInput.
     Returns whatever ``self.model.predict`` returns (verbose output off).
     """
     Predictor.validateInput(input)
     points = len(input[0])
     if points < lstmInputPoints:
         # Too short: left-align the samples and zero-pad the remainder.
         # Vectorized slice assignment replaces the former per-element loop.
         adapted = np.zeros((1, lstmInputPoints, 2))
         adapted[0, :points] = input[0]
         input = adapted
     elif points > lstmInputPoints:
         # Too long: keep only the most recent lstmInputPoints samples.
         input = input[0][points - lstmInputPoints:].reshape(1, lstmInputPoints, 2)
     return self.model.predict(input, verbose=0)
Ejemplo n.º 2
0
 def predict(self, input):
     """Validate *input*, reshape it via ``adapt``, and return the model output.

     Delegates all shape handling to ``self.adapt``; prediction runs with
     verbose output disabled.
     """
     Predictor.validateInput(input)
     adapted = self.adapt(input)
     return self.model.predict(adapted, verbose=0)
Ejemplo n.º 3
0
# Translate a single source sentence with a trained model and print the
# top-N candidate translations.
#
# NOTE(review): relies on IndexDictionary, build_model, Predictor and
# IndexedInputTargetTranslationDataset being imported elsewhere in this file.
from argparse import ArgumentParser
import json

parser = ArgumentParser(description='Predict translation')
parser.add_argument('--source', type=str)
parser.add_argument('--config', type=str, required=True)
parser.add_argument('--checkpoint', type=str)
parser.add_argument('--num_candidates', type=int, default=1)

args = parser.parse_args()
# All data/model settings (data_dir, vocabulary_size, ...) come from the
# JSON config file.
with open(args.config) as f:
    config = json.load(f)

print('Constructing dictionaries...')
# Token<->index mappings for the source and target languages.
source_dictionary = IndexDictionary.load(config['data_dir'], mode='source', vocabulary_size=config['vocabulary_size'])
target_dictionary = IndexDictionary.load(config['data_dir'], mode='target', vocabulary_size=config['vocabulary_size'])

print('Building model...')
model = build_model(config, source_dictionary.vocabulary_size, target_dictionary.vocabulary_size)

# Wrap the model with tokenization (preprocess) and detokenization
# (postprocess, which also drops the '<EndSent>' sentinel token).
predictor = Predictor(
    preprocess=IndexedInputTargetTranslationDataset.preprocess(source_dictionary),
    postprocess=lambda x: ' '.join([token for token in target_dictionary.tokenify_indexes(x) if token != '<EndSent>']),
    model=model,
    checkpoint_filepath=args.checkpoint
)

# Print each candidate translation for the given source sentence.
for index, candidate in enumerate(predictor.predict_one(args.source, num_candidates=args.num_candidates)):
    print(f'Candidate {index} : {candidate}')
Ejemplo n.º 4
0
from predictors import Predictor
from suggestions import Suggestion
import numpy as np

if __name__ == "__main__":
    # Hard-coded sample profile used to exercise the predictor end to end.
    age = 28
    sex = 1            # NOTE(review): presumably an encoded category — confirm
    height = 180       # centimeters
    weight = 75        # kilograms
    lean_factor = 0.9
    activity_factor = 1.5

    # Rows look like (item id, value, value) records; exact schema is
    # defined by Predictor.suggestions_predictor — confirm against it.
    food_intake = np.array([[101, 500, 1], [102, 120, 2]])
    activity = np.array([[101, height, weight], [105, height, weight]])

    predictor = Predictor()
    suggester = Suggestion()

    suggestions, calories_required = predictor.suggestions_predictor(
        age, sex, height, weight, lean_factor, activity_factor, food_intake,
        activity)

    # Turn the first suggestion row into human-readable advice and print it.
    exercise_tip = suggester.exercise_suggestion(suggestions[0, 0], suggestions[0, 1])
    food_tip = suggester.food_rec(suggestions[0, 2], calories_required)
    print(exercise_tip)
    print(food_tip)
Ejemplo n.º 5
0
# -------------------------------- predictor training scripts --------------------------------------------------------
# Trains one Predictor per skip_step setting and logs running loss statistics.
# NOTE(review): depends on EnvironmentState, StateDataset, DataLoader,
# Predictor, AverageMeter, Timer and torch defined/imported elsewhere.
if __name__ == '__main__':
    # Each entry is a skip_step (gap between sampled states) to train a
    # separate predictor on.
    step_list = [1, 8, 16, 24, 50, 100]
    print('training start now: ')
    for n_step in step_list:
        i = -1  # batch counter; incremented at the top of the inner loop
        env = EnvironmentState()
        state_set = StateDataset(env,
                                 skip_step=n_step,
                                 size=7000000,
                                 random_torque=True,
                                 remove_torque=False)
        state_set_loader = DataLoader(state_set, batch_size=1024)

        predictor = Predictor(state_set.output_size, 2)
        predictor.stepper.recurrent_step = 0  # recurrence disabled for this run

        avm = AverageMeter()  # running mean/std of the training loss
        with Timer():
            for s in state_set_loader:
                i += 1
                # Training target: change in state dims 12:14 between the
                # paired states s0 -> s1 (per the inline note, with torques).
                d = s['s1'][:, 12:14] - s['s0'][:, 12:14]  # with torques
                r, loss = predictor.optimize(s['s0'], d)
                avm.log(loss)
                # Log loss, rmse, prediction/target ratio and value ranges
                # every 10 batches.
                if i % 10 == 0:
                    print(
                        f'epoch {i}: trn loss {avm.value:.4f} {avm.std:.4f}, rmse {avm.value**0.5:.4f}, '
                        f'pred to stat error ratio: {torch.mean(torch.abs(r / d)):.4f} '
                        f'max_d {torch.max(d):.2f} {torch.min(d):.2f}, max_r {torch.max(r):.2f} {torch.min(r):.2f}'
                    )
Ejemplo n.º 6
0
    mode='source',
    vocabulary_size=config['vocabulary_size'])
# Token<->index mapping for the target language (the source-side dictionary
# is loaded by the statements just above this fragment).
target_dictionary = IndexDictionary.load(
    config['data_dir'],
    mode='target',
    vocabulary_size=config['vocabulary_size'])

print('Building model...')
model = build_model(config, source_dictionary.vocabulary_size,
                    target_dictionary.vocabulary_size)

# Beam-search predictor: preprocess tokenizes the source sentence,
# postprocess turns index sequences back into text and strips the
# '<EndSent>' sentinel token.
predictor = Predictor(
    preprocess=IndexedInputTargetTranslationDataset.preprocess(
        source_dictionary),
    postprocess=lambda x: ' '.join([
        token for token in target_dictionary.tokenify_indexes(x)
        if token != '<EndSent>'
    ]),
    model=model,
    checkpoint_filepath=args.checkpoint,
    beam_size=3)

# Default evaluation log path embeds the (slash-escaped) config name and a
# run timestamp; an explicit --save_result path overrides it.
timestamp = datetime.now()
if args.save_result is None:
    eval_filepath = 'logs/eval-{config}-time={timestamp}.csv'.format(
        config=args.config.replace('/', '-'),
        timestamp=timestamp.strftime("%Y_%m_%d_%H_%M_%S"))
else:
    eval_filepath = args.save_result

evaluator = Evaluator(predictor=predictor, save_filepath=eval_filepath)