Example #1
def main(args):
    """ Main entry point of the app """
    # Test with pretrained model
    if args.test:
        from src.test import Test

        dataset_pred = np.loadtxt(absolute_file_path('../datasets/pred1.csv'), delimiter=",")
        x_predict = dataset_pred[:, :2]
        y_predict = dataset_pred[:, 2:]

        Test().test(x_predict, y_predict)

    # Train new model
    elif args.train:
        from src.train import Train

        print('Loading datasets')
        # load training dataset
        dataset_train = np.loadtxt(absolute_file_path('../datasets/train_2dof.csv'), delimiter=",")
        x_train = dataset_train[:1000, :2]  # (input vector) first two columns are end effector states
        y_train = dataset_train[:1000, 2:]  # (output vector) remaining columns are joint angles
        print(x_train.shape)
        # load test dataset
        dataset_test = np.loadtxt(absolute_file_path('../datasets/test_2dof.csv'), delimiter=",")
        # note: evaluation data is currently sliced from the training set;
        # uncomment the lines below to use the held-out test set instead
        x_test = dataset_train[200:300, :2]
        y_test = dataset_train[200:300, 2:]
        # x_test = dataset_test[:, :2]
        # y_test = dataset_test[:, 2:]

        Train().train(x_train, y_train, x_test, y_test)
    
    else:
        parser.print_help()
Example #2
 def init_train_class(self):
     """Instantiate the Train helper; return False if the data file is unusable."""
     try:
         self.train_class = Train(DATA_FILE)
     except PermissionError:
         print("Check the data file permissions.")
         return False
     except RuntimeError:
         print("Cannot train on an empty training set.")
         return False
     return True
Example #3
    def test_model(self):
        # Run the training pipeline; it returns the processed data and
        # the fitted model
        test_data = pd.read_csv(self.data_path)
        test_data_and_model = Train().execute(test_data, self.model_data_path)

        # Unpickle the previously saved reference model
        with open(self.expected_model_path, 'rb') as model_file:
            model = pkl.load(model_file)

        print(test_data_and_model[1])
        print(model)

        self.assertEqual(type(test_data_and_model[1]), type(model))
Example #4
def test_agent():
    state_space_dim = 3
    action_space_dim = 4
    train = Train()
    agent = Agent(state_space_dim=state_space_dim,
                  action_space_dim=action_space_dim,
                  low_action=-1,
                  high_action=1,
                  load=False)
    state = np.random.rand(state_space_dim)[None]
    next_state = np.random.rand(state_space_dim)[None]
    action = agent.get_action(state)
    reward = np.array([1])
    done = np.array([0])
    # smoke test: a single training step should run without raising
    Q_loss, policy_loss = train(agent, state, next_state, action, reward, done)
    assert Q_loss is not None and policy_loss is not None
Example #5
 def assert_v(num_epochs, batch_size, rnn_size, embed_dim, seq_length,
              learning_rate, show_every_n_batches, ip):
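     """Verify the TensorFlow version and GPU, then dispatch: ip == 1 trains, ip == 2 tests."""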
     assert LooseVersion(tf.__version__) >= LooseVersion(
         '1.0'), 'Please use TensorFlow version 1.0 or newer'
     print('TensorFlow Version: {}'.format(tf.__version__))
     if not tf.test.gpu_device_name():
         warnings.warn(
             'No GPU found. Please use a GPU to train your neural network.')
         print(
             '*** THIS MODEL REQUIRES A GPU TO RUN. YOU ARE USING CPU. ***')
     else:
         print('*****Author: Satyaki Sanyal*****')
         print(
             '***This project must only be used for educational purposes***')
         if ip == 1:
             Train(num_epochs, batch_size, rnn_size, embed_dim, seq_length,
                   learning_rate, show_every_n_batches).train()
         elif ip == 2:
             Test().test()
         print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
Example #6
def main():
    # project parameters
    project_parameters = ProjectParameters().parse()

    assert project_parameters.mode in [
        'train', 'predict', 'predict_gui', 'tuning'
    ], 'please check the mode argument.\nmode: {}\nvalid: {}'.format(
        project_parameters.mode, ['train', 'predict', 'predict_gui', 'tuning'])

    if project_parameters.mode == 'train':
        result = Train(project_parameters=project_parameters).train()
    elif project_parameters.mode == 'predict':
        result = Predict(project_parameters=project_parameters).predict(
            filepath=project_parameters.root)
    elif project_parameters.mode == 'predict_gui':
        from src.predict_gui import PredictGUI
        result = PredictGUI(project_parameters=project_parameters).run()
    elif project_parameters.mode == 'tuning':
        result = Tuning(project_parameters=project_parameters,
                        train_class=Train).tuning()
    return result
Example #7
 def main(self):
     train_graph = tf.Graph()
     save_path = self.path + '/checkpoints/dev'
     source_path = self.path + '/data/small_vocab_en'
     target_path = self.path + '/data/small_vocab_fr'
     PreProcess(source_path, target_path).process_and_save_data()
     _, batch_size, rnn_size, num_layers, encoding_embedding_size, decoding_embedding_size, _, _ = \
         Params().get()
     (source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = \
         self.load_process()
     max_source_sentence_length = max(
         [len(sentence) for sentence in source_int_text])
     with train_graph.as_default():
         input_data, targets, lr, keep_prob = Inputs().get()
         sequence_length = tf.placeholder_with_default(
             max_source_sentence_length, None, name='sequence_length')
         input_shape = tf.shape(input_data)
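         # dynamic batch size; used below to size the loss weight matrix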
         train_logits, inference_logits = Seq2seq().seq2seq_model(
             tf.reverse(input_data, [-1]), targets, keep_prob, batch_size,
             sequence_length, len(source_vocab_to_int),
             len(target_vocab_to_int), encoding_embedding_size,
             decoding_embedding_size, rnn_size, num_layers,
             target_vocab_to_int)
         tf.identity(inference_logits, 'logits')
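         # optimization: sequence-averaged cross-entropy loss, with
         # gradients clipped to [-1, 1] to stabilize RNN training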
         with tf.name_scope("optimization"):
             cost = tf.contrib.seq2seq.sequence_loss(
                 train_logits, targets,
                 tf.ones([input_shape[0], sequence_length]))
             optimizer = tf.train.AdamOptimizer(lr)
             gradients = optimizer.compute_gradients(cost)
             capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var)
                                 for grad, var in gradients
                                 if grad is not None]
             train_op = optimizer.apply_gradients(capped_gradients)
     Train(source_int_text, target_int_text, train_graph, train_op, cost,
           input_data, targets, lr, sequence_length, keep_prob,
           inference_logits, save_path).train()
Example #8
 def test_column_removed(self):
     # Run the training pipeline and check that the target column
     # 'Survived' was dropped from the returned data
     test_data = pd.read_csv(self.data_path)
     test_data_and_model = Train().execute(test_data, self.model_data_path)
     self.assertNotIn('Survived', test_data_and_model[0])
Example #9
                        help='Size of input image',
                        default=98,
                        type=int)
    parser.add_argument('--batch-size',
                        help='Global Batch Size',
                        default=32,
                        type=int)
    parser.add_argument('--model',
                        help='GAN model Name',
                        default='DCGAN',
                        type=str)
    parser.add_argument('--loss',
                        help='GAN ML loss name',
                        default='GANCrossEntropyLoss',
                        type=str)
    parser.add_argument('--optimizer',
                        help='GAN optimizer name',
                        default='Adam',
                        type=str)

    args = parser.parse_args()

    for metric in args.metrics:
        if metric not in metric_defaults:
            print('Error: unknown metric \'%s\'' % metric)
            sys.exit(1)

    config = Config(**vars(args))
    train = Train()
    train(config)
Example #10
from src.train import Train

# Load diabetes data
train = Train()
X, y = train.load_data()

# Create a DataFrame
df_diabetes = train.create_dataframe(X, y, [
    'age', 'sex', 'bmi', 'bp', 's1', 's2', 's3', 's4', 's5', 's6',
    'progression'
])

# Split data
train_data, test_data = train.split_data(df_diabetes)

# Train the model
model = train.train(train_data)

# Evaluate
train.evaluate(model, test_data)

# Predict!
print(train.predict(model, test_data.drop(["progression"], axis=1)))

# Persist the model
train.persist_model(model, 'model/lr-diabetes.model')
Example #11
#! /usr/bin/python

import argparse


p = argparse.ArgumentParser()
p.add_argument('-t', '--train',  action='store_true', help='train network')
p.add_argument('-o', '--oxford', action='store_true', help='use Oxford-IIIT pet data set')
p.add_argument('-v', '--verify', action='store_true', help='verify against test image')
p.add_argument('-c', '--clean',  action='store_true', help='clean up models')
args = p.parse_args()

if args.train and args.oxford:
    from src.train  import Train
    Train().oxford()
elif args.verify:
    from src.predict import Predict
    Predict().segmentation()
elif args.clean:
    from src.clean import Clean
    Clean().clean()
else:
    p.print_help()

Example #12
               action='store_true',
               help='Run inference test on existing network model')
p.add_argument('--table',
               action='store_true',
               help='Produce table of determinant values')
p.add_argument('--ik',
               action='store_true',
               help='Run inverse kinematic test using sympy solver')

args = p.parse_args()

if args.test:
    from tests.test import Test
    Test().test()

if args.train:
    from src.train import Train
    Train().train()

if args.infer:
    from src.infer import Infer
    Infer().infer()

if args.table:
    from tests.table import Table
    Table().table()

if args.ik:
    from tests.ik import IK
    IK().ik()
Example #13
def train(ctx, episodes, steps):
    logger = Logging([
        'episode', 'rewards', 'running_40_episode_reward', 'episode_length',
        'episode_run_time', 'average_step_run_time', 'q_loss', 'p_loss'
    ])

    env, state_space_dim, action_space_dim, state_norm_array, min_action, \
        max_action = setup_env()
    replay_buffer = ReplayBuffer(state_space_dim=state_space_dim,
                                 action_space_dim=action_space_dim,
                                 size=BUFFER_SIZE,
                                 sample_size=BATCH_SIZE)
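    # transitions accumulate here and are sampled in BATCH_SIZE
    # minibatches once the buffer reports ready (training step below)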

    # noise_process = OUNoise(
    #     dim=action_space_dim,
    #     sigma=SIGMA,
    #     theta=THETA,
    #     dt=1e-2)

    # noise_process = NormalNoise(
    #     dim=action_space_dim,
    #     sigma=SIGMA)

    # noise_process = LinearSegmentNoise(
    #     dim=action_space_dim,
    #     sigma=SIGMA)

    noise_process = SmoothNoiseND(steps=steps,
                                  dim=action_space_dim,
                                  sigma=SIGMA)
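    # only the smooth, temporally correlated noise process is active;
    # the commented-out alternatives above are kept for comparison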

    agent = Agent(state_space_dim,
                  action_space_dim,
                  layer_dims=LAYERS_DIMS,
                  low_action=min_action,
                  high_action=max_action,
                  noise_process=noise_process,
                  tau=TAU,
                  load=True)
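    # load=True restores previously saved weights (written each episode
    # by agent.save_models() below)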

    train = Train(discount_factor=DISCOUNT,
                  actor_learning_rate=ACTOR_LR,
                  critic_learning_rate=CRITIC_LR)

    training_rewards = []
    for episode in range(episodes):
        noise_process.reset()
        state = np.array(env.reset(), dtype='float32')
        episode_reward = 0
        step_count = 0
        done = False
        episode_start_time = time()
        step_times = []
        q_losses = []
        p_losses = []
        while not done and step_count < steps:

            step_time_start = time()
            step_count += 1

            # environment step
            action = agent.get_action(state[None], with_exploration=True)[0]
            next_state, reward, done, _ = env.step(action)
            replay_buffer.push((state, next_state, action, reward, done))
            state = next_state

            # training step
            if replay_buffer.ready:
                states, next_states, actions, \
                    rewards, dones = replay_buffer.sample()
                q_loss, p_loss = \
                    train(agent, states, next_states,
                          actions, rewards, dones)
                agent.track_weights()
                q_losses.append(q_loss.numpy())
                p_losses.append(p_loss.numpy())
            episode_reward += reward
            step_time_end = time()
            step_times.append(step_time_end - step_time_start)
        training_rewards.append(episode_reward)
        episode_end_time = time()
        episode_time = episode_end_time - episode_start_time
        average_step_time = np.array(step_times).mean()
        average_q_loss = np.array(q_losses).mean()
        average_p_loss = np.array(p_losses).mean()
        running_40_episode_reward = np.mean(training_rewards[-40:])

        logger.log([
            episode, episode_reward, running_40_episode_reward, step_count,
            episode_time, average_step_time, average_q_loss, average_p_loss
        ])

        agent.save_models()
Example #14
    else:
        print('There is no model')
        return 'No model to use'


if __name__ == '__main__':
    # Reading initial file
    train_data = pd.read_csv("data/initial_data/train.csv", sep=";")
    print('Reading initial file')

    # Preprocessing
    preprocess = Preprocessing()
    train_data = preprocess.execute(train_data)
    print('Preprocessing')

    # Building features
    build_features = BuildFeatures()
    train_data = build_features.execute(train_data)
    print('Building features')

    # Training model
    train = Train()
    train.execute(train_data, "data/initial_data/model.pkl")
    print('Training model')

    # Path to the saved model (note: the file is not actually loaded here)
    saved_model = "data/initial_data/model.pkl"
    print('Model path set')

    app.run(debug=True, host='0.0.0.0')