Code example #1
File: train_main.py Project: uber-research/tailr
def main(unused_argv):
    training.run_training(
        dataset=FLAGS.dataset,
        output_type='bernoulli',
        n_y=30,
        n_y_active=1,
        training_data_type='sequential',
        n_concurrent_classes=1,
        lr_init=1e-3,
        lr_factor=1.,
        lr_schedule=[1],
        blend_classes=False,
        train_supervised=False,
        n_steps=25000,
        report_interval=10000,
        knn_values=[10],
        random_seed=1,
        encoder_kwargs={
            'encoder_type': 'multi',
            'n_enc': [1200, 600, 300, 150],
            'enc_strides': [1],
        },
        decoder_kwargs={
            'decoder_type': 'single',
            'n_dec': [500, 500],
            'dec_up_strides': None,
        },
        n_z=32,
        dynamic_expansion=True,
        ll_thresh=-200.0,
        classify_with_samples=False,
        gen_replay_type='dynamic',
        use_supervised_replay=False,
    )
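
Note: main(unused_argv) together with FLAGS is the usual absl pattern, so this snippet is presumably launched via app.run. A minimal sketch of such an entry point; the flag definition is an assumption for illustration:

from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', 'mnist', 'Dataset to train on.')  # assumed flag

if __name__ == '__main__':
    app.run(main)  # absl parses the flags, then calls main(remaining_argv)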
Code example #2
File: example.py Project: ourownstory/AR-Net
def main(verbose=False, plot=False, save=False, random_ar_param=True):
    # load configuration dicts. Could be implemented to load from JSON instead.
    data_config, model_config, train_config = load_config(
        verbose, random_ar_param)
    # loads randomly generated data. Could be implemented to load a specific dataset instead.
    data = load_data(data_config, verbose, plot)
    # runs training and testing.
    results_dar, stats_dar = run_training(data, model_config, train_config,
                                          verbose)

    # optional printing
    if verbose:
        print(stats_dar)

    # optional plotting
    if plot:
        utils.plot_loss_curve(losses=results_dar["losses"],
                              test_loss=results_dar["test_mse"],
                              epoch_losses=results_dar["epoch_losses"],
                              show=False,
                              save=save)
        utils.plot_weights(model_config["ar"],
                           results_dar["weights"],
                           data["ar"],
                           model_name="AR-Net",
                           save=save)
        utils.plot_results(results_dar, model_name="AR-Net", save=save)
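
Note: as the first comment in this example suggests, the three config dicts could equally be loaded from JSON. A minimal sketch of that variant, assuming a hypothetical configs.json with one top-level key per dict:

import json

def load_config_json(path="configs.json"):
    # assumed layout: {"data": {...}, "model": {...}, "train": {...}}
    with open(path) as f:
        cfg = json.load(f)
    return cfg["data"], cfg["model"], cfg["train"]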
Code example #3
File: fx_lstm.py Project: esvhd/pytorch_play
def run_model(mode='LSTM'):
    # load data
    print('Load data...')
    train_x, test_x, train_y, test_y = load_data(.2)  # hold out 20% for testing

    # print('Train X.shape: %s, Train y.shape: %s' %
    #       (train_x.shape, train_y.shape))
    # print('Test X.shape: %s, Test y.shape: %s' %
    #       (test_x.shape, test_y.shape))

    input_dim = train_x.shape[-1]
    output_seq = train_y.shape[0]

    # hyperparams
    num_layers = 4
    hidden_size = input_dim
    dropout = 0.2

    print('Running %s Model...' % mode)
    if mode == 'LSTM':
        model = FXLSTM(input_dim,
                       hidden_size,
                       num_layers,
                       output_seq,
                       dropout=dropout)
    else:
        model = FXGRU(input_dim,
                      hidden_size,
                      num_layers,
                      output_seq,
                      dropout=dropout)

    # x_train = Variable(torch.from_numpy(train_x).float())
    # y_train = Variable(torch.from_numpy(train_y).float())

    # x_test = Variable(torch.from_numpy(test_x).float())
    # y_test = Variable(torch.from_numpy(test_y).float())

    epochs = 500
    lr = .01

    # opt = torch.optim.Adam(model.parameters(), lr=lr)
    # loss_func = nn.MSELoss()
    loss_func = nn.L1Loss()

    loss = training.run_training(model, (train_x, test_x, train_y, test_y),
                                 loss_func,
                                 lr=lr,
                                 epochs=epochs,
                                 print_every=100,
                                 test_loss_func=torch.nn.functional.l1_loss)

    return loss
Code example #4
def runcrossval(idx_split, data_file):
    """
    Runs cross validation after the data splits
    :param idx_split:
    :param data_file:
    :return: test_idx, classifiers
    """
    n_jobs = 8
    print("Training Classifiers")

    print("Reading in data")
    # read in data
    data = pd.read_hdf(data_file)

    print("Splitting the indices")
    # get training and testing indices
    train_idx = idx_split[0]
    test_idx = idx_split[1]

    print("Getting the list of ids")
    # get a list of all the ids
    ids = data.index.levels[0].values
    train_ids = ids[train_idx].tolist()
    test_ids = ids[test_idx].tolist()

    print("Creating the fold directory")
    # define the fold directory filepath
    num1, num2 = np.where(np.in1d(ids, test_ids))[0]
    fold_id = "{0}{1}".format(num1, num2)
    fold_dir = os.path.join(cache_dir, fold_id)
    if not os.path.isdir(fold_dir):
        os.makedirs(fold_dir)

    print("Training subjects: {0}".format(train_ids))
    print("Testing subjects: {0}".format(test_ids))

    train_data = data.loc[train_ids]

    classifiers = run_training(train_data,
                               train_base_clf=True,
                               out_dir=fold_dir,
                               n_jobs=n_jobs)

    return test_idx, classifiers
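
Note: idx_split is a (train_idx, test_idx) pair, and the fold-id line above unpacks exactly two test positions, so folds of size two are implied. A hedged sketch of a driver that would produce such splits; the KFold usage is an assumption, not part of the original project:

from sklearn.model_selection import KFold
import pandas as pd

data_file = "fs_norm_data.hdf5"        # hypothetical path
ids = pd.read_hdf(data_file).index.levels[0].values
kf = KFold(n_splits=len(ids) // 2)     # two test subjects per fold
for idx_split in kf.split(ids):        # yields (train_idx, test_idx)
    test_idx, classifiers = runcrossval(idx_split, data_file)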
Code example #5
def test_mnist(params):
    """ runs test program using MLP or CNN for MNIST handwritten digits classification.

    Args:
        params: All parameters as a dict object.

    Returns: The test score.

    """

    # check whether network_name is specified
    assert "network_name" in params, "network_name must be specified"
    print('Run', params['problem_name'], 'on', params['target_name'], 'with', params['network_name'])

    # Step 1. generate a set of training and test data
    print('\n\nGenerating data...\n')
    (x_train, y_train), (x_test, y_test) = load_mnist_data(params['network_name'])
    data_shape = np.delete(x_train.shape, 0)

    # Step 2. construct a (deep) neural network
    print('\n\nNetwork construction:\n')
    model = construct_network_mnist(params, data_shape)
    save_network(model, params['problem_name'], params['target_name'], params['network_name'])

    # Step 3. training
    print('\n\nTraining:\n')
    model = set_training_parameters(model, params['loss'], params['optimizer'], params['metrics'])
    model, history = run_training(model, x_train, y_train, params['batch_size'], params['nb_epoch'], x_test, y_test)

    print('Training history:')
    print(history.history)

    # Step 4. validation with ground truth data
    print('\n\nTest result:')
    score = validate_trained_network(model, x_test, y_test)

    print('Test score:', score[0])
    print('Test accuracy:', score[1])

    return score
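
Note: for reference, a sketch of the params dict this function expects; the keys are the ones read in the code above, while the concrete values are illustrative assumptions:

params = {
    "problem_name": "mnist",
    "target_name": "digits",
    "network_name": "cnn",                    # selects the MLP or CNN variant
    "loss": "categorical_crossentropy",
    "optimizer": "adam",
    "metrics": ["accuracy"],
    "batch_size": 128,
    "nb_epoch": 10,
}
score = test_mnist(params)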
Code example #6
def run_model():
    # load data
    print('Load data...')
    train_x, test_x, train_y, test_y = du.load_fx_10m_xy(test_size=.2,
                                                         y_shape_mode=0)

    seq_len = train_x.shape[-1]
    input_channels = train_x.shape[-2]
    output_size = train_y.shape[-1]

    print('seq_len: %d, input_channels: %d, output_size: %d' %
          (seq_len, input_channels, output_size))

    # hyperparameters
    kernel_size = 3
    dropout = .2
    channel_sizes = [128, 128]

    model = FXTCN(input_channels,
                  output_size,
                  channel_sizes=channel_sizes,
                  kernel_size=kernel_size,
                  dropout=dropout)

    epochs = 500
    lr = .001
    loss_func = nn.L1Loss(reduction='mean')  # size_average is deprecated; reduction='mean' is the modern spelling

    loss = training.run_training(model, (train_x, test_x, train_y, test_y),
                                 loss_func,
                                 lr=lr,
                                 epochs=epochs,
                                 print_every=100,
                                 test_loss_func=torch.nn.functional.l1_loss)

    return loss
Code example #7
from parameters import get_project_key, get_experiment_key
from data_ingest import run_ingest
from data_cleansing import run_cleansing
from training import run_training
from mlaide._api_client.errors import ApiResponseError

import traceback

if __name__ == "__main__":
    project_key = get_project_key()
    experiment_key = get_experiment_key()

    try:
        run_ingest(project_key, experiment_key)
        run_cleansing(project_key, experiment_key)
        run_training(project_key, experiment_key, True, 0.6, 0.5)
        run_training(project_key, experiment_key, True, 0.4, 0.4)
        run_training(project_key, experiment_key, True, 0.8, 0.3)
        run_training(project_key, experiment_key, False, 0.8, 0.8)
        run_training(project_key, experiment_key, False, 0.7, 0.9)
    except ApiResponseError as e:
        print("API response error")
        print("status code: " + str(e.error.code))
        print("message: " + str(e.error.message))
        traceback.print_exc()
Code example #8
# It is very useful to track our hyperparameters through the execution
#  and save them together with their performance.
params = {
    "N_train": 50000 if not (run_Kaggle) else None,
    "biased": True,
    "activation": "linear",
    "optimizer": "adam",
    "nb_epochs": 10,
    "roll2vec": False,
    "embs_multiplier": 3,
    "drop_rate": 0.1,
    "multi_dense": True,
    "dense_acti": "linear",
    "my_patience": 4,
    "full_pred": False,
}

for i in range(1):
    print("#### RUN {} ####".format(i + 1))

    params = run_preproc(df_name, test=run_Kaggle, params=params)

    params = run_training(df_name, model_name, is_GPU=is_GPU, params=params)

    if run_Kaggle:
        predictKaggle(df_name, model_name, is_GPU=is_GPU, params=params)

    perfs = data.get_perfs(params["train_id"])
    print(perfs)

    time.sleep(30)  # Cooldown
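
Note: the leading comment argues for persisting each run's hyperparameters together with its measured performance. A minimal sketch of one way to do that with only the standard library (save_run and the file layout are hypothetical):

import json
import time

def save_run(params, perfs, path=None):
    # bundle hyperparameters, performance, and a timestamp into one record
    record = {"params": params, "perfs": perfs, "timestamp": time.time()}
    path = path or "run_{:.0f}.json".format(record["timestamp"])
    with open(path, "w") as f:
        json.dump(record, f, indent=2, default=str)  # default=str handles non-JSON values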
Code example #9
File: train_all.py Project: yoanyomba123/BRAINSTools
from script import get_data, nm_dir
from training import run_training
import cPickle as pickle
import os

cache_dir = "/Shared/sinapse/CACHE/20160608_RF_Classifiers_Abs_Gradient"
if not os.path.isdir(cache_dir):
    os.makedirs(cache_dir)
print("Getting Data")
data = get_data(os.path.join(cache_dir, "fs_norm_data.hdf5"),
                nm_dir,
                overwrite=True,
                out_dir=cache_dir)
# data = get_data(os.path.join("/Shared/sinapse/CACHE/20160606_RF_Classifiers", "fs_norm_data.hdf5"), nm_dir,
#                overwrite=False, out_dir=cache_dir)
print("Training Classifiers")
classifiers = run_training(data,
                           train_base_clf=False,
                           out_dir=cache_dir,
                           n_jobs=8)
print("Saving Classifiers")
with open(os.path.join(cache_dir, "ClassifierDictionary.pkl"), 'wb') as f:
    pickle.dump(classifiers, f)
Code example #10
import yaml
from pprint import pformat
from training import run_training


CONFIG = yaml.load(open('CONFIG.yaml'), Loader=yaml.FullLoader)
RESULTS = run_training(CONFIG)
print(pformat(RESULTS))
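
Note: this works, but the handle from open() is never closed explicitly, and safe_load is the usual defensive default when the config holds only plain scalars and mappings. The equivalent idiom, assuming the same CONFIG.yaml:

import yaml

with open('CONFIG.yaml') as f:
    CONFIG = yaml.safe_load(f)   # restricts YAML to plain data types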

Code example #11
import training

training.run_training()
Code example #12
File: train_main.py Project: uber-research/tailr
def main(
    log_dir: str,
    dataset: str,
    clfmode: str,
    classifier_init_period: int,
    clf_thresh: float,
    n_steps: int,
    max_gen_batches: int,
    class_conditioned: bool,
    cluster_wait_steps: int,
    experiment_name: str,
    save_viz: bool,
    batch_mix: str,
    class_order: List[int],
    encoder_type: str,
    decoder_type: str,
    ll_thresh: float,
) -> None:
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    logging.get_absl_handler().use_absl_log_file('CURL_TRAIN_LOG', log_dir)
    experiment_name = experiment_name if experiment_name else f'{clfmode}_csv_{dataset}_{n_steps}'
    training.run_training(
        dataset=dataset,
        output_type='bernoulli',
        n_y=50,
        n_y_active=1,
        n_z=32,
        cluster_wait_steps=cluster_wait_steps,
        training_data_type='sequential',
        n_concurrent_classes=1,
        lr_init=1e-3,
        lr_factor=1.,
        lr_schedule=[1],
        blend_classes=False,
        train_supervised=False,
        n_steps=n_steps,
        report_interval=10,
        knn_values=[10],
        random_seed=1,
        encoder_kwargs=encoder_kwargs_dict[encoder_type],
        decoder_kwargs=decoder_kwargs_dict[decoder_type],
        dynamic_expansion=True,
        ll_thresh=ll_thresh,
        classify_with_samples=False,
        gen_replay_type='dynamic',
        use_supervised_replay=False,
        batch_mix=batch_mix,
        experiment_name=experiment_name,
        clf_mode=clfmode,
        gen_save_image_count=40,
        max_gen_batches=max_gen_batches,
        classifier_init_period=classifier_init_period,
        clf_thresh=clf_thresh,
        class_order=class_order,
        class_conditioned=class_conditioned,
        save_viz=save_viz,
        need_oracle=True,
    )

    training.move_time_log(experiment_name)
Code example #13
def classifier(N_CLASSES, IMG_W, IMG_H, BATCH_SIZE, MAX_STEP, CAPACITY,
               model1_data, model2_data, learning_rate, logs_dir, total):
    H_Feature, train_batch, train_label_batch, randomList = training.run_training(
        N_CLASSES, IMG_W, IMG_H, BATCH_SIZE, MAX_STEP, CAPACITY, model1_data,
        learning_rate, total)
    L_Feature = training_D.run_training(
        N_CLASSES, IMG_W, IMG_H, BATCH_SIZE, MAX_STEP, CAPACITY, model2_data,
        learning_rate, randomList, total)

    # fusion algorithm 2: element-wise sum of the two feature vectors
    H_Feature = tf.reshape(H_Feature, [-1])
    L_Feature = tf.reshape(L_Feature, [-1])
    local4 = L_Feature + H_Feature
    # local4 = tf.reshape(local, shape=[1,-1])

    # fusion algorithm 1: channel-wise concatenation (kept for reference)
    # local4 = tf.concat([H_Feature, L_Feature], 1)
    with tf.variable_scope('local3', reuse=None) as scope:
        # the hard-coded 16 appears to assume BATCH_SIZE == 16
        reshape = tf.reshape(local4, shape=[16, -1])
        dim = reshape.get_shape()[1].value
        weights = tf.get_variable('weights',
                                  shape=[dim, 128],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[128],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

 
    with tf.variable_scope('softmax_linear') as scope:
        weights = tf.get_variable('softmax_linear',
                                  # shape=[256, N_CLASSES],
                                  shape=[128, N_CLASSES],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[N_CLASSES],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        train_logits = tf.add(tf.matmul(local3, weights), biases, name='softmax_linear')


    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_dir, sess.graph)
    saver = tf.train.Saver()

    # transfer learning
    # ckpt = tf.train.get_checkpoint_state(logs_train_dirnew)
    # if ckpt and ckpt.model_checkpoint_path:
    #     global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
    #     saver.restore(sess, ckpt.model_checkpoint_path)
    # print('Loading success, global_step is %s' % global_step)

    # non-transfer: initialize all variables from scratch
    sess.run(tf.global_variables_initializer())

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = \
                sess.run([train_op, train_loss, train_acc])
            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' % (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)
            if step % 500 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()