Code example #1
def train_lstm(x):
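    # `lstm`, `content_encoder`, `pose_encoder`, `optimizer_lstm`, `mse_loss` and `args`
    # are assumed to be module-level objects defined elsewhere in the training script.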
    lstm.zero_grad()

    # initialise the LSTM hidden state and cell state (long-term memory) with zero tensors
    lstm.h, lstm.c = lstm.init_hidden()

    # compute the fixed content feature from the last observed (past) frame
    h_c, skip = content_encoder(x[args.n_past - 1])
    h_c = h_c.detach()

    # compute the pose features for each time step
    h_p = [
        pose_encoder(x[i])[0].detach()
        for i in range(args.n_past + args.n_future)
    ]

    mse = 0.0
    for i in range(1, args.n_past + args.n_future):
        pred = lstm(torch.cat(
            [h_c, h_p[i - 1]],
            dim=1))  # predict the pose features sequentially using the LSTM
        mse += mse_loss(
            pred, h_p[i]
        )  # compare the predicted pose with the ground-truth pose feature computed above and accumulate the MSE loss

    # backprop and update the parameters
    mse.backward()
    optimizer_lstm.step()

    # return the mse value after taking mean across all time steps
    return mse / (args.n_past + args.n_future)
Code example #2
File: main.py  Project: garikoitz/deep-retina
def fit_lstm(cell, stimulus_type, num_timesteps):
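    # Build the project's LSTM model for the given cell and stimulus type and train it;
    # lstm() and RMSprop are assumed to come from imports not shown in this snippet.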
    RMSmod = RMSprop(lr=0.001, rho=0.99, epsilon=1e-6)
    mdl = lstm(cell, stimulus_type, num_timesteps=num_timesteps,
               num_filters=(8, 16), filter_size=(13, 13),
               loss='poisson_loss', optimizer=RMSmod,
               weight_init='he_normal', l2_reg=0.01)
    batchsize = 100
    num_epochs = 150
    save_weights_every = 50
    mdl.train(batchsize, num_epochs=num_epochs, save_every=save_weights_every)
    return mdl
Code example #3
File: deep_training.py  Project: spoilr/rbam-wild
def final_model(model_type, nr_epochs, parents, children, all_texts, targets):
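    # Assumed project helpers: get_sequences() turns the parent/child texts into index
    # sequences, and lstm()/bidir_lstm() build the two-input Keras models fitted below.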
    parents_tensor, children_tensor, labels, word_index, targets = get_sequences(
        parents, children, all_texts, targets)

    if model_type == 'no_extra_concat_lstm_dropout_after_lstm':
        model, file_name = lstm(word_index,
                                merge_mode='concat',
                                dense_layer_after_merge=False)
    elif model_type == 'concat_lstm_dropout_after_lstm':
        model, file_name = lstm(word_index,
                                merge_mode='concat',
                                dense_layer_after_merge=True)
    elif model_type == 'sum_lstm_dropout_after_lstm':
        model, file_name = lstm(word_index,
                                merge_mode='sum',
                                dense_layer_after_merge=True)
    elif model_type == 'concat_bidir_lstm_dropout_after_lstm':
        model, file_name = bidir_lstm(word_index,
                                      merge_mode='concat',
                                      dense_layer_after_merge=True)
    else:
        raise ValueError('Unknown model_type: {}'.format(model_type))

    early_stopping = EarlyStopping(monitor='val_acc', patience=2, verbose=0)
    checkpoint = ModelCheckpoint(file_name,
                                 monitor='val_acc',
                                 verbose=0,
                                 save_best_only=True,
                                 mode='auto')
    model.fit([parents_tensor, children_tensor],
              labels,
              validation_split=VALIDATION_SPLIT,
              nb_epoch=nr_epochs,
              batch_size=BATCH_SIZE,
              callbacks=[early_stopping, checkpoint],
              shuffle=False,
              verbose=0)

    predict(parents_tensor, children_tensor, labels, file_name)

    return model, file_name
Code example #4
def build_lstm_model(num_features,
                     embedding_size=None,
                     kernel_size=None,
                     filters=None,
                     pool_size=None,
                     lstm_output_size=None):
    """
    Builds and compiles an LSTM model with the provided hyper-parameters
    Args:
        num_features:
        embedding_size:
        kernel_size:
        filters:
        pool_size:
        lstm_output_size:

    Returns:

    """
    # Embedding
    if embedding_size is None:
        embedding_size = 64

    # Convolution
    if kernel_size is None:
        kernel_size = 5
    if filters is None:
        filters = 64
    if pool_size is None:
        pool_size = 4

    # LSTM
    if lstm_output_size is None:
        lstm_output_size = 70

    print('Build model...')

    lstm_model = models.lstm(num_features,
                             embedding_size=embedding_size,
                             kernel_size=kernel_size,
                             filters=filters,
                             pool_size=pool_size,
                             lstm_output_size=lstm_output_size)

    return lstm_model
Code example #5
import matplotlib.pyplot as plt
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
root_path = os.path.dirname(os.path.abspath('__file__'))
import sys
sys.path.append(root_path)
from models import lstm

if __name__ == "__main__":

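    # Train the project's LSTM for the Huaxian station; the keyword names below
    # (including `early_stoping`) are assumed to match the signature of models.lstm.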
    lstm(
        root_path=root_path,
        station='Huaxian',
        seed=1,
        epochs_num=5000,
        batch_size=128,
        learning_rate=0.007,
        decay_rate=0.0,
        hidden_layer=1,
        hidden_units_1=8,
        dropout_rate_1=0.0,
        hidden_units_2=8,
        dropout_rate_2=0.0,
        early_stoping=True,
        retrain=False,
        warm_up=False,
        initial_epoch=None,
    )
    plt.show()
Code example #6
def main():
    run = True   # True: only run the trained policy; False: train it
    state = 2    # 0: Atari CNN policy, 1: LSTM policy, 2: continuous-control MLP policy
    env_name = 'HumanoidFlagrunBulletEnv-v0'
    if state == 0:
        env = atari_wrappers.wrap_deepmind(atari_wrappers.make_atari(env_name),
                                           episode_life=True,
                                           clip_rewards=True,
                                           frame_stack=True,
                                           scale=True)
    else:
        env = gym.make(env_name)
    if isinstance(env.action_space, Box):
        output_size = env.action_space.shape[0]
    else:
        output_size = env.action_space.n

    with tf.Session() as sess:
        name = 'flag_rnd3'
        with tf.variable_scope(name):
            input = tf.placeholder(tf.float32,
                                   [None, *env.observation_space.shape])
            state_rms = RunningMeanStd(sess, shape=env.observation_space.shape)
            norm_input = tf.clip_by_value(
                (input - state_rms._mean) / tf.sqrt(state_rms._var), -5, 5)

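            # RND (random network distillation): `target` is a fixed random network,
            # `predict` is trained to match it, and the prediction error supplies the
            # intrinsic reward handled by the RND class (models.*, RND and
            # RunningMeanStd are assumed project modules).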
            if state == 0:
                with tf.variable_scope('policy'):
                    network = models.nature_cnn(input)
                norm_input = norm_input[:, :, :, 0]
                with tf.variable_scope('target'):
                    target_net = models.add_dense(
                        models.nature_cnn(norm_input), 256, name='dense1')
                with tf.variable_scope('predict'):
                    predict_net = models.add_dense(
                        models.nature_cnn(norm_input), 256, name='dense1')
                with tf.variable_scope('value'):
                    value_net = models.nature_cnn(input)
                with tf.variable_scope('value_in'):
                    value_in_net = models.nature_cnn(input)
                model = RND(sess, input, state_rms, network, actiontype.Discrete, output_size, target_net, predict_net, value_in_net,\
                     value_network=value_net, gamma=0.999, learning_rate=lambda f : 0.0001, epochs=4, minibatch_size=4, beta2=0.01, name=name)
            else:
                if state == 1:
                    with tf.variable_scope('policy'):
                        network, seq_len, init_state, last_state = models.lstm(
                            models.mlp(input), 64)
                    with tf.variable_scope('target'):
                        target_net = models.add_dense(models.mlp(norm_input),
                                                      256,
                                                      name='dense2')
                    with tf.variable_scope('predict'):
                        predict_net = models.add_dense(models.mlp(norm_input),
                                                       256,
                                                       name='dense2')
                    with tf.variable_scope('value_in'):
                        value_in_net = models.mlp(input)
                    model = RND(sess, input, state_rms, network, actiontype.Discrete, output_size, target_net, predict_net, value_in_net, epochs=4, minibatch_size=8, gamma=0.99, beta2=0.01, epsilon=0.1,\
                        coef_in=1., learning_rate=lambda f : 2.5e-4*(1-f), name=name, )
                elif state == 2:
                    with tf.variable_scope('policy'):
                        network = models.mlp(norm_input)
                    with tf.variable_scope('target'):
                        target_net = models.add_dense(models.mlp(norm_input),
                                                      256,
                                                      name='dense2')
                    with tf.variable_scope('predict'):
                        predict_net = models.add_dense(models.mlp(norm_input),
                                                       256,
                                                       name='dense2')
                    with tf.variable_scope('value'):
                        value_net = models.mlp(norm_input)
                    with tf.variable_scope('value_in'):
                        value_in_net = models.mlp(norm_input)
                    model = RND(sess, input, state_rms, network, actiontype.Continuous, output_size, target_net, predict_net, value_in_net, value_network=value_net, epochs=10, minibatch_size=32, gamma=0.99, beta2=0.000, epsilon=0.2, \
                        coef_in=.5, learning_rate=lambda f : 3e-4*(1-f), name=name)
        if run:
            run_only(sess, model, env, render=True)
        else:
            if state == 0:
                train(sess,
                      model,
                      env_name,
                      10000000,
                      256,
                      num_envs=16,
                      atari=True)
            elif state == 1:
                train(sess, model, env_name, 5e6, 128, num_envs=16)
            elif state == 2:
                train(sess,
                      model,
                      env_name,
                      100e6,
                      2048,
                      num_envs=24,
                      log_interval=5)
        env.close()
Code example #7
        'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'
    ]

    Y = train[target_columns]

    embedding_matrix, X, X_ = tp.embedded_glove_matrix(train, test,
                                                       'comment_text',
                                                       glove_path, embed_size,
                                                       max_features,
                                                       max_length)
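    # X and X_ are the padded token-index matrices for the train and test comments,
    # and embedding_matrix holds the pre-trained GloVe vectors (tp is assumed to be
    # the project's text-preprocessing module).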
    inp = Input(shape=(max_length, ))
    x = None
    if model_type == 'lstm-cnn':
        x = models.lstm_cnn(max_features, embed_size, embedding_matrix, inp)
    elif model_type == 'lstm':
        x = models.lstm(max_features, embed_size, embedding_matrix, inp)
    elif model_type == 'cnn':
        x = models.cnn(max_features, embed_size, embedding_matrix, inp)

    model = Model(inputs=inp, outputs=x)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    model.fit(X, Y, batch_size=batch_size, epochs=epochs)
    if save_weights:
        model.save_weights('checkpoint.csv')

    sub = model.predict(X_)

    temp = test.copy()
Code example #8
    # input normalization
    # sc = StandardScaler()
    # x_data = sc.fit_transform(df[x_labels])
    # x_data = pd.DataFrame(data=x_data, columns=x_labels)
    # df = x_data.join(df[y_label])

    # x_data, y_data = ip.reshape_for_rnn(df)

    log.logger.info("x_shape: " + str(x_data.shape) + ", y_shape:" +
                    str(y_data.shape))

    x_train, x_test, y_train, y_test = train_test_split(
        x_data, y_data, test_size=FLAGS.test_size, random_state=FLAGS.seed)

    # Create model
    model = models.lstm(input_shape=(x_data.shape[1], x_data.shape[2]))
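    # models.lstm() is assumed to return a compiled Keras model whose metrics are
    # ordered [loss, mse, acc], which is why scores[1] and scores[2] are logged below.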

    # Start training
    model.fit(x_train,
              y_train,
              validation_data=(x_test, y_test),
              epochs=FLAGS.n_e,
              batch_size=FLAGS.b_s,
              verbose=FLAGS.verbose)

    # Evaluate the model
    scores = model.evaluate(x_test, y_test)
    log.logger.info("ACC(test):\t" + str(scores[2] * 100) + "%\t" +
                    log.filename + " s" + str(FLAGS.seed) + "\t")
    log.logger.info("MSE(test):\t" + str(scores[1]) + "\t" + log.filename +
                    " s" + str(FLAGS.seed) + "\t")
Code example #9
File: temporalnet.py  Project: elsakoh/cs3244-g26
def train_VGG_classifier(use_validation=False, use_val_for_training=False,
                         num_features=4096, learning_rate=0.0001, epochs=3000,
                         threshold=0.5, exp='', batch_norm=True, mini_batch_size=64,
                         save_plots=True, save_features=False,
                         classification_method='MLP', val_size=10, weight_0=1,
                         dataset_name='', features_file='', labels_file=''):
    # ========================================================================
    # FETCH FEATURE EXTRACTOR
    # ========================================================================
    model = VGG16(num_features)

    # ========================================================================
    # WEIGHT INITIALIZATION
    # ========================================================================
    layerscaffe = ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1',
                   'conv3_2', 'conv3_3', 'conv4_1', 'conv4_2', 'conv4_3',
                   'conv5_1', 'conv5_2', 'conv5_3', 'fc6', 'fc7', 'fc8']
    h5 = h5py.File(vgg_16_weights, 'r')

    layer_dict = dict([(layer.name, layer) for layer in model.layers])

    # Copy the weights stored in the 'vgg_16_weights' file to the
    # feature extractor part of the VGG16
    for layer in layerscaffe[:-3]:
        w2, b2 = h5['data'][layer]['0'], h5['data'][layer]['1']
        w2 = np.transpose(np.asarray(w2), (2,3,1,0))
        w2 = w2[::-1, ::-1, :, :]
        b2 = np.asarray(b2)
        layer_dict[layer].set_weights((w2, b2))

    # Copy the weights of the first fully-connected layer (fc6)
    layer = layerscaffe[-3]
    w2, b2 = h5['data'][layer]['0'], h5['data'][layer]['1']
    w2 = np.transpose(np.asarray(w2), (1,0))
    b2 = np.asarray(b2)
    layer_dict[layer].set_weights((w2, b2))

    # ========================================================================
    # FEATURE EXTRACTION
    # ========================================================================
    if save_features:
        saveFeatures(model, features_file, labels_file, features_key, labels_key, num_features)

    # ========================================================================
    # TRAINING
    # ========================================================================  

    adam = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
  
    h5features = h5py.File(features_file, 'r')
    h5labels = h5py.File(labels_file, 'r')
    
    # X_full will contain all the feature vectors extracted
    # from optical flow images
    X_full = h5features[features_key]
    _y_full = np.asarray(h5labels[labels_key])

    zeroes_full = np.asarray(np.where(_y_full==0)[0])
    ones_full = np.asarray(np.where(_y_full==1)[0])
    zeroes_full.sort()
    ones_full.sort()
    
    # Use a 5 fold cross-validation
    kf_falls = KFold(n_splits=5, shuffle=True)
    kf_falls.get_n_splits(X_full[zeroes_full, ...])
    
    kf_nofalls = KFold(n_splits=5, shuffle=True)
    kf_nofalls.get_n_splits(X_full[ones_full, ...])        

    sensitivities = []
    specificities = []
    fars = []
    mdrs = []
    accuracies = []
        
    fold_number = 1
    # CROSS-VALIDATION: Stratified partition of the dataset into
    # train/test sets
    for ((train_index_falls, test_index_falls),
    (train_index_nofalls, test_index_nofalls)) in zip(
        kf_falls.split(X_full[zeroes_full, ...]),
        kf_nofalls.split(X_full[ones_full, ...])
    ):

        train_index_falls = np.asarray(train_index_falls)
        test_index_falls = np.asarray(test_index_falls)
        train_index_nofalls = np.asarray(train_index_nofalls)
        test_index_nofalls = np.asarray(test_index_nofalls)

        X = np.concatenate((
            X_full[zeroes_full, ...][train_index_falls, ...],
            X_full[ones_full, ...][train_index_nofalls, ...]
        ))
        _y = np.concatenate((
            _y_full[zeroes_full, ...][train_index_falls, ...],
            _y_full[ones_full, ...][train_index_nofalls, ...]
        ))
        X_test = np.concatenate((
            X_full[zeroes_full, ...][test_index_falls, ...],
            X_full[ones_full, ...][test_index_nofalls, ...]
        ))
        y_test = np.concatenate((
            _y_full[zeroes_full, ...][test_index_falls, ...],
            _y_full[ones_full, ...][test_index_nofalls, ...]
        ))

        if use_validation:
            # Create a validation subset from the training set
            zeroes = np.asarray(np.where(_y==0)[0])
            ones = np.asarray(np.where(_y==1)[0])
            
            zeroes.sort()
            ones.sort()

            trainval_split_0 = StratifiedShuffleSplit(n_splits=1,
                            test_size=int(val_size/2),
                            random_state=7)
            indices_0 = trainval_split_0.split(X[zeroes,...],
                            np.argmax(_y[zeroes,...], 1))
            trainval_split_1 = StratifiedShuffleSplit(n_splits=1,
                            test_size=int(val_size/2),
                            random_state=7)
            indices_1 = trainval_split_1.split(X[ones,...],
                            np.argmax(_y[ones,...], 1))
            train_indices_0, val_indices_0 = indices_0.__next__()
            train_indices_1, val_indices_1 = indices_1.__next__()

            X_train = np.concatenate([X[zeroes,...][train_indices_0,...],
                        X[ones,...][train_indices_1,...]],axis=0)
            y_train = np.concatenate([_y[zeroes,...][train_indices_0,...],
                        _y[ones,...][train_indices_1,...]],axis=0)
            X_val = np.concatenate([X[zeroes,...][val_indices_0,...],
                        X[ones,...][val_indices_1,...]],axis=0)
            y_val = np.concatenate([_y[zeroes,...][val_indices_0,...],
                        _y[ones,...][val_indices_1,...]],axis=0)
        else:
            X_train = X
            y_train = _y

        # Balance the number of positive and negative samples so that
        # there is the same amount of each of them
        all0 = np.asarray(np.where(y_train==0)[0])
        all1 = np.asarray(np.where(y_train==1)[0])  

        if len(all0) < len(all1):
            all1 = np.random.choice(all1, len(all0), replace=False)
        else:
            all0 = np.random.choice(all0, len(all1), replace=False)
        allin = np.concatenate((all0.flatten(),all1.flatten()))
        allin.sort()
        X_train = X_train[allin,...]
        y_train = y_train[allin]
    
        # ==================== CLASSIFIER ========================
        if classification_method == 'MLP':
            classifier = mlp(num_features, batch_norm)
        else:
            # TODO: handle case where validation is not done
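            # Downsample each feature vector to a quarter of its length and add a dummy
            # time axis so the LSTM classifier sees sequences of length 1 (sample_data
            # and lstm are assumed project helpers).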
            new_feature_length = int(num_features / 4)
            data = sample_data([X_train, X_test, X_val], new_feature_length)
            X_train = data[0]
            X_train = np.reshape(X_train, (X_train.shape[0], 1, X_train.shape[1]))
            X_test = data[1]
            X_test = np.reshape(X_test, (X_test.shape[0], 1, X_test.shape[1]))
            X_val = data[2]
            X_val = np.reshape(X_val, (X_val.shape[0], 1, X_val.shape[1]))
            classifier = lstm(seq_length=1, feature_length=new_feature_length, nb_classes=1)

        fold_best_model_path = best_model_path + '{}_fold_{}.h5'.format(
            dataset_name,
            fold_number
        )
        classifier.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])

        if not use_checkpoint:
            # ==================== TRAINING ========================     
            # weighting of each class: only the fall class gets
            # a different weight
            class_weight = {0: weight_0, 1: 1}

            callbacks = None
            if use_validation:
                # callback definition
                metric = 'val_loss'
                e = EarlyStopping(monitor=metric, min_delta=0, patience=2,
                        mode='auto')
                c = ModelCheckpoint(fold_best_model_path, monitor=metric,
                            save_best_only=True,
                            save_weights_only=False, mode='auto')
                callbacks = [e, c]
            validation_data = None
            if use_validation:
                validation_data = (X_val,y_val)
            _mini_batch_size = mini_batch_size
            if mini_batch_size == 0:
                _mini_batch_size = X_train.shape[0]

            history = classifier.fit(
                X_train, y_train, 
                validation_data=validation_data,
                batch_size=_mini_batch_size,
                epochs=epochs,
                shuffle=True,
                class_weight=class_weight,
                callbacks=callbacks
            )

            #if not use_validation:
            #   classifier.save(fold_best_model_path)

            plot_training_info(plots_folder + exp, ['accuracy', 'loss'],
                    save_plots, history.history)

            if use_validation and use_val_for_training:
                #classifier = load_model(fold_best_model_path)

                # Use full training set (training+validation)
                X_train = np.concatenate((X_train, X_val), axis=0)
                y_train = np.concatenate((y_train, y_val), axis=0)

                history = classifier.fit(
                    X_train, y_train, 
                    validation_data=validation_data,
                    batch_size=_mini_batch_size,
                    epochs=epochs,
                    shuffle='batch',
                    class_weight=class_weight,
                    callbacks=callbacks
                )

                classifier.save(fold_best_model_path)

        # ==================== EVALUATION ========================     
        # TODO: Load model as required
        # Load best model
        #print('Model loaded from checkpoint')
        #classifier = load_model(fold_best_model_path)

        predicted = classifier.predict(np.asarray(X_test))
        for i in range(len(predicted)):
            if predicted[i] < threshold:
                predicted[i] = 0
            else:
                predicted[i] = 1
        # Array of predictions 0/1
        predicted = np.asarray(predicted).astype(int)   
        # Compute metrics and print them
        cm = confusion_matrix(y_test, predicted,labels=[0,1])
        tp = cm[0][0]
        fn = cm[0][1]
        fp = cm[1][0]
        tn = cm[1][1]
        tpr = tp/float(tp+fn)
        fpr = fp/float(fp+tn)
        fnr = fn/float(fn+tp)
        tnr = tn/float(tn+fp)
        precision = tp/float(tp+fp)
        recall = tp/float(tp+fn)
        specificity = tn/float(tn+fp)
        f1 = 2*float(precision*recall)/float(precision+recall)
        accuracy = accuracy_score(y_test, predicted)
        
        print('FOLD {} results:'.format(fold_number))
        print('TP: {}, TN: {}, FP: {}, FN: {}'.format(tp,tn,fp,fn))
        print('TPR: {}, TNR: {}, FPR: {}, FNR: {}'.format(
                        tpr,tnr,fpr,fnr))   
        print('Sensitivity/Recall: {}'.format(recall))
        print('Specificity: {}'.format(specificity))
        print('Precision: {}'.format(precision))
        print('F1-measure: {}'.format(f1))
        print('Accuracy: {}'.format(accuracy))
        
        # Store the metrics for this epoch
        sensitivities.append(tp/float(tp+fn))
        specificities.append(tn/float(tn+fp))
        fars.append(fpr)
        mdrs.append(fnr)
        accuracies.append(accuracy)
        fold_number += 1

    print('5-FOLD CROSS-VALIDATION RESULTS ===================')
    print("Sensitivity: %.2f%% (+/- %.2f%%)" % (np.mean(sensitivities)*100.,
                        np.std(sensitivities)*100.))
    print("Specificity: %.2f%% (+/- %.2f%%)" % (np.mean(specificities)*100.,
                        np.std(specificities)*100.))
    print("FAR: %.2f%% (+/- %.2f%%)" % (np.mean(fars)*100.,
                    np.std(fars)*100.))
    print("MDR: %.2f%% (+/- %.2f%%)" % (np.mean(mdrs)*100.,
                    np.std(mdrs)*100.))
    print("Accuracy: %.2f%% (+/- %.2f%%)" % (np.mean(accuracies)*100.,
                        np.std(accuracies)*100.))
Code example #10
def learn(profile):
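	# `profile` is a dict with a 'title' and an 'arg_list' of per-experiment settings;
	# each entry trains either an LSTM ('rnn') or a fully-connected ('fc') model and
	# saves plots, predictions and metadata under result/<title>/.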

	# make a result directory
	pathlib.Path('result').mkdir(parents=True, exist_ok=True)

	title = profile['title']
	arg_list = profile['arg_list']

	for args in arg_list:
		pprint.pprint(args)
		
		type_ = args['type']

		shift_num = args['shift_num']
		x_labels = args['x_labels'][:]
		y_label = args['y_label']

		if args['file_list'] == None:
			file_list = glob("data/"+args['data_dir']+"/*.csv")
		else:
			file_list = args['file_list']

		if type_ == 'rnn':

			time_window = args['time_window']

			now = time.localtime()
			s_time = "%02d%02d-%02d%02d%02d" % (now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)
			save_dir = "result/%s/rnn-sn%d-tw%d_%s/" % (title, shift_num, time_window, s_time)

			x_train, y_train, x_test, y_test = reshape_data_rnn(args, file_list)

			# build the LSTM; its input shape is inferred from the reshaped training data
			model = models.lstm(input_shape=(x_train.shape[1], x_train.shape[2]))

		elif type_ == 'fc':

			history_labels = args['history_labels']
			history_num = args['history_num']

			now = time.localtime()
			s_time = "%02d%02d-%02d%02d%02d" % (now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)
			save_dir = "result/%s/fc-sn%d-hn%d_%s/" % (title, shift_num, history_num, s_time)

			# make save directory
			pathlib.Path(save_dir).mkdir(parents=True, exist_ok=True)

			x_train, y_train, x_test, y_test = reshape_data_fc(args, file_list, save_dir)

			# if args['test_file_list'] == None:
			# 	x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.2, random_state=1)
			# else:
			# 	# x_train = x_data
			# 	# y_train = y_data
			# 	x_train, _, y_train, _ = train_test_split(x_data, y_data, test_size=1-args['train_data_size'], random_state=1)
			# 	x_test, y_test = reshape_data_fc(args, args['test_file_list'], save_dir)

			model = models.flexible_model_koo(input_dim=x_train.shape[1], output_dim=1, weights=args['hidden_layer_weights'])

		

		# make save directory
		pathlib.Path(save_dir).mkdir(parents=True, exist_ok=True)
		pathlib.Path(save_dir+'figures/').mkdir(parents=True, exist_ok=True)

		# callbacks
		callbacks = []
		checkpoint = ModelCheckpoint(filepath=save_dir+'weights.hdf5', save_best_only=True)
		earlystop = EarlyStopping(monitor='val_loss', min_delta=args['min_delta'], patience=100, verbose=1)
		tensorboard = TensorBoard(log_dir=save_dir)

		callbacks.append(checkpoint)
		if args['early_stop'] == True:
			callbacks.append(earlystop)
		callbacks.append(tensorboard)

		# Start training
		# history = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=args['epochs'],
		# 		  batch_size=args['batch_size'], verbose=1, callbacks=[checkpoint, earlystop, tensorboard])
		history = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=args['epochs'],
				  batch_size=args['batch_size'], verbose=1, callbacks=callbacks)

		# Save plots
		# print("figure size", plt.rcParams["figure.figsize"])
		plt.rcParams["figure.figsize"] = [12, 5]
		plt.plot(history.history['mean_acc'])
		plt.plot(history.history['val_mean_acc'])
		plt.title('model accuracy')
		plt.ylabel('accuracy')
		plt.xlabel('epoch')
		plt.legend(['train', 'test'], loc='upper left')
		# plt.show()
		plt.savefig(save_dir+'figures/mean.eps', format='eps', dpi=1200)
		plt.savefig(save_dir+'figures/mean.png')

		plt.gcf().clear()


		# print(type(history.history['loss']))
		history_all = history.history['loss'] + history.history['val_loss']
		# plt.ylim(min(history_all), max(history_all))
		plt.ylim(0, 1500000)
		plt.plot(history.history['loss'])
		plt.plot(history.history['val_loss'])
		plt.title('model loss')
		plt.ylabel('loss')
		plt.xlabel('epoch')
		plt.legend(['train', 'test'], loc='upper left')
		# plt.show()
		plt.savefig(save_dir+'figures/loss.eps', format='eps', dpi=1200)
		plt.savefig(save_dir+'figures/loss.png')


		# load the best model
		model.load_weights(save_dir+'weights.hdf5')

		# Evaluate the model
		scores = model.evaluate(x_test, y_test)

		result = {}
		result['acc'] = str(scores[2] * 100)
		result['mse'] = str(scores[1])
		result['train_data_shape'] = str(x_train.shape)
		result['test_data_shape'] = str(x_test.shape)

		with open(save_dir+'result.json', 'w') as json_file:
			json_file.write(json.dumps(result))

		### save the results ###

		# save prediction result
		predictions = model.predict(x_test)
		y_test_t = y_test.reshape((-1, 1))

		print(x_test.shape, y_test.shape, predictions.shape)
		result_with_features = np.concatenate((x_test, y_test_t, predictions), axis=1)
		header = ",".join(args['x_labels']) + ",real,prediction"
		np.savetxt(save_dir+"result_with_features.csv", result_with_features, header=header, comments='', delimiter=",")

		predictions_train = model.predict(x_train)
		y_train_t = y_train.reshape((-1, 1))

		result = np.concatenate((y_test_t,predictions),axis=1)
		result_train = np.concatenate((y_train_t, predictions_train), axis=1)

		result = result[result[:,0].argsort()]
		result_train = result_train[result_train[:,0].argsort()]

		# result_trend = np.polyfit(np.arange(len(result)), result[:,1], 1)

		plt.gcf().clear()

		if args['y_label'][0] == 'power':
			plt.ylim(8500, 17500)
		else:
			plt.ylim(result[:,[0,1]].min(), result[:,[0,1]].max())
		plt.plot(result[:,0], 'o', markersize=3)
		plt.plot(result[:,1], 'o', markersize=3)
		# plt.plot(result_trend, 'r--')
		# plt.plot(result)
		plt.title('test result')
		plt.ylabel('power')
		# plt.xlabel('epoch')
		plt.legend(['real', 'prediction'], loc='upper left')
		# plt.show()
		plt.savefig(save_dir+'figures/test_result.eps', format='eps', dpi=1200)
		plt.savefig(save_dir+'figures/test_result.png')

		plt.gcf().clear()

		if args['y_label'][0] == 'power':
			plt.ylim(8500, 17500)
		else:
			plt.ylim(result_train[:,[0,1]].min(), result_train[:,[0,1]].max())
		plt.plot(result_train[:,0], 'o', markersize=3)
		plt.plot(result_train[:,1], 'o', markersize=3)
		# plt.plot(result)
		plt.title('train result')
		plt.ylabel('power')
		# plt.xlabel('epoch')
		plt.legend(['real', 'prediction'], loc='upper left')
		# plt.show()
		plt.savefig(save_dir+'figures/train_result.eps', format='eps', dpi=1200)
		plt.savefig(save_dir+'figures/train_result.png')

		np.savetxt(save_dir+"pred_result.csv", result, delimiter=",")
		np.savetxt(save_dir+"pred_result-train.csv", result_train, delimiter=",")


		# Save model
		model_json = model.to_json()
		with open(save_dir+"model.json", "w") as json_file:
			json_file.write(model_json)  # serialize model to JSON
		# model.save_weights(save_dir+"weight.h5")  # weight
		print("Save model ... done")

		# Save args
		with open(save_dir+'args.json', 'w') as json_file:
			json_file.write(json.dumps(args))

		# Save visualized model
		# plot_model(model, to_file=save_dir+'model_plot.png', show_shapes=True, show_layer_names=True)

		K.clear_session()
Code example #11
File: run.py  Project: liangxun/pytorch-sunspot
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as Data

import load    # project module: data loading / preprocessing
import models  # project module: model definitions (models.lstm, ...)
import EvaluationIndex

# hyperparams
timesteps = 10
dim = 2
epochs = 10
lr = 0.001
batchsize = 128
ahead = 1
layers = [32, ahead]
datapath = './sunspot_ms_dim{}.csv'.format(2)
modelpath = './models/{}_step{}_dim{}_rmse{:.3f}.pt'

print('>Loading model...')
# model = models.rnn()
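# models.lstm is assumed to be the project's PyTorch LSTM regressor: `input_dim`
# features per time step, `layers` giving the hidden/output sizes, stacked
# `num_layers` deep.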
model = models.lstm(input_dim=dim, layers=layers, num_layers=2)
optimizer = optim.RMSprop(model.parameters(), lr=lr)
loss_func = nn.MSELoss()
print(model)

# load data
print('> Loading data... ')
DataLoader = load.DataPreprocess()
x_train, y_train, x_test, y_test = DataLoader.lstm_load_data(
    filename=datapath, seq_len=timesteps, ahead=ahead, dim=dim,
    row=1686 - timesteps)
train_dataset = Data.TensorDataset(torch.Tensor(x_train),
                                   torch.Tensor(y_train))
Code example #12
    print("---------------------------------------")
    print(f"Policy: {args.policy}, Env: {args.env}, Seed: {args.seed}")
    print("---------------------------------------")

    if not os.path.exists("./results"):
        os.makedirs("./results")

    if args.save_model and not os.path.exists("./checkpoint"):
        os.makedirs("./checkpoint")

    # Use specific GPUs
    os.environ["CUDA_VISIBLE_DEVICES"] = device_list
    # Device setting
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # init base network: each joint contributes an (x, y) coordinate pair,
    # hence input_size = num_joints * 2
    model = lstm(input_size=num_joints * 2).to(device)
    resume_model(model, checkpoint)
    # init dataset
    dataset = CSL_Isolated_Openpose('trainval')

    env = Environment(model,dataset)

    # Set seeds
    env.seed(args.seed)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    kwargs = {
        "state_dim": state_dim,
        "action_dim": action_dim,
        "discount": args.discount,