Example #1
def train(args):
    mlp = MLPClassifier(hidden_layer_sizes=args.layers,
                        alpha=args.alpha,
                        learning_rate_init=args.learning_rate,
                        max_iter=args.epochs)
    models.train_model(mlp, args.feature_extractor, args.num_examples)
    return mlp
Example #2
def paraphrase_models(glove, train, dev, ppdb_file):
    ppdb = paraphrase.load_parap(ppdb_file)
    aug = augmented_dataset(glove, train, ppdb)
    train_aug = train + aug

    models.train_model(train_aug, dev, glove, model_filename='models/train_aug')
    models.train_model(train, dev, glove, model_filename='models/train_noaug')
Example #3
def mixture_experiments(train, dev, glove, splits=5):
    for i in range(splits):
        model_name = 'mixture' + str(i)
        print('Model', model_name)
        model = models.init_model()
        div = len(train) // splits  # integer fold size so the slice indices stay ints
        models.train_model(train[:i * div] + train[(i + 1) * div:splits * div],
                           dev, glove, model, 'models/' + model_name)
Example #4
def construct_model(model_paths):
    models.train_model(model_paths, transfer=True)
    models.calculate_predictions(model_paths.test_solutions,
                                 model_paths.test_image_path,
                                 model_paths.test_true, model_paths.test_preds,
                                 model_paths.checkpoint_overall_path)
    models.eval_metrics(model_paths.test_true, model_paths.test_preds,
                        model_paths.test_conf_matrix,
                        model_paths.test_other_metrics)
Example #5
def grid_experiments(train, dev, glove, embed_size=300, hidden_size=100):
    lr_vec = [0.001, 0.0003, 0.0001]
    dropout_vec = [0.0, 0.1, 0.2]
    reg_vec = [0.0, 0.001, 0.0003, 0.0001]

    # grid search over learning rate, dropout and regularization strength
    for lr, dropout, reg in itertools.product(lr_vec, dropout_vec, reg_vec):
        filename = ('lr' + str(lr).replace('.', '') +
                    '_drop' + str(dropout).replace('.', '') +
                    '_reg' + str(reg).replace('.', ''))
        print('Model', filename)
        model = models.init_model(embed_size, hidden_size, lr, dropout, reg)
        models.train_model(train, dev, glove, model, 'models/' + filename)
Example #6
def extended_tautologies(train, dev, glove):
    augment_data = generate_all(train)
    from random import shuffle
    shuffle(augment_data)
    augment_weight = [0, 0.05, 0.15, 0.5]
    for w in augment_weight:
        new_train = train + augment_data[:int(len(train) * w)]
        w_str = str(w).replace('.', '')  # don't shadow the built-in str
        model = models.init_model()
        models.train_model(new_train, dev, glove, model=model,
                           model_dir='models/aug' + w_str)
Example #7
def show_spiral_demo(baseline_model_weights_file=None,
                     mixup_model_weights_file=None):
    print("~~~~~~~~~~~~~~~~~")
    print("~~ SPIRAL DEMO ~~")
    print("~~~~~~~~~~~~~~~~~")

    x_train, y_train, x_val, y_val, x_test, y_test = data_loader.get_two_spirals_data(
        2000, noise=0.9)
    # visualizer.plot_spiral_dataset(x_train, y_train)

    spiral_model = models.create_spiral_model(models.MixupMode.NO_MIXUP)
    spiral_model_with_mixup = models.create_spiral_model(
        models.MixupMode.MANIFOLD_MIXUP)

    print("baseline spiral model")
    if baseline_model_weights_file:
        print("Loading model from file {}...".format(
            baseline_model_weights_file))
        spiral_model.load_weights(baseline_model_weights_file)
    else:
        print("Training...")
        models.train_model(
            spiral_model, (x_train, y_train), (x_val, y_val),
            batch_size=20,
            save_to_file=get_path_for_trained_models(spiral_model))

    print("Test Accuracy: {:.3f}".format(
        spiral_model.get_accuracy(x_test, y_test)))

    print("Spiral model with manifold mixup")
    if mixup_model_weights_file:
        print("Loading model from file {}...".format(mixup_model_weights_file))
        spiral_model_with_mixup.load_weights(mixup_model_weights_file)
    else:
        print("Training...")
        models.train_model(
            spiral_model_with_mixup, (x_train, y_train), (x_val, y_val),
            batch_size=20,
            save_to_file=get_path_for_trained_models(spiral_model_with_mixup))

    print("Test Accuracy: {:.3f}".format(
        spiral_model_with_mixup.get_accuracy(x_test, y_test)))

    visualizer.plot_spiral_model_confidence(spiral_model,
                                            x_train,
                                            y_train,
                                            title='no mixup')
    visualizer.plot_spiral_model_confidence(spiral_model_with_mixup,
                                            x_train,
                                            y_train,
                                            title='manifold mixup')
Example #8
def main():
    add_transformed_vars = True

    all_predictions = None
    for label_column in args.label_columns:
        print('Training for Label {}'.format(label_column))
        (all_features_predictions_df, train_features, transformed_train_labels,
         train_labels, test_features, transformed_test_labels, test_labels,
         all_feature_names, all_label_names) = utils.prepare_data(
             args.input_features_filename, args.input_labels_filename,
             args.input_predictions_filename, args.transformation,
             label_column, args.train_size, args.test_size,
             add_transformed_vars, False, False)
        all_features = np.concatenate((train_features, test_features), axis=0)
        all_labels = np.concatenate((train_labels, test_labels), axis=0)
        all_transformed_labels = np.concatenate(
            (transformed_train_labels, transformed_test_labels), axis=0)
        #print 'Max label values: {}'.format(np.max(all_labels))
        #print 'Min label values: {}'.format(np.min(all_labels))
        if all_predictions is None:
            all_predictions = all_features_predictions_df[all_label_names]

        #print('\n*****************************\n')
        (model, best_params) = models.train_model(
            all_features, all_transformed_labels, args.algorithm,
            args.num_alphas, args.skip_cross_validation, args.alpha,
            args.l1_ratio, args.num_jobs)
        train_y_true = all_labels
        train_y_pred = utils.predict(all_features, model, label_column)
        #print 'R-Squared on train is {}'.format(r2_score(train_y_true, train_y_pred))

        #print('\n')
        #print 'MAE on train: {}'.format(mean_absolute_error(train_y_true, train_y_pred))
        #print 'MSE on train: {}'.format(mean_squared_error(train_y_true, train_y_pred))

        coefs = model.coef_
        print('Optimal parameters in the final model: {}'.format(best_params))
        # info on most predictive coefs
        print('Model has {} non-zero coefficients.'.format(
            len(coefs[coefs != 0])))
        highest_indices = np.abs(coefs).argsort()[::-1]
        #print 'Top predictors of {}'.format(label_column)
        for i in range(min(10, len(coefs[coefs != 0]))):
            idx = highest_indices[i]
        #  print '{}: {}'.format(all_feature_names[idx], coefs[idx])

        # predict on all and write to file
        prediction_features = all_features_predictions_df[all_feature_names]
        label_predictions = utils.predict(prediction_features, model,
                                          label_column)
        # correct if below/above min/max
        label_predictions[label_predictions < np.min(all_labels)] = np.min(
            all_labels)
        label_predictions[label_predictions > np.max(all_labels)] = np.max(
            all_labels)
        all_predictions.loc[:, label_column] = label_predictions

        print('\n*****************************')
        print('*****************************\n')
    all_predictions.to_csv(args.output_prediction_filename)
Example #9
    def run(self):
        w = self.img_width
        h = self.img_height

        input_shape = (h, w, 1)

        tuned_path = 'tuned_model.h5'
        original_model = 'classification_model.h5'

        if not os.path.isfile(tuned_path):
            shutil.copyfile(original_model, tuned_path)

        builder = build_classification_model(input_shape=input_shape,
                                             num_classes=self.NUM_CLASSES)
        builder.load_weights(tuned_path)

        classifier = builder.get_complete_model(input_shape=input_shape)

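        # Worker loop: fetch a job (examples + epoch count), split it into
        # train/val sets, fine-tune the classifier, and emit the final metric.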
        while True:
            examples, epochs = self.get_job()

            total = len(examples)
            m_train = int(round(total * self.SPLIT_RATIO))
            m_val = total - m_train

            shuffle(examples)

            train_examples = examples[:m_train]
            val_examples = examples[m_train:]

            train_gen = self.get_generator(train_examples, self.BATCH_SIZE)
            val_gen = self.get_generator(val_examples, self.BATCH_SIZE)

            train_model(model=classifier,
                        train_gen=train_gen,
                        validation_gen=val_gen,
                        m_train=m_train,
                        m_val=m_val,
                        mini_batch_size=self.BATCH_SIZE,
                        save_path=tuned_path,
                        epochs=epochs)

            metrics = classifier.evaluate_generator(
                generator=self.get_generator(val_examples, self.BATCH_SIZE),
                steps=calculate_num_steps(m_val, self.BATCH_SIZE))

            self.completed.emit(metrics[-1])
Example #10
def show_bottleneck_representation_demo(baseline_weights_file=None,
                                        manifold_mixup_weights_file=None):
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    print("~~ BOTTLENECK REPRESENTATION DEMO ~~")
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")

    x_train, y_train, x_val, y_val, x_test, y_test = data_loader.get_mnist_data(
    )

    mnist_model_b2 = models.create_mnist_model_bottleneck_2(
        mixup_mode=MixupMode.NO_MIXUP)
    mnist_model_b2_with_mixup = models.create_mnist_model_bottleneck_2(
        mixup_mode=MixupMode.MANIFOLD_MIXUP)

    print("baseline mnist 2-node bottelneck model")
    if baseline_weights_file:
        print("Loading model from file {}...".format(baseline_weights_file))
        mnist_model_b2.load_weights(baseline_weights_file)
    else:
        print("Training...")
        models.train_model(
            mnist_model_b2, (x_train, y_train), (x_val, y_val),
            save_to_file=get_path_for_trained_models(mnist_model_b2))

    print("Test Accuracy: {:.3f}".format(
        mnist_model_b2.get_accuracy(x_test, y_test)))

    print("mnist 2-node bottleneck model with manifold mixup")
    if manifold_mixup_weights_file:
        print("Loading model from file {}...".format(
            manifold_mixup_weights_file))
        mnist_model_b2_with_mixup.load_weights(manifold_mixup_weights_file)
    else:
        print("Training...")
        models.train_model(mnist_model_b2_with_mixup, (x_train, y_train),
                           (x_val, y_val),
                           save_to_file=get_path_for_trained_models(
                               mnist_model_b2_with_mixup))

    print("Test Accuracy: {:.3f}".format(
        mnist_model_b2_with_mixup.get_accuracy(x_test, y_test)))

    visualizer.show_b2_model_hidden_representation(mnist_model_b2, x_train,
                                                   y_train)
    visualizer.show_b2_model_hidden_representation(mnist_model_b2_with_mixup,
                                                   x_train, y_train)
Example #11
def main():

    if len(sys.argv) < 2:
        print("Please provide the input file (realtime data)", len(sys.argv))
        exit(-1)

    window = 20
    window2 = 10
    user_rating = 5
    chances = 0.2
    inputfile = sys.argv[1]
    out_dir = sys.argv[1].split('.')[0] + "_filtered"

    if len(sys.argv) == 3:
        print("User past rating out of 10", int(sys.argv[2]))
        user_rating = int(sys.argv[2])

    baseline_dataset = load_my_data("basedata/main.csv", ",")
    baseline_dataset = preprocess_data(baseline_dataset, window, window2)
    #print(baseline_dataset)
    m_mdls = mdl.train_model(baseline_dataset)
    print("DONE TRAINING MODELS WITH BASELINE DATASET")
    #os.mkdir(out_dir)

    filename = inputfile
    dataset1 = load_my_data(inputfile, ',')
    sliding_samples = 120
    rows = dataset1.shape[0]
    # step through the data in windows of `sliding_samples` rows
    for timestep in range(0, int(rows / sliding_samples) * sliding_samples,
                          sliding_samples):
        start = timestep
        end = timestep + sliding_samples
        print("PROCESSING WINDOW FROM ", start, " UNTIL ", end)
        if end > rows:
            end = rows - 1
        if end - start + 1 < 12:
            continue
        dset1 = dataset1[start:end]
        dset1 = preprocess_data(dset1, window, window2)
        #print(dset1)
        out_lbls, prob = mdl.get_predictions(dset1, m_mdls)
        chances, user_rating = get_accident_probability(
            user_rating, out_lbls, prob, chances)
        #write_to_file(out_dir, inputfile.split('.')[0], dataset1)
    print("CHANCES OF ACCIDENT:", chances, " NEW USER RATING:",
          round(user_rating))
Example #12
def train_pipeline(training_pipeline_params: TrainingPipelineParams):
    logger.info(f"Start training with params: {training_pipeline_params}")

    load_data(training_pipeline_params.input_data_path,
              training_pipeline_params.input_data_url)
    data = read_data(training_pipeline_params.input_data_path)
    logger.info(f"Raw data shape: {data.shape}")

    train_df, val_df = split_train_val_data(
        data, training_pipeline_params.splitting_params)
    logger.info(f"Train df shape: {train_df.shape}")
    logger.info(f"Val df shape: {val_df.shape}")

    pipeline = build_transformer(training_pipeline_params.feature_params)
    pipeline.fit(train_df)
    logger.info(f"Transform fitted.")

    train_features = make_features(pipeline, train_df)
    train_target = extract_target(train_df,
                                  training_pipeline_params.feature_params)
    logger.info(f"Train features shape: {train_features.shape}")

    val_features = make_features(pipeline, val_df)
    val_target = extract_target(val_df,
                                training_pipeline_params.feature_params)
    logger.info(f"Val features shape: {train_features.shape}")

    model = get_model(training_pipeline_params.train_params)
    model = train_model(train_features, train_target, model)
    logger.info(f"Model trained.")

    predictions = predict_model(val_features, model)

    metrics = evaluate_model(predictions, val_target)

    path_to_model = save_artifacts(metrics, model, pipeline,
                                   training_pipeline_params)

    return path_to_model, metrics
Example #13
###############################################################################
#                       Creating and training the model                       #
###############################################################################

pl.seed_everything(3258)  # for reproducibility

model = ViscNet(hparams)
feats = model.composition_to_features(composition_df)
model.update_x_norm(feats.loc[train_val_idx].values)

train_ds, val_ds = train_val_ds(feats, viscosity_df, train_idx, val_idx)

train_model(
    model,
    train_ds,
    val_ds,
    num_workers=4,
    deterministic=True,
)

torch.save(model, r'files/viscnet.pt')

state_dict = model.state_dict()

# Learning curve
lc_train = model.learning_curve_train
lc_val = model.learning_curve_val[:-1]
lc_x = np.arange(1, len(lc_train) + 1)
learning_curve(lc_x, lc_train, lc_val, [1, len(lc_train)], 'ViscNet')

###############################################################################
Example #14
def main():
    print('Initializing...')

    base_path = os.path.dirname(sys.argv[0])

    images_paths = [
        os.path.normpath(os.path.join(base_path, 'datasets/', wwu_muenster_images_path)),
        os.path.normpath(os.path.join(base_path, 'datasets/', arte_lab_images_path))
    ]

    masks_paths = [
        os.path.normpath(os.path.join(base_path, 'datasets/', wwu_muenster_masks_path)),
        os.path.normpath(os.path.join(base_path, 'datasets/', arte_lab_masks_path))
    ]

    models_path = os.path.normpath(os.path.join(base_path, 'models/'))

    # Load data
    images = None
    masks = None

    for images_path, masks_path in zip(images_paths, masks_paths):
        images = storage.load_datamap(images_path, datamap=images)
        masks = storage.load_datamap(masks_path, datamap=masks)

    # Create dataset
    dataset = datasets.create_dataset(images, masks, rescale=(256, 256))

    # Augment dataset
    dataset = datasets.augment_dataset(dataset, new_size=2500)

    # Create model
    model = models.create_model()
    model.summary()

    loss_function = {
        'mse': tf.keras.losses.MeanSquaredError(name='mse'),
        'mae': tf.keras.losses.MeanAbsoluteError(name='mae'),
        'bce': tf.keras.losses.BinaryCrossentropy(name='bce')
    }[sys.argv[1]] if len(sys.argv) > 1 else tf.keras.losses.MeanSquaredError(name='mse')
    epochs = int(sys.argv[2]) if len(sys.argv) > 2 else 20

    print(loss_function)

    # Load weights
    if utils.input_boolean('Load weights?'):
        try:
            model.load_weights(os.path.join(models_path, loss_function.name))
        except Exception as ex:
            print('Load error!')
            print(ex)

    # Predict images (pre-training)
    print('Showing pre-training example predictions...')
    for index in range(5):
        predictions = model.predict(np.array([dataset.images[index]]))
        utils.print_prediction(dataset.images[index], dataset.labels[index], predictions[0])

    # Train model
    models.train_model(
        dataset, model,
        loss_function=loss_function,
        epochs=epochs
    )

    # Predict images (post-training)
    print('Showing post-training example predictions...')
    for index in range(5):
        predictions = model.predict(np.array([dataset.images[index]]))
        utils.print_prediction(dataset.images[index], dataset.labels[index], predictions[0])

    # Save weights
    if utils.input_boolean('Save weights?'):
        try:
            model.save_weights(os.path.join(models_path, loss_function.name))
        except Exception as ex:
            print('Save error!')
            print(ex)

    print('Terminating...')

    return model
Example #15
def train(args):
    svc = SVC(C=args.regularization, kernel=args.kernel, gamma=args.gamma)
    models.train_model(svc, args.feature_extractor, args.num_examples)
    return svc
Example #16
from models import lstm_model
from models import train_model
from preprocess import load_data
from preprocess import load_fast_text_embedding
from preprocess import get_embed_weights

max_len = 50
embedding_dim = 300
tokenizer, x_train, y_train, x_test, y_test, vocab_size = load_data(
    'sarcasm_v2.csv')

embedding_index = load_fast_text_embedding('wiki-news-300d-1M-subword.vec')
embedding_matrix = get_embed_weights(embedding_index, tokenizer)
model = lstm_model(vocab_size, embedding_matrix)
train_model(model, x_train, y_train, x_test, y_test)

# half embeddings
# Validation Loss:1.128701367992565 	Validation Accuracy:0.6840490793889286
# Validation Accuracy:68.40% (+/- 0.00%)
# Train Loss:1.1293133928731907 	Train Accuracy:68.40490797546013
# Test Loss:1.110706667958593 	Test Accuracy:71.16564420834641
Example #17
    dataset = dataset.filter(
        lambda t, y, s: tf.equal(tf.shape(y)[0], batch_size))
    iterator = dataset.make_initializable_iterator()
    return iterator


batch_size = 32
num_epochs = 200
restore = True

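# TF1-style static graph: feed the input file through a placeholder, build the
# training and inference graphs, and minimize the mean cross-entropy with Adam.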
filename = tf.placeholder(tf.string, shape=[])
iterator = create_iterator(filename, batch_size)

length, token, label = iterator.get_next()

output = train_model(token, label, length, batch_size)
infer_output = inference_model(token, label, length, batch_size)
pred = tf.argmax(output, axis=2)

loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label,
                                                      logits=output)
cost = tf.reduce_mean(loss)
updates = tf.train.AdamOptimizer(1e-4).minimize(cost)

sess = tf.Session()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())

if restore:
    saver.restore(sess, tf.train.latest_checkpoint('../models'))
Example #18
def run(args):
    test_loss = models.train_model(args, ex)

    ex.log_scalar('loss', test_loss)
    return test_loss
Example #19
                                         54,
                                         3,
                                         feature_extract=feat_extract)
        elif model_type == "S":
            model = mdl.StateModel(54, 3)
        # create loss function
        criterion = nn.MSELoss(reduction='sum')

        # define optimization method
        optimizer = opt.Adam(model.parameters(), lr=lr)
        model, train_history, val_history, val_loss = mdl.train_model(
            model,
            criterion,
            optimizer,
            dataloaders,
            dataset_sizes,
            num_epochs=50,
            model_type=model_type,
            weight_file=weight_file,
            suppress_log=False,
            hyperparam_search=True)

        if val_loss < best_loss:
            print('found better lr: {}, loss:{}'.format(lr, val_loss))
            print('previous best lr:{}, loss:{}'.format(best_lr, best_loss))
            best_loss = val_loss
            best_lr = lr

    print("Learning Rate search completed, best LR:{}".format(best_lr))
    #%%
    weight_decay_list = [0, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7]
Example #20
def detect_boxes():
    aws.detect_boxes()


if __name__ == '__main__':
    model_paths = models.initialize_default_paths()
    system_arguments = ' '.join(sys.argv[1:])

    if system_arguments == "aws":
        detect_boxes()

    if system_arguments == "Manip Data":
        preprocess_data(model_paths)

    elif system_arguments == "Train Model":
        models.train_model(model_paths)

    elif system_arguments == "Train Transfer Model":
        models.train_model(model_paths, transfer=True)

    elif system_arguments == "Predict":
        models.calculate_predictions(model_paths.valid_solutions,
                                     model_paths.valid_image_path,
                                     model_paths.valid_true,
                                     model_paths.valid_preds,
                                     model_paths.checkpoint_overall_path)
        models.eval_metrics(model_paths.valid_true, model_paths.valid_preds,
                            model_paths.valid_conf_matrix,
                            model_paths.valid_other_metrics)

    elif system_arguments == "Predict Test":
Example #21
def run_ablations(model, num_ablations):
    '''set up some persistent tracking variables'''
    remove_features = []  # list of features we are removing
    metrics_list = []  # list storing dictionary of performance metrics
    # feature indexes
    full_state_index = np.arange(7, 61)
    input_state = 54
    # create loss function
    criterion = nn.MSELoss(reduction='sum')
    # define optimization method
    optimizer = opt.Adam(model.parameters(), lr=0.01)
    param_count = []
    param_count.append(count_params(model))
    current_feature_list = np.array(qty)

    # create the dataloader
    dataloaders, dataset_sizes = dat.init_dataset(train_list, val_list,
                                                  val_list, model_type,
                                                  config_dict)

    print('evaluating full model predictions...')
    predictions = mdl.evaluate_model(model,
                                     dataloaders['test'],
                                     model_type=model_type,
                                     no_pbar=True)
    # compute the loss statistics
    print('computing full model performance metrics...')
    metrics = model_eval.compute_loss_metrics(
        predictions, dataloaders['test'].dataset.label_array[:, 1:4])
    metrics_list.append(metrics)
    print('Performance Summary of Full Model:')
    print(metrics)

    print('Running ablation study on model type:' + model_type)

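    # Each ablation pass ranks features by their mean GBP attribution, removes
    # the top-ranked feature, rebuilds and retrains the model, and records the
    # resulting test metrics.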
    for iteration in range(num_ablations):
        print('-' * 10)
        print('Begin ablation run: {}/{}'.format(iteration + 1, num_ablations))
        print('-' * 10)

        # compute the backprop values:
        gbp_data = model_eval.compute_GBP(model,
                                          dataloaders['test'],
                                          num_state_inputs=input_state,
                                          model_type=model_type,
                                          no_pbar=True)
        # evaluate means
        df_gbp_means = model_eval.compute_and_plot_gbp(gbp_data,
                                                       current_feature_list,
                                                       True,
                                                       suppress_plots=True)
        # group by feature type and rank by value
        df_gbp_means = df_gbp_means.groupby('feature').mean().sort_values(
            by='gbp', ascending=False).reset_index()
        # get top ranking value and append to removal list
        feature_to_remove = df_gbp_means.iloc[0, 0]
        print("removing " + feature_to_remove + "...")
        remove_features.append(feature_to_remove)
        # create the mask
        mask = np.isin(qty, remove_features, invert=True)
        # mask the full state vector in config_dict global variable
        config_dict['custom_state'] = full_state_index[mask]
        current_feature_list = np.array(qty)[
            mask]  #update the current feature list
        # decrease the input dimension of the model by one
        input_state = input_state - 1

        # redefine the models
        print('redefining model with input state dims: {}'.format(input_state))
        if model_type == "VS":
            model = mdl.StateVisionModel(30,
                                         input_state,
                                         3,
                                         feature_extract=feat_extract)
        elif model_type == "S":
            model = mdl.StateModel(input_state, 3)

        # recalculate the number of parameters
        param_count.append(count_params(model))

        # redefine the optimizer
        optimizer = opt.Adam(model.parameters(), lr=0.01)

        # redefine the dataloader
        dataloaders, dataset_sizes = dat.init_dataset(train_list, val_list,
                                                      val_list, model_type,
                                                      config_dict)

        # retrain the model
        model, train_history, val_history = mdl.train_model(
            model,
            criterion,
            optimizer,
            dataloaders,
            dataset_sizes,
            num_epochs=50,
            model_type=model_type,
            weight_file=weight_file,
            no_pbar=True)
        print('retraining completed')
        # do inference
        print('evaluating model predictions...')
        predictions = mdl.evaluate_model(model,
                                         dataloaders['test'],
                                         model_type=model_type,
                                         no_pbar=True)
        # compute the loss statistics
        print('computing performance metrics...')
        metrics = model_eval.compute_loss_metrics(
            predictions, dataloaders['test'].dataset.label_array[:, 1:4])
        metrics_list.append(metrics)
        print('Performance Summary:')
        print(metrics)

    return remove_features, param_count, metrics_list
Example #22
# Add this project to the path
import os
import sys

currDir = os.path.dirname(os.path.realpath("__file__"))
rootDir = os.path.abspath(os.path.join(currDir, '..'))
sys.path.insert(1, rootDir)

import warnings
warnings.filterwarnings("ignore")

# My modules
from models.train_model import *
from features.build_features import *

features_pipeline = data_preparation()
train_model(features_pipeline)


gs = ...
cross_val_score(estimator=gs
Example #23
import numpy as np

import constants as cst
import import_data
import models
import attack_function
import visualization
# Assumption: `load_model` (used below to restore the saved standard model) is
# the Keras loader; the original snippet does not show where it is imported from.
from keras.models import load_model

if __name__ == '__main__':
    # Load CIFAR10 data
    x_train, y_train, x_test, y_test = import_data.format_data()

    # Train or Load standard model (trained on not attacked data)
    if cst.TRAIN_standard_model:
        print("Training a new standard model")
        model = models.train_model(x_train, y_train, cst.config_standard_model,
                                   'standard_model')
    else:
        print("Loading standard model")
        model = load_model(cst.STANDARD_trained_model)

    # Attack Data
    if cst.MAKE_ATTACK:
        print("Attacking data")
        x_test_attacked = attack_function.make_attack(
            x_test, y_test, model, 'x_test_attacked_FGSM_0_03',
            cst.attack_style)
    else:
        print("Load attacked data")
        x_test_attacked = np.load(cst.ATTACKED_TEST)

    if cst.VIZ:
Example #24
                  mlp_layers_number=mlp_layers_number,
                  time_steps=seq_length,
                  learning_rate=learning_rate,
                  optimizer=optimizer,
                  metrics=metrics,
                  features_number=features_number)

train_model(model,
            features_train,
            annot_train,
            features_valid,
            annot_valid,
            output_class_weights=[classWeights],
            batch_size=batch_size,
            epochs=epochs,
            seq_length=seq_length,
            save=save,
            saveMonitor=saveMonitor,
            saveMonitorMode=saveMonitorMode,
            saveBestName=saveBestName,
            reduceLrOnPlateau=reduceLrOnPlateau,
            reduceLrMonitor=reduceLrMonitor,
            reduceLrMonitorMode=reduceLrMonitorMode,
            reduceLrPatience=reduceLrPatience,
            reduceLrFactor=reduceLrFactor)

# Test
model.load_weights(saveBestName + '-best.hdf5')

#predict_test = np.zeros((annot_test.shape[1],annot_test.shape[2]))
nRound = annot_test.shape[1] // seq_length
timestepsRound = nRound * seq_length
Example #25
def show_svd_demo(baseline_weights_file=None,
                  input_mixup_weights_file=None,
                  manifold_mixup_weights_file=None):
    print("~~~~~~~~~~~~~~")
    print("~~ SVD DEMO ~~")
    print("~~~~~~~~~~~~~~")

    x_train, y_train, x_val, y_val, x_test, y_test = data_loader.get_mnist_data(
    )

    mnist_model_b12 = models.create_mnist_model_bottleneck_12(
        mixup_mode=MixupMode.NO_MIXUP)
    mnist_model_b12_with_input_mixup = models.create_mnist_model_bottleneck_12(
        mixup_mode=MixupMode.INPUT_MIXUP)
    mnist_model_b12_with_manifold_mixup = models.create_mnist_model_bottleneck_12(
        mixup_mode=MixupMode.MANIFOLD_MIXUP)

    print("baseline mnist 12-node bottleneck model")
    if baseline_weights_file:
        print("Loading model from file {}...".format(baseline_weights_file))
        mnist_model_b12.load_weights(baseline_weights_file)
    else:
        print("Training...")
        models.train_model(
            mnist_model_b12, (x_train, y_train), (x_val, y_val),
            epochs=6,
            save_to_file=get_path_for_trained_models(mnist_model_b12))

    print("Test Accuracy: {:.3f}".format(
        mnist_model_b12.get_accuracy(x_test, y_test)))

    print("mnist 12-node bottleneck model with input mixup")
    if input_mixup_weights_file:
        print("Loading model from file {}...".format(input_mixup_weights_file))
        mnist_model_b12_with_input_mixup.load_weights(input_mixup_weights_file)
    else:
        print("Training...")
        models.train_model(mnist_model_b12_with_input_mixup,
                           (x_train, y_train), (x_val, y_val),
                           save_to_file=get_path_for_trained_models(
                               mnist_model_b12_with_input_mixup))

    print("Test Accuracy: {:.3f}".format(
        mnist_model_b12_with_input_mixup.get_accuracy(x_test, y_test)))

    print("mnist 12-node bottleneck model with manifold mixup")
    if manifold_mixup_weights_file:
        print("Loading model from file {}...".format(
            manifold_mixup_weights_file))
        mnist_model_b12_with_manifold_mixup.load_weights(
            manifold_mixup_weights_file)
    else:
        print("Training...")
        models.train_model(mnist_model_b12_with_manifold_mixup,
                           (x_train, y_train), (x_val, y_val),
                           save_to_file=get_path_for_trained_models(
                               mnist_model_b12_with_manifold_mixup))

    print("Test Accuracy: {:.3f}".format(
        mnist_model_b12_with_manifold_mixup.get_accuracy(x_test, y_test)))

    visualizer.compare_svd_for_b12_models([
        mnist_model_b12, mnist_model_b12_with_input_mixup,
        mnist_model_b12_with_manifold_mixup
    ], x_train, y_train)
Example #26
fig_save_path = Path(r'./plots/')

# path for the model trained with a reserved holdout dataset
h_path = Path(rf'./model_files/experiment_{exp_num:02d}_model_with_holdout.pt')

# path for the model trained without a reserved holdout dataset
f_path = Path(rf'./model_files/experiment_{exp_num:02d}_model_final.pt')

### Code

data = get_data(compounds, round_comp_decimal=3, round_temperature_decimal=0)

model_h = train_model(Model,
                      patience,
                      data,
                      compounds,
                      holdout_size,
                      dataloader_num_workers,
                      max_epochs,
                      save_path=h_path)

model_f = train_model(Model,
                      patience,
                      data,
                      compounds,
                      False,
                      dataloader_num_workers,
                      max_epochs,
                      save_path=f_path)

if COMPUTE_METRICS:
    from functools import partial
Example #27
# Data and model checkpoints directories
parser.add_argument('--data_dir', type=str, default='',
                    help='data directory containing input.txt with training examples')     
                    
args = parser.parse_args()
files = os.listdir(args.data_dir)
print(files)

##
# Need to save model weights, history to S3
##
# Save final model out to opt.
##

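# NOTE: this hardcoded ID list overrides the directory listing loaded above.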
files = ['ID_0a336e630', 'ID_0ba79c0ef', 'ID_0bc7199c6']
#path = '../Example Bucket'


train_gen = dw.DataGenerator(folder=args.data_dir,batch_size=1, file_list=files, shuffle=False)
test_gen = dw.DataGenerator(folder=args.data_dir,batch_size=1, file_list=files, shuffle=False)

model = md.unet2D(input_size = (512,512,4))
history = md.train_model(model, train_gen, test_gen, name="model", checkpoint_dir=args.final_model, epochs=3)

print(args.final_model)

#model.save(args.final_model + '/trainedmodel.h5') # saving the model
with open(args.final_model + '/trainHistoryOld', 'wb') as handle: # saving the history of the model
    pickle.dump(history.history, handle)
Example #28
        model = mdl.StateVisionModel(30,
                                     54,
                                     3,
                                     feature_extract=feat_extract,
                                     TFN=True)
    elif model_type == "S":
        model = mdl.StateModel(54, 3)
    elif (model_type == "V") or (model_type == "V_RNN"):
        #model = mdl.VisionModel(3)
        model = mdl.BabyVisionModel()

    weight_file = weight_file + "_fffsdata.dat"

    # create loss function
    criterion = nn.MSELoss(reduction='sum')
    # define optimization method
    optimizer = opt.Adam(model.parameters(), lr=1e-3, weight_decay=0)
    #optimizer = opt.SGD(model.parameters(),lr=1e-5,weight_decay=0,momentum=0.9)
    model, train_history, val_history, _ = mdl.train_model(
        model,
        criterion,
        optimizer,
        dataloaders,
        dataset_sizes,
        num_epochs=100,
        L1_loss=1e-3,
        model_type=model_type,
        weight_file=weight_file,
        suppress_log=False,
        multigpu=False)
Example #29
                  img_height=imgHeight,
                  cnnType=cnnType,
                  cnnFirstTrainedLayer=cnnFirstTrainedLayer,
                  cnnReduceDim=cnnReduceDim)

history = train_model(model,
                      features_train,
                      annot_train,
                      features_valid,
                      annot_valid,
                      output_class_weights=[classWeightFinal],
                      batch_size=batch_size,
                      epochs=epochs,
                      seq_length=seq_length,
                      save=save,
                      saveMonitor=saveMonitor,
                      saveMonitorMode=saveMonitorMode,
                      saveBestName=saveBestName,
                      reduceLrOnPlateau=reduceLrOnPlateau,
                      reduceLrMonitor=reduceLrMonitor,
                      reduceLrMonitorMode=reduceLrMonitorMode,
                      reduceLrPatience=reduceLrPatience,
                      reduceLrFactor=reduceLrFactor,
                      features_type=inputFeaturesFrames,
                      img_width=imgWidth,
                      img_height=imgHeight,
                      cnnType=cnnType)

# Results
print('Results')
model.load_weights(saveBestName + '-best.hdf5')
dataGlobal[outputName][timeString]['results'] = {}
Example #30
import sys
from lib import upload_dataset
from models import train_model, measure_test_accuracy


architecture = int(sys.argv[1])
batch_size = int(sys.argv[2])
latent_dim = int(sys.argv[3])
epochs = int(sys.argv[4])
print("yes 1")
(train_encoder_input, train_decoder_input, train_decoder_target), \
(test_encoder_input, test_decoder_input, test_decoder_target) = upload_dataset()

print(train_encoder_input.shape, train_decoder_input.shape, train_decoder_target.shape)

model, encoder_states = train_model(encoder_input_data=train_encoder_input,
                                    decoder_input_data=train_decoder_input,
                                    decoder_target_data=train_decoder_target,
                                    batch_size=batch_size,
                                    latent_dim=latent_dim,
                                    epochs=epochs,
                                    model_architecture=architecture)


# accuracy = measure_test_accuracy(test_decoder_input, model, encoder_states, latent_dim=512)