Example 1
def evaluate_model(model, X, Y, args):
    loss_train, acc_train = model.evaluate(X, Y, verbose=0)
    logs_dir = get_logs_dir(args)
    records = {"train": {"loss": loss_train, "accuracy": acc_train}}
    if args.eval:
        X, Y = load_data(args, data_partition="val")
        loss_eval, acc_eval = model.evaluate(X, Y, verbose=0)
        records["eval"] = {"loss": loss_eval, "accuracy": acc_eval}
    if args.test:
        X, Y = load_data(args, data_partition="test")
        loss_test, acc_test = model.evaluate(X, Y, verbose=0)
        records["test"] = {"loss": loss_test, "accuracy": acc_test}

    # write all collected metrics as a single JSON record
    with open(logs_dir + '/log.txt', mode='w') as json_log:
        json.dump(records, json_log)
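Example 1 calls a get_logs_dir helper that is not shown. A minimal sketch of what such a helper might look like, assuming hypothetical args.logs_root and args.model_name attributes (an illustration only, not the project's implementation):

import os

def get_logs_dir(args):
    # hypothetical helper: build a per-model log directory and make sure it exists
    logs_dir = os.path.join(args.logs_root, args.model_name)
    os.makedirs(logs_dir, exist_ok=True)
    return logs_dir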
Example 2
def main():
    data_path = './data/ratings.csv'
    item_based_matrix_path = './matrix/item_based_matrix.csv'
    user_based_matrix_path = './matrix/user_based_matrix.csv'
    user_label = 'userId'
    item_label = 'movieId'
    score_label = 'rating'
    test_split = 0.15
    min_user_count = 100
    min_item_count = 100
    train_data, test_data = load_data(data_path,
                                      user_label,
                                      item_label,
                                      score_label,
                                      test_split,
                                      min_user_count=min_user_count,
                                      min_item_count=min_item_count)
    item_based_model = RecommendationSystem('item_based')
    user_based_model = RecommendationSystem('user_based')

    item_based_model.fit(train_data, user_label, item_label, score_label)
    print(
        'Item-based recommendation system test mean absolute error: {}'.format(
            item_based_model.test(test_data)))
    item_based_model.save_matrix(item_based_matrix_path)

    user_based_model.fit(train_data, user_label, item_label, score_label)
    print(
        'User-based recommendation system test mean absolute error: {}'.format(
            user_based_model.test(test_data)))
    user_based_model.save_matrix(user_based_matrix_path)
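Example 2's load_data is not shown. A minimal sketch of a function with the same signature, assuming the ratings file is a CSV readable by pandas and that rare users/items are dropped before a random train/test split (an illustration, not the project's implementation):

import pandas as pd
from sklearn.model_selection import train_test_split

def load_data(data_path, user_label, item_label, score_label, test_split,
              min_user_count=0, min_item_count=0):
    df = pd.read_csv(data_path)[[user_label, item_label, score_label]]
    # keep only users and items with enough ratings
    user_counts = df[user_label].value_counts()
    item_counts = df[item_label].value_counts()
    df = df[df[user_label].isin(user_counts[user_counts >= min_user_count].index)]
    df = df[df[item_label].isin(item_counts[item_counts >= min_item_count].index)]
    # hold out a fraction of the ratings for testing
    train_data, test_data = train_test_split(df, test_size=test_split, random_state=0)
    return train_data, test_data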
Example 3
def main(config):

    if config.validate:
        output_len = config.output_seq_length
        l = config.num_layers
        loss = config.loss
        sl = config.num_stochastic_layers
        config = pickle.load(open('saved_models/' + config.model_name + '/config.p', "rb"))
        config.validate = True
        config.simulate = False
        config.output_seq_length = output_len
        config.num_layers = l
        config.num_stochastic_layers = sl
        config.loss = loss
        print(config)

    t1 = time.time()
    data_folder = os.path.abspath("../../../../") + '/data/'
    dataset = load_data(data_folder, config)

    t2 = time.time()
    print('Finished loading the dataset: ' + str(t2-t1) +' sec \n')

    model = PricePredictor(config, dataset)
    if config.validate:
        # model._make_figs(steps = config.output_seq_length, epoch=200)
        # model._validate(steps = config.output_seq_length, epoch=160)
        model._backtest(epoch=160)
    else:
        model._train()
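Examples 3, 5, 6, 9 and 15 all reload a config that was pickled to saved_models/<model_name>/config.p during training. A minimal sketch of the matching save step; the helper name save_config and the namespace-style config object are assumptions:

import os
import pickle

def save_config(config):
    # persist the training config so a later validate/backtest run can reload it
    out_dir = os.path.join('saved_models', config.model_name)
    os.makedirs(out_dir, exist_ok=True)
    with open(os.path.join(out_dir, 'config.p'), 'wb') as f:
        pickle.dump(config, f)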
Example 4
def main():

    if len(sys.argv) == 2:
        input_data = INPUT_DATA_FILEPATH + sys.argv[1]
    else:
        print('Please provide the filename of the data file in the Data/Input '
              'directory containing the target and feature variables.')
        sys.exit(1)


    for upsampled in [False, True]:

        # load data- refer to load_data.py script for train test split and how to structure input dataframe
        labels, features, target, X_train, X_test, y_train, y_test = load_data(input_data, upsampled=upsampled)

        # iterate through models and visualizers to create and save yellowbrick visualizers to img directory
        img_results = [create_img(X_train, X_test, y_train, y_test, labels, model, visualizer, upsampled, IMG_OUTPUT_FILEPATH) for visualizer in VISUALIZERS for model in MODELS]

        # saves string value of model name as key and sklearn classification_report output_dict as value
        report_dict = {str(model).split('(')[0]: evaluate_model(model, X_train, y_train, X_test, y_test) for model in MODELS}

        # create pandas dataframe of report_dict and transpose
        report_df = pd.DataFrame.from_dict(report_dict).T

        # format report_df dataframe for use in app.py Dash Plotly heatmap
        revised_report_df = revise_report_df(report_df)

        if upsampled:
            revised_report_df.to_csv(OUTPUT_DATA_FILEPATH + 'report_df_upsampled.csv')
        else:
            revised_report_df.to_csv(OUTPUT_DATA_FILEPATH + 'report_df.csv')
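The evaluate_model used above is not part of this snippet. A plausible minimal version, assuming it fits each scikit-learn model and returns classification_report as a dict, which matches how report_dict is built:

from sklearn.metrics import classification_report

def evaluate_model(model, X_train, y_train, X_test, y_test):
    # fit the classifier and return sklearn's report as a dict
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    return classification_report(y_test, y_pred, output_dict=True)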
Example 5
def main(config):

    if config.validate:
        config = pickle.load(
            open('saved_models/' + config.model_name + '/config.p', "rb"))
        config.validate = True
        config.simulate = False
        print(config)

    t1 = time.time()
    data_folder = os.path.abspath("../../../") + '/data/'
    dataset = load_data(data_folder, config)

    t2 = time.time()
    print('Finished loading the dataset: ' + str(t2 - t1) + ' sec \n')

    model = PricePredictor(config, dataset)
    if config.validate:
        # model._validate( epoch=70)
        # model._make_figs(epoch=70)
        model._make_figs2(epoch=70)

    elif config.tsne:
        model._tsne(epoch=70)
    else:
        model._train()
Example 6
def main(config):

    if config.validate:
        output_len = config.output_seq_length
        file_path = config.file_path
        seed = config.seed
        loss = config.loss
        target = config.target
        config = pickle.load(
            open('saved_models/' + config.model_name + '/config.p', "rb"))
        config.validate = True
        config.file_path = file_path
        config.output_seq_length = output_len
        config.seed = seed
        config.loss = loss
        config.backtest_target = 'close'
        config.target = 'NDX'
        print(config)

    t1 = time.time()
    data_folder = os.path.abspath("../../../../") + '/data/'
    dataset = load_data(data_folder, config)

    t2 = time.time()
    print('Finished loading the dataset: ' + str(t2 - t1) + ' sec \n')

    model = PricePredictor(config, dataset)

    if config.validate:
        model._backtest(epoch=150)
        # model._validate(steps = config.output_seq_length, epoch=150)
    else:
        model._train()
Example 7
def run(data_dir, model_path, embed_path, report_file):
    logger.info("loading dataset.")
    num_class = 2
    adj_lists, feat_data, labels = load_data(data_dir, args.keyword, args.view)
    data = dict()
    data['feat_data'] = feat_data
    data['labels'] = labels

    if args.model == "GraphSage":
        data['adj_lists'] = adj_lists[0]
        model = GraphSageHandler(num_class, data, args)
    elif args.model == "HANSage":
        data['adj_lists'] = adj_lists
        model = HANSageHander(num_class, data, args)
    else:
        raise ValueError("unknown model: {}".format(args.model))
    # ret_tuple, df = model.train(epoch=args.epoches, interval_val=args.interval_eval)
    ret_tuple, df = model.train_ddc(epoch=args.epoches,
                                    interval_val=args.interval_eval)

    df.to_csv("{}/rret/{}_{}.csv".format(data_dir, args.keyword, args.view),
              index=False)

    import pickle as pkl
    with open('ret_tuple_{}.pkl'.format(args.view), 'wb') as f:
        pkl.dump(ret_tuple, f)
    model.save_mode(model_path, report_file)
Example 8
def train():
    epoches = args.iteration
    lr = args.lr
    batch = args.batch
    model_name = args.model

    model = network(feature_len, 100, label_num)
    op = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.5)
    dataloader = load_data()
    loss_value = np.zeros((int(val_num / batch),), dtype=np.float32)
    print('start training')
    start_time = time.time()
    for epoch in range(epoches):
        cnt = 0
        # training pass
        for data, label, adj in dataloader.train_loader(batch):
            data = torch.from_numpy(data)
            label = torch.LongTensor(label)
            adj = torch.from_numpy(adj)
            output = model(adj, data)
            loss = torch.nn.functional.nll_loss(output, label)
            op.zero_grad()
            loss.backward()
            op.step()
        # validation pass (no gradients needed)
        with torch.no_grad():
            for data, label, adj in dataloader.val_loader(batch):
                data = torch.from_numpy(data)
                label = torch.LongTensor(label)
                adj = torch.from_numpy(adj)
                output = model(adj, data)
                loss = torch.nn.functional.nll_loss(output, label)
                loss_value[cnt] = loss.item()
                cnt += 1
        now_time = time.time()
        print('epoch:', epoch + 1, 'time:', now_time - start_time, 'loss:', np.mean(loss_value))
    torch.save(model, './model/' + model_name)
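torch.save(model, ...) above pickles the whole module object. A matching reload for inference could look like this; the saved file name is hypothetical, and the network class definition must be importable when loading:

import torch

model_name = 'gcn'  # hypothetical value of args.model used when saving
# reload the full module object saved above
model = torch.load('./model/' + model_name)
model.eval()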
Example 9
def main(config):

    if config.validate:
        output_len = config.output_seq_length
        config = pickle.load(
            open('saved_models/' + config.model_name + '/config.p', "rb"))
        config.validate = True
        config.output_seq_length = output_len
        config.num_layers = 6
        print(config)

    elif config.backtest:
        config = pickle.load(
            open('saved_models/' + config.model_name + '/config.p', "rb"))
        config.backtest = True
        print(config)

    t1 = time.time()
    data_folder = os.path.abspath("../../../../") + '/data/'
    dataset = load_data(data_folder, config)

    t2 = time.time()
    print('Finished loading the dataset: ' + str(t2 - t1) + ' sec \n')
    model = PricePredictor(config, dataset)

    if config.validate:
        model._validate(steps=config.output_seq_length, epoch=40)
        # model._make_figs(steps = config.output_seq_length, epoch=40)
    elif config.backtest:
        model._backtest2(epoch=180)
    else:
        model._train()
Example 10
def get_most_var_idx(config, model):
    # load data
    data = load_data(config)
    print("[INIT] shape data", np.shape(data[0]))

    # predict feature maps input
    preds = model.predict(data[0], verbose=True)
    preds_flat = np.reshape(preds, (len(preds), -1))
    print("[FIT] shape preds", np.shape(preds))

    # keep only highest variance
    var = np.std(preds_flat, axis=0)
    print("[FIT] shape var", np.shape(var))
    # get the n_component's max index
    index = np.flip(np.argsort(var))[:config['PCA']]
    # transform index to feature maps index
    x, y, k = np.unravel_index(index, np.shape(preds)[1:])
    print("ft index", k)

    # remove duplicate feature-map indices while preserving order
    ft_index = list(dict.fromkeys(k))

    print("feat maps index")
    print(ft_index)
    print()

    return ft_index
Example 11
def get_data(data_set_name):
    path_train = '../data/data/' + data_set_name + '_train.dat'
    path_test = '../data/data/' + data_set_name + '_test.dat'
    dataset_class = load_data(path_train=path_train, path_test=path_test,
                              header=['user_id', 'item_id', 'rating'],
                              sep='\t', print_log=False)
    attack_info_path = ["../data/data/" + data_set_name + "_selected_items",
                        "../data/data/" + data_set_name + "_target_users"]
    attack_info = load_attack_info(*attack_info_path)
    return dataset_class, attack_info
Example 12
class Migration(migrations.Migration):

    dependencies = [
        ('export', '0001_initial'),
    ]

    operations = [
        migrations.RunPython(
            load_data('export/migrations/data/admin_metadata.json',
                      'default')),
    ]
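migrations.RunPython expects a callable, so the load_data call above presumably returns one. A minimal sketch of such a factory, assuming it wraps Django's loaddata command (an illustration of the expected shape, not the project's code):

from django.core.management import call_command

def load_data(fixture_path, database):
    # return a forwards function with the (apps, schema_editor) signature
    # that RunPython will call; it loads the given fixture into the database
    def forwards(apps, schema_editor):
        call_command('loaddata', fixture_path, database=database)
    return forwards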
Example 13
    def __init__(self,
                 data_dir,
                 rolling_mean,
                 input_length,
                 output_length,
                 scaler=None,
                 single_file=None):
        self.data, self.label, self.scaler, self.tickers = load_data(
            data_dir,
            rolling_mean,
            input_length,
            output_length,
            scaler=scaler,
            single_file=single_file)
Example 14
def main(args):
    X, Y = load_data(args)
    print("Data Loaded")

    model = None
    if args.train:
        model = assemble_model(args)
        model.compile("adam", "categorical_crossentropy", ["accuracy"])
        model = fit_model(model, X, Y, args)
        print("Model was fitted!")
    else:
        logs_dir = get_logs_dir(args) + "/train"
        model = load_model(logs_dir + '/model.h5')

    evaluate_model(model, X, Y, args)
Example 15
def main(config):

    if config.validate:
        output_len = config.output_seq_length
        file_path = config.file_path
        seed = config.seed
        loss = config.loss
        l = config.num_layers
        sl = config.num_stochastic_layers
        config = pickle.load(
            open('saved_models/' + config.model_name + '/config.p', "rb"))
        config.validate = True
        config.file_path = file_path
        config.output_seq_length = output_len
        config.seed = seed
        config.loss = loss
        config.num_layers = l
        config.num_stochastic_layers = sl
        config.backtest_target = 'close_btc'
        config.target = 'lr_btc'
        config.model_name = 'vaegan_mv_hour_'
        print(config)

    elif config.backtest:
        config = pickle.load(
            open('saved_models/' + config.model_name + '/config.p', "rb"))
        config.backtest = True
        config.validate = False
        print(config)

    t1 = time.time()
    data_folder = os.path.abspath("../../../../") + '/data/'
    dataset = load_data(data_folder, config)

    t2 = time.time()
    print('Finished loading the dataset: ' + str(t2 - t1) + ' sec \n')
    model = PricePredictor(config, dataset)

    if config.validate:
        model._validate(steps=config.output_seq_length, epoch=500)
        # model._make_figs(steps = config.output_seq_length, epoch=200)
        # model._backtest(epoch=500)

    else:
        model._train()
Example 16
def main():
    adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(
        "cora")
    features, _ = preprocess_features(features)
    features = features[:, :100]
    g1 = nx.from_scipy_sparse_matrix(adj)
    g2 = create_align_graph(g1, remove_rate=0.2, add_rate=0)
    g2, original_to_new = shuffle_graph(g2)
    C = config()
    features1 = features
    new2original = {
        original_to_new[i]: i
        for i in range(nx.number_of_nodes(g2))
    }
    cols = [new2original[i] for i in range(nx.number_of_nodes(g2))]
    features2 = features[cols]
    model = DGA(g1, g2, features1, features2, C)
    model.train(original_to_new)
Example 17
def main(study_name, is_resume_study):
    df_train, _ = load_data(scaled=False)
    df_train = df_train.loc[:, sorted(df_train.columns)]
    X_train = df_train.drop([
        'HasDetections', 'MachineIdentifier', 'machine_id', 'AvSigVersion_1',
        'test_probability'
    ],
                            axis=1)
    y_train = df_train['HasDetections']

    f = partial(objective, X_train, y_train, df_train)
    if is_resume_study:
        study = optuna.Study(study_name=study_name,
                             storage='sqlite:///example.db')
    else:
        study = optuna.create_study(study_name=study_name,
                                    storage='sqlite:///example.db')
    study.optimize(f, n_trials=50)
    print('params:', study.best_params)
Example 18
    def test_accuracy_custom(self):
        (x_train, x_test), (y_train, y_test) = load_data()

        correct = 0

        print('start')

        for i in range(len(x_test)):
            prediction = self.predict(x_test[i])[1].replace('%', '')

            if y_test[i] == 1 and float(prediction) >= 50:
                correct += 1
            elif y_test[i] == 0 and float(prediction) < 50:
                correct += 1
            else:
                print(
                    f'{x_test[i]} ||| Expected: {y_test[i]} ||| Actual: {prediction}'
                )

        print(f'{correct}/{len(x_test)} ||| {correct / len(x_test)}')
Example 19
def evaluate(amod, hmod):
    '''
        Arguments   : amod : pytorch model used for animal classification
                      hmod : pytorch model used for habitat classification

        Returns     : a tuple of dictionaries containing the output values

        Description :
                The image values are fed into each model, which returns the identified class.
    '''

    animal_predict, habitat_predict, animal_df, habitat_df = load_data()

    animal = animal_prediction(animal_predict, animal_class_name(), animal_df,
                               amod)

    habitat = habitat_prediction(habitat_predict, habitat_class_name(),
                                 habitat_df, hmod)

    return animal, habitat
Example 20
def run_test(model_path,
             data_dir='data/macenko/',
             epoch=15,
             batch_size=128,
             input_shape=(96, 96)):

    model = load_model(model_path, custom_objects={'auc': auc})

    x_test, y_test_true = load_data(data_dir, purpose='test', norm='macenko')

    # indexes
    test_id = np.arange(len(x_test))

    partition = {}
    partition['test'] = test_id

    test_labels = {str(i): y_test_true[i].flatten()[0] for i in test_id}

    # Parameters for generators
    params = {
        'dim': input_shape,
        'batch_size': batch_size,
        'n_classes': 2,
        'aug': False,
        'shuffle': False
    }

    # Generators
    test_generator = DataGenerator(partition['test'], x_test, test_labels,
                                   **params)

    preds = model.predict_generator(test_generator)

    true_labels = np.array(y_test_true).flatten()
    pred_labels = np.array([p[1] for p in preds])
    calculate_auc(true_labels, pred_labels)
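calculate_auc is not defined in this snippet. A minimal stand-in using scikit-learn's roc_auc_score, assuming that is what the helper computes:

from sklearn.metrics import roc_auc_score

def calculate_auc(true_labels, pred_scores):
    # area under the ROC curve for the positive-class scores computed above
    auc_value = roc_auc_score(true_labels, pred_scores)
    print('Test AUC: {:.4f}'.format(auc_value))
    return auc_value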
Example 21
def train_rec(data_set_name, model_name, attack_method, target_id, is_train):
    if attack_method == "no":
        attack_method = ""
        model_path = "../result/model_ckpt/" + '_'.join([model_name, data_set_name]) + ".ckpt"
    else:
        model_path = "../result/model_ckpt/" + '_'.join([model_name, data_set_name, attack_method]) + ".ckpt"
    path_train = "../data/data_attacked/" + '_'.join([data_set_name, str(target_id), attack_method]) + ".dat"
    path_test = "../data/data/" + data_set_name + "_test.dat"
    if attack_method == "": path_train = "../data/data/" + data_set_name + "_train.dat"

    # load_data
    dataset_class = load_data(path_train=path_train, path_test=path_test,
                              header=['user_id', 'item_id', 'rating'],
                              sep='\t', print_log=True)
    # train rec
    if model_name in ["IAutoRec", "UAutoRec", "NNMF"]:
        predictions, hit_ratios = rec_trainer(model_name, dataset_class, target_id, is_train, model_path)
    else:
        predictions, hit_ratios = basic_rec(model_name, path_train, path_test, target_id)

    # write to file
    dst_path = "../result/pred_result/" + '_'.join([model_name, data_set_name, str(target_id), attack_method])
    dst_path = dst_path.strip('_')
    target_prediction_writer(predictions, hit_ratios, dst_path)
Example 22
configs = []
for config_name in config_names:
    configs.append(load_config(config_name))

# train models/load model
norm_base_list = []
for config in configs:
    try:
        if retrain:
            raise IOError("retrain = True")
        norm_base = NormBase(config,
                             input_shape=(224, 224, 3),
                             save_name=config["sub_folder"])
    except IOError:
        norm_base = NormBase(config, input_shape=(224, 224, 3))
        norm_base.fit(load_data(config,
                                train=config["train_dim_ref_tun_ref"][0]),
                      fit_dim_red=True,
                      fit_ref=False,
                      fit_tun=False)
        norm_base.save_model(config, config["sub_folder"])
    norm_base_list.append(norm_base)

# extract PCA
pca1_human = norm_base_list[0].pca.components_[0]
pca1_monkey = norm_base_list[1].pca.components_[0]


def plot_pca(component, folder, figname):
    if isinstance(component, list):
        is_list = True
        vectors = component
Example 23
import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), '../'))

from text_cnn.graph import Graph
import tensorflow as tf
from utils.load_data import load_data
from text_cnn import args

x, y = load_data('input/train.csv', data_size=None)
eval_index = int(0.9 * len(x))
x_train, y_train = x[0:eval_index], y[0:eval_index]
x_eval, y_eval = x[eval_index:], y[eval_index:]

x_holder = tf.placeholder(dtype=tf.int32,
                          shape=(None, args.seq_length),
                          name='x')
y_holder = tf.placeholder(dtype=tf.int32, shape=None, name='y')

dataset = tf.data.Dataset.from_tensor_slices((x_holder, y_holder))
dataset = dataset.batch(args.batch_size).repeat(args.epochs)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()

model = Graph()
saver = tf.train.Saver()

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.9
Example 24
def main():
    args = cmd_parser()
    model_name = args.model_name

    # Check inputs
    resnetxx = ["ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152"]
    available_models = ["LeNet5", "AttentionLeNet5", "LeCunLeNet5"] + resnetxx
    if args.model_name not in available_models:
        raise ValueError(
            f"""args.model_name {args.model_name} NOT in {available_models}""")

    if args.attention:
        if args.attention_type == "senet":
            model_name = "AttentionLeNet5_SeNet"
        elif args.attention_type == "official":
            model_name = "AttentionLeNet5_Official"

    # Config paths
    date_time = datetime.now().strftime("%Y%m%d-%H%M%S")
    prefix = os.path.join("~", "Documents", "DeepLearningData", "mnist")

    # Prepare data
    dataset = "mnist"
    (train_images,
     train_labels), (test_images, test_labels) = load_data(dataset=dataset,
                                                           if_categorical=True,
                                                           if_expand_dims=True,
                                                           if_normalized=False)

    input_shape = train_images.shape[1:]
    num_classes = train_labels.shape[1]

    # Setup model
    if model_name not in resnetxx:
        model = create_model(model_name,
                             input_shape=input_shape,
                             num_classes=num_classes)
        optimizer = create_optimizer("Adam", learning_rate=0.001)

    # Preprocessing and choose optimizer for ResNet18
    elif model_name in resnetxx:
        model_core = create_model(model_name,
                                  input_shape=(32, 32, 1),
                                  num_classes=num_classes)

        input_ = tf.keras.layers.Input(input_shape, dtype=tf.uint8)
        x = tf.cast(input_, tf.float32)
        # padding 28x28 to 32x32
        x = tf.pad(x, paddings=[[0, 0], [2, 2], [2, 2], [0, 0]])
        x = model_core(x)
        model = tf.keras.Model(inputs=[input_], outputs=[x])
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=polynomial_schedule(0))

    subfix = os.path.join(model_name, date_time)
    ckpt_dir = os.path.expanduser(os.path.join(prefix, subfix, "ckpts"))
    log_dir = os.path.expanduser(os.path.join(prefix, subfix, "logs"))
    os.makedirs(ckpt_dir, exist_ok=True)
    os.makedirs(log_dir, exist_ok=True)

    loss = tf.keras.losses.CategoricalCrossentropy(
        name="categorical_crossentropy")
    from tensorflow.keras.metrics import BinaryAccuracy, CategoricalAccuracy
    metrics = [
        BinaryAccuracy(name="binary_accuracy"),
        CategoricalAccuracy(name="categorical_accuracy")
    ]

    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

    # Define callbacks
    from tensorflow.keras.callbacks import CSVLogger, LearningRateScheduler, TensorBoard, ModelCheckpoint

    lr_scheduler = LearningRateScheduler(polynomial_schedule, verbose=1)
    csv_logger = CSVLogger(os.path.join(log_dir, "training.log.csv"),
                           append=True)
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir,
                                                          histogram_freq=1,
                                                          update_freq="batch")
    # without .h5 extension
    ckpt_filename = "%s-epoch-{epoch:03d}-categorical_accuracy-{categorical_accuracy:.4f}" % model_name
    ckpt_filepath = os.path.join(ckpt_dir, ckpt_filename)
    checkpoint_callback = ModelCheckpoint(filepath=ckpt_filepath,
                                          monitor="categorical_accuracy",
                                          verbose=1,
                                          save_weights_only=True)

    callbacks = [
        csv_logger, lr_scheduler, checkpoint_callback, tensorboard_callback
    ]

    # Fit model
    epochs = 3 if if_fast_run else training_epochs
    model.fit(train_images,
              train_labels,
              validation_data=(test_images, test_labels),
              epochs=epochs,
              batch_size=batch_size,
              callbacks=callbacks)
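polynomial_schedule is referenced by the LearningRateScheduler and by the ResNet optimizer above but is not shown. A plausible stand-in implementing a simple polynomial decay; the initial/final rates and the epoch horizon are assumptions:

def polynomial_schedule(epoch):
    # assumed hyper-parameters; the original project's values are not shown
    initial_lr, final_lr, decay_epochs, power = 1e-3, 1e-5, 100, 1.0
    # fraction of the decay horizon completed so far
    t = min(epoch, decay_epochs) / decay_epochs
    return (initial_lr - final_lr) * (1.0 - t) ** power + final_lr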
Example 25
v4_model = load_extraction_model(config,
                                 input_shape=tuple(config["input_shape"]))
v4_model = tf.keras.Model(inputs=v4_model.input,
                          outputs=v4_model.get_layer(
                              config['v4_layer']).output)
size_ft = tuple(np.shape(v4_model.output)[1:3])
print("[LOAD] size_ft", size_ft)
print("[LOAD] Model loaded")
print()

nb_model = NormBase(config, tuple(config['input_shape']))
# -------------------------------------------------------------------------------------------------------------------
# train

# load data
data = load_data(config)

# predict
preds = v4_model.predict(data[0], verbose=1)
print("[TRAIN] shape prediction", np.shape(preds))

# get feature maps that mimic a semantic selection pipeline
# keep only highest IoU semantic score
eyebrow_preds = preds[..., best_eyebrow_IoU_ft]
print("shape eyebrow semantic feature selection", np.shape(eyebrow_preds))
lips_preds = preds[..., best_lips_IoU_ft]
print("shape lips semantic feature selection", np.shape(lips_preds))

# compute dynamic directly on the feature maps
# eyebrow
dyn_eyebrow_preds = ref_feature_map_neuron(eyebrow_preds,
"""

# load config
config_name = 'NB_morph_space_semantic_pattern_m0002.json'
config = load_config(config_name, path='configs/norm_base_config')

full_train = False

# --------------------------------------------------------------------------------------------------------------------
# train model
if full_train:
    # declare model
    model = NormBase(config, input_shape=tuple(config['input_shape']))

    # load data
    data = load_data(config)

    # fit model
    face_neurons = model.fit(data)

    # save model
    model.save()
else:
    model = NormBase(config, input_shape=tuple(config['input_shape']), load_NB_model=True)

    # load data
    data = load_data(config)
    # plot_sequence(np.array(data[0]).astype(np.uint8), video_name='01_train_sequence.mp4',
    #               path=os.path.join("models/saved", config['config_name']))

    # fit model
Example 27
import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), '../'))

from abcnn.graph import Graph
import tensorflow as tf
from utils.load_data import load_data
from abcnn import args

p, h, y = load_data('input/train.csv', data_size=None)
p_eval, h_eval, y_eval = load_data('input/dev.csv', data_size=1000)

p_holder = tf.placeholder(dtype=tf.int32,
                          shape=(None, args.seq_length),
                          name='p')
h_holder = tf.placeholder(dtype=tf.int32,
                          shape=(None, args.seq_length),
                          name='h')
y_holder = tf.placeholder(dtype=tf.int32, shape=None, name='y')

dataset = tf.data.Dataset.from_tensor_slices((p_holder, h_holder, y_holder))
dataset = dataset.batch(args.batch_size).repeat(args.epochs)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()

# model = Graph(False, False)
model = Graph(True, True)
saver = tf.train.Saver()

config = tf.ConfigProto()
Example 28
import os
import json

import numpy as np
import matplotlib.pyplot as plt

from models.NormBase import NormBase

config_path = '../../configs/norm_base_config'
# config_name = 'norm_base_monkey_test.json'
config_name = 'norm_base_affectNet_sub8_4000.json'
config_file_path = os.path.join(config_path, config_name)
print("config_file_path", config_file_path)

# load norm_base_config file
with open(config_file_path) as json_file:
    config = json.load(json_file)

# load data
data = load_data(config, train=False, sort_by=['image'])
print("[Data] -- Data loaded --")

# create model
norm_base = NormBase(config, input_shape=(224, 224, 3))

# "load" model
load_folder = os.path.join("../../models/saved", config['save_name'])
r = np.load(os.path.join(load_folder, "ref_vector.npy"))
t = np.load(os.path.join(load_folder, "tuning_vector.npy"))
norm_base.set_ref_vector(r)
norm_base.set_tuning_vector(t)
print("[MODEL] Set ref vector", np.shape(r))
print("[MODEL] Set tuning vector", np.shape(t))

# predict tuning vector
Example 29
- config["plot_reduce"]: if True reduces the displayed feature maps to selection in highlight, improves performance
"""
# load config
# t0001: human_anger, t0002: human_fear, t0003: monkey_anger, t0004: monkey_fear  --> plot cnn_output
# t0005: human_anger, t0006: human_fear, t0007: monkey_anger, t0008: monkey_fear  --> plot difference, stride3, highlight max
# t0009: human_anger, t0010: human_fear, t0011: monkey_anger, t0012: monkey_fear  --> plot difference, first, highlight max
# t0013: human_anger, t0014: human_fear, t0015: monkey_anger, t0016: monkey_fear  --> plot difference, first, reduce max
# t0017: human_anger  --> plot difference, stride3, reduce max
# t0100: human_anger  --> plot maximum
# t0104: human_anger  --> plot weighted average
# t0108: human_anger  --> plot 10 biggest values (maximum10)
config = load_config("norm_base_animate_cnn_response_t0001.json", path="configs/norm_base_config")

# load images
images, _ = load_data(config, train=config["dataset"])

# load model
normbase = NormBase(config, (224, 224, 3))

# calculate vector and options for plot
if config["plot_option"] == 'cnn_output':
    # plot cnn_response
    vector_plot = normbase.evaluate_v4(images, flatten=False)
elif config["plot_option"] == 'cnn_output_difference':
    # take difference between response and reference, reference has different options
    response = normbase.evaluate_v4(images, flatten=False)
    if config["difference_option"] == 'first':
        reference = response[0, ...]
    elif 'stride' in config["difference_option"]:
        stride_length = int(config["difference_option"][6:])
config = load_config(config_name)

# fit models with each condition
avatars = ["human_orig", "monkey_orig", "all_orig"]
# avatars = ["human_orig"]
indexes = []
# pca_threshold = [300, 300, 1500]
pca_threshold = [600, 600, 2000]
for i, avatar in enumerate(avatars):
    # modify condition according to loop
    config['train_avatar'] = avatar

    # define and train norm base model
    norm_base = NormBase(config, input_shape=(224, 224, 3))
    norm_base.pca.var_threshold = pca_threshold[i]
    norm_base.fit(load_data(config, train=True),
                  fit_dim_red=True,
                  fit_ref=False,
                  fit_tun=False)

    # get index from the feature having the most variance
    predict_v4 = norm_base.v4_predict
    var_predict = np.std(predict_v4, axis=0)
    index = np.flip(np.argsort(var_predict))[:config['PCA']]

    # save index
    indexes.append(np.array(index))
indexes = np.array(indexes)

# get position within feature maps
v4_shape = norm_base.shape_v4