예제 #1
0
 def test_load_config(self):
     """load_config() must copy every key from the JSON file onto the
     ``config`` module; spot-check the expected RGB mean constant."""
     load_config(self.config_path)
     with open(self.config_path, "r") as f:
         obj = json.load(f)
     # Bug fix: dict.iteritems() is Python 2 only and raises AttributeError
     # on Python 3 — use items() instead.
     for k, v in obj.items():
         assert getattr(config, k, None) == v
     assert config.RGB_MEAN == [123.68, 116.779, 103.939]
예제 #2
0
def main():
    """Start the monitoring node: an XML-RPC control server, a data-receiver
    server backed by RRD files, and a periodic agent liveness check; then
    block forever, dumping live threads once a minute."""
    config = load_config(SERVER_CONFIG_PATH)

    local_host = config.get("server_ip")
    # local_host = get_ip_address(config.get("local_interface"))

    # XML-RPC server exposing MonServer's methods; request logging disabled.
    rpc_port = config.get("rpc_port")
    rpc_server = ThreadingXMLRPCServer((local_host, rpc_port),
                                       logRequests=False)
    #rpc_server.register_function(sign_in)
    #rpc_server.register_function(howru)
    rpc_server.register_instance(MonServer())
    # serve_forever blocks, so it is run on its own thread via threadinglize.
    threadinglize(rpc_server.serve_forever, "rpc_server")()
    logger.info("start RPC server on %s:%d" % (local_host, rpc_port))

    # Data receiver persists incoming metrics through a shared RRD handler;
    # RRD root defaults to tmpfs for fast writes.
    rrd_root = config.get("RRD_root", "/dev/shm")
    rrd_handler = RRDHandler.getInstance(rrd_root)
    ds_port = config.get("ds_port")
    data_server = DataReciever((local_host, ds_port), rrd_handler)
    threadinglize(data_server.serve_forever, "data_server")()
    logger.info("start data server on %s:%d" % (local_host, ds_port))

    #model_int = Interface()
    # Schedule the liveness check; args (True, 0, -1, interval) presumably
    # mean start-now / repeat-forever — confirm scheduled_task's signature.
    agent_timeout = config.get("agent_timeout")
    check_alive_interval = config.get("check_alive_interval")
    scheduled_task(check_alive, "check_alive", True,
                   0, -1, check_alive_interval)(agent_timeout)
    logger.info("check_alive started...")

    # Keep the main thread alive; periodic thread dump aids debugging.
    while True:
        myprint(threading.enumerate())
        sleep(60)
예제 #3
0
파일: notes_app.py 프로젝트: m-j/notes-app
def main():
    """Create the DB schema (if missing) and serve the notes HTTP API."""
    cfg = load_config()

    # Make sure every mapped table exists before accepting requests.
    Base.metadata.create_all(engine)

    app = web.Application()
    # Register the two note endpoints directly on the router.
    app.router.add_get('/notes', get_notes)
    app.router.add_post('/notes', post_note)

    web.run_app(app, port=cfg['port'])
예제 #4
0
def get_connection(db_type='oracle'):
    """Return a lazily-created, cached DB connection.

    db_type: 'oracle' (default) or 'hive'. The connection is memoised on the
    function object (get_connection.oracle_conn / .hive_conn — presumably
    initialised to None elsewhere; confirm), so each backend connects once.
    Raises ValueError for any other db_type.
    """
    logging.info("Connecting to db.")
    cfg = load_config()

    if db_type == 'oracle':
        if get_connection.oracle_conn is None:
            oracle_cfg = cfg['oracle_db']
            # Build the TNS descriptor from the dsn sub-section, then connect
            # with the remaining connection parameters.
            dsn_tns = cx_Oracle.makedsn(**oracle_cfg['dsn'])
            conn_kwargs = oracle_cfg['connection']
            conn_kwargs['dsn'] = dsn_tns
            get_connection.oracle_conn = cx_Oracle.connect(**conn_kwargs)
        return get_connection.oracle_conn

    if db_type == 'hive':
        if get_connection.hive_conn is None:
            get_connection.hive_conn = hive.Connection(**cfg['hive_db'])
        return get_connection.hive_conn

    raise ValueError("DB Type can be : 'oracle' or 'hive' ")
예제 #5
0
    if save:
        plt.savefig(
            os.path.join("models/saved", config['config_name'],
                         "_direction_scatter.png"))


if __name__ == '__main__':
    """
    Example of use
    
    2021/01/04
    - model is trained in separate steps (specified in config)
    """

    config = load_config("norm_base_plotDirections_t0012.json")
    save_name = config["sub_folder"]
    retrain = False

    # model
    try:
        if retrain:
            raise IOError("retrain = True")
        norm_base = NormBase(config,
                             input_shape=(224, 224, 3),
                             save_name=save_name)
    except IOError:
        norm_base = NormBase(config, input_shape=(224, 224, 3))

        norm_base.fit(load_data(config,
                                train=config["train_dim_ref_tun_ref"][0]),
예제 #6
0
2021/01/21
This script checks the format of the dataset and its influence.
The documentation (https://keras.io/api/applications/vgg/) says:
    "Note: each Keras Application expects a specific kind of input preprocessing.
    For VGG19, call tf.keras.applications.vgg19.preprocess_input on your inputs before passing them to the model."
"""
import os
import numpy as np
import tensorflow as tf

from utils.load_config import load_config
from utils.load_data import load_data
from plots_utils.plot_cnn_output import plot_cnn_output

# parameter
config = load_config("norm_base_animate_cnn_response_t0001.json")
path ="models/saved/check_image_format"
# create the output folder on first run (parent dirs must already exist)
if not os.path.exists(path): os.mkdir(path)

# load images
images,_ = load_data(config, train=config["dataset"])
image = images[0]

# check format, i expect RGB [0..255]
# print shape / dtype / value range of the first image to verify the dataset
# is raw (un-preprocessed) RGB before feeding it to VGG's preprocess_input
print("original shape:", image.shape)
print("original format:", image.dtype)
print("original value range:", [np.min(image), np.max(image)])
#original shape: (224, 224, 3)
#original format: float64
#original value range: [0.0, 237.0]
예제 #7
0
import numpy as np

from utils.load_config import load_config
from utils.load_data import load_data
from models.ExampleBase import ExampleBase
from datasets_utils.expressivity_level import segment_sequence
from plots_utils.plot_expressivity_space import plot_expressivity_level
"""
Reproduce the results from the ICANN paper but with the updated VGG pipeline

run: python -m projects.dynamic_facial_expressions_ICANN.03_reproduce_ICANN_ExampleBase
"""

# load config
# config = load_config("EB_reproduce_ICANN_cat.json", path="configs/example_base")
config = load_config("EB_reproduce_ICANN_expressivity.json",
                     path="configs/example_base")

# load model
# load_EB_model=False: build the model fresh instead of loading saved weights
model = ExampleBase(config,
                    input_shape=tuple(config['input_shape']),
                    load_EB_model=False)

# --------------------------------------------------------------------------------------------------------------------
# train model

# load data
# segment both stimuli (index 0) and labels (index 1) the same way;
# segment_sequence presumably cuts seq_length frames starting at
# train_seg_start_idx — confirm against datasets_utils.expressivity_level
train_data = load_data(config)
train_data[0] = segment_sequence(train_data[0], config['train_seg_start_idx'],
                                 config['seq_length'])
train_data[1] = segment_sequence(train_data[1], config['train_seg_start_idx'],
                                 config['seq_length'])
run: python -m tests.NormBase.t11a_dynamic_weighted_xy_pos
"""

# define configuration
config_path = 'NB_t11a_dynamic_xy_pos_m0003.json'

# declare parameters
# hand-picked feature-map indices (by IoU) for eyebrow / lips regions;
# presumably produced by a semantic-unit search script — confirm provenance
best_eyebrow_IoU_ft = [209, 148, 59, 208]
best_lips_IoU_ft = [
    77, 79, 120, 104, 141, 0, 34, 125, 15, 89, 49, 237, 174, 39, 210, 112, 111,
    201, 149, 165, 80, 42, 128, 74, 131, 193, 133, 44, 154, 101, 173, 6, 148,
    61, 27, 249, 209, 19, 247, 90, 1, 255, 182, 251, 186, 248
]

# load config
config = load_config(config_path, path='configs/norm_base_config')

# create directory if non existant
save_path = os.path.join("models/saved", config["config_name"])
if not os.path.exists(save_path):
    os.mkdir(save_path)

# load and define model
# truncate the extraction model at the configured v4 layer so predictions
# are that layer's activations
v4_model = load_extraction_model(config,
                                 input_shape=tuple(config["input_shape"]))
v4_model = tf.keras.Model(inputs=v4_model.input,
                          outputs=v4_model.get_layer(
                              config['v4_layer']).output)
# spatial size (H, W) of the v4 feature maps
size_ft = tuple(np.shape(v4_model.output)[1:3])
print("[LOAD] size_ft", size_ft)
print("[LOAD] Model loaded")
예제 #9
0
"""
2020/12/07
This script plots the result from evaluate_all_nu.py
"""

import os
import numpy as np
import matplotlib.pyplot as plt
from utils.load_config import load_config

config = load_config("norm_base_affectNet_sub8_4000_t0006.json")

if isinstance(config['nu'], list):
    nus = config['nu']
else:
    nus = [config['nu']]

accuracies = np.zeros(len(nus))
for i, nu in enumerate(nus):
    config['nu'] = nu

    save_folder = os.path.join("../../models/saved", config['save_name'],
                               'nu_%f' % nu)
    accuracy = np.load(os.path.join(save_folder, "accuracy.npy"))
    accuracies[i] = accuracy

# create plot
fig = plt.figure(figsize=(15, 10))
plt.plot(nus, accuracies, marker='x')
plt.title("Accuracy over nu")
plt.xlabel("nu")
config_path = 'CNN_t03a_feature_map_positions_face_shape_m0001.json'

# declare parameters
# feature-map indices responding to eyebrows / lips
eyebrow_ft_idx = [
    148, 209, 208, 67, 211, 141, 90, 196, 174, 179, 59, 101, 225, 124, 125, 156
]  # from t02_find_semantic_units
lips_ft_idx = [
    79, 120, 125, 0, 174, 201, 193, 247, 77, 249, 210, 149, 89, 197, 9, 251,
    237, 165, 101, 90, 27, 158, 154, 10, 168, 156, 44, 23, 34, 85, 207
]
ft_idx = [eyebrow_ft_idx, lips_ft_idx]
# row positions at which the maps are sliced — units unclear from here; confirm
slice_pos_eyebrow = 9
slice_pos_lips = 13

# load config
config = load_config(config_path, path='configs/CNN')

# create directory if non existant
save_path = os.path.join("models/saved", config["config_name"])
if not os.path.exists(save_path):
    os.mkdir(save_path)

# load and define model
# truncate the extraction model at the configured v4 layer
model = load_extraction_model(config, input_shape=tuple(config["input_shape"]))
model = tf.keras.Model(inputs=model.input,
                       outputs=model.get_layer(config['v4_layer']).output)
# spatial size (H, W) of the truncated model's output
size_ft = tuple(np.shape(model.output)[1:3])
print("[LOAD] size_ft", size_ft)
print("[LOAD] Model loaded")
print()
# mode: maximum, maximum10, weighted average
# n_feature_map: ???, 1 plot for each map
# plot: columns: picture, response, position mode i --> 5 columns
#       rows: expressions*avatars*frame --> 8 rows

#params
n_feature_map = 54  # seems to be the one with moving eyebrow
# frame indices of the neutral vs full-expression frames per avatar
frame_neutral_human_expression = 0
frame_expression_human_expression = 68
frame_neutral_monkey_expression = 0
frame_expression_monkey_expression = 52

# position-extraction modes compared side by side in the plot
mode_list = ["maximum", "maximum10", "weighted average"]

# load config
config = load_config("norm_base_calculate_position_demo_t0001.json")
# load data
images_human_fear = load_data(config, train=1)[0]
images_monkey_threat = load_data(config, train=2)[0]
# reduce data
# the +150 offsets presumably index a second sequence of 150 frames in the
# same array — confirm the dataset layout
selection_human_expression = [
    frame_neutral_human_expression, frame_expression_human_expression,
    150 + frame_neutral_human_expression,
    150 + frame_expression_human_expression
]
selection_monkey_expression = [
    frame_neutral_monkey_expression, frame_expression_monkey_expression,
    150 + frame_neutral_monkey_expression,
    150 + frame_expression_monkey_expression
]
images = np.concatenate([
    #
    # # plot
    # plt.figure()
    # plt.plot(preds1[:, -1, -1, 0], label="conv1")
    # plt.plot(preds2[:, -1, -1, 0], label="block3_pool")
    # plt.plot(raw_data[:, -1, -1, 0], label="pixel")
    # plt.legend()
    # plt.title("amplification conv1 vs. block3_pool")
    # plt.savefig(os.path.join("models/saved", config["config_name"], "05_amplification_fm1.png"))

    # ------------------------------------------------------------------------------------------------------------------
    # test 6: test martin image
    import pandas as pd

    config_name = 'CNN_t01_martin_test_m0001.json'
    config = load_config(config_name, path='configs/CNN')

    # create the per-config output folder on first run
    if not os.path.exists(os.path.join("models/saved", config["config_name"])):
        os.mkdir(os.path.join("models/saved", config["config_name"]))

    # load csv
    df = pd.read_csv(config['csv'], index_col=0)
    # df["category"] = df["category"].astype(int)

    # get only human c2 = category 1
    # NOTE(review): the category column is compared as the string "1" —
    # confirm the CSV stores categories as strings, not ints
    category = "1"
    df = df[df['category'].isin([category])]

    # load data
    data = load_from_csv(df, config)
    print("shape data", np.shape(data[0]))
예제 #13
0
        loss = compute_loss(obs, actions, weights=rewards2go)
        loss.backward()
        optimizer.step()
        return

    for epoch in range(config.epochs):
        epoch_mean_reward = run_epoch()
        writer.add_scalar(args.run_name + '_mean_epoch_reward',
                          epoch_mean_reward, epoch)
        train()

    return


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default='config/reinforce.yaml')
    parser.add_argument('--run_name', type=str, default='reinforce_exp')
    args = parser.parse_args()

    # Bug fix: honour the --config argument. Previously the path was
    # hard-coded to 'config/reinforce.yaml', silently ignoring args.config.
    config = load_config(args.config)
    writer = SummaryWriter(config.logdir)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Should store configs in some log file for easy matching against TensorBoard runs.
    for key, value in config._asdict().items():
        print(f'{key}: {value}')

    REINFORCE(config)
    writer.close()
예제 #14
0
파일: main.py 프로젝트: ajpkim/AlphaConnect
                    help='log file location.')
parser.add_argument('--steps',
                    type=int,
                    default=1000,
                    help='Number of self-play games and training steps.')
parser.add_argument('--checkpoint_freq',
                    type=int,
                    default=250,
                    help='Number of steps between each checkpoint.')
parser.add_argument('--update_freq',
                    type=int,
                    default=100,
                    help='Frequency of step and time updates')

ARGS = parser.parse_args()
config = load_config(ARGS.config_file)

# working directories for training artefacts (relative to CWD)
checkpoint_dir = './checkpoints'
model_dir = './models'
game_history_dir = './game_history'
replay_memory_dir = './replay_memory'

# create any missing artefact directories (including parents)
for directory in [
        checkpoint_dir, model_dir, game_history_dir, replay_memory_dir
]:
    if not os.path.exists(directory):
        os.makedirs(directory)

# seed both torch and numpy for reproducible runs
torch.manual_seed(config.random_seed)
np.random.seed(config.random_seed)
예제 #15
0
import numpy as np
from utils.load_config import load_config
from utils.PatternFeatureReduction import PatternFeatureSelection

# compact float printing for the data dumps below
np.set_printoptions(precision=3)
"""
small script to test the Pattern feature selection pipeline using RBF templates

run: python -m tests.RBF.t02_pattern_ft_selection
"""

# define configuration
config_path = 'RBF_t02_pattern_ft_selection_m0001.json'
# load config
config = load_config(config_path, path='configs/RBF')

# 7x7 map holding a small smoothed bump centred on cell (2, 2)
data = np.zeros((7, 7))
data[1:4, 1:4] = np.array([[.2, .5, .2],
                           [.5, 1.0, .5],
                           [.2, .5, .2]])
print("data")
print(data)
# expand data for RBF: add batch and channel axes -> shape (1, 7, 7, 1)
data = data[np.newaxis, ..., np.newaxis]
예제 #16
0
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine

from utils.load_config import load_config

# Declarative base class shared by all ORM models.
Base = declarative_base()

# Engine built from the configured database URL; echo=True logs all SQL.
engine = create_engine(load_config()['database'], echo=True)
# Session factory bound to the engine; call Session() per unit of work.
Session = sessionmaker(bind=engine)
예제 #17
0
import unittest
import sys
import os

import numpy as np
import cv2

import pycocotools.mask
from pycocotools.coco import COCO

# When run directly (not via a test runner), make the project root importable.
if __name__ == "__main__":
    sys.path.insert(0, os.getcwd())
from utils.image import *
from utils.load_config import load_config
# load_config() presumably populates the config module's attributes, so it
# must run before `import config` is used — confirm load_config's side effects.
load_config()
import config


class TestImage(unittest.TestCase):
    def setUp(self):
        """Load the COCO annotations and the first val2014 image for each test."""
        self.annotations_file = "data/coco/annotations/instances_minival2014.json"
        # NOTE(review): unittest creates a fresh instance per test, so this
        # getattr guard never hits — COCO is re-parsed for every test method.
        if getattr(self, 'coco', None) is None:
            self.coco = COCO(self.annotations_file)
        # Resolve the on-disk path of the first image id via the project's
        # name/path format strings.
        image_name = config.IMAGE_NAME_FORMAT % ('val2014',
                                                 self.coco.getImgIds()[0])
        image_path = config.IMAGE_PATH_FORMAT % ('val2014', image_name)
        self.image = load_image(image_path)

    def test_load_image(self):
        blob = sub_mean(self.image)
        image_mean = np.mean(np.mean(self.image, axis=0), axis=0)
예제 #18
0
    - "weighted average": plot index of weighted average of activation
- config["highlight_option"]: choose which feature maps to highlight, based on plotted property (plot_vector)
    - "maximum": highlight maps with maximum value
- config["plot_reduce"]: if True reduces the displayed feature maps to selection in highlight, improves performance
"""
# NOTE(review): removed stray Git merge-conflict marker ("<<<<<<< HEAD")
# load config
# t0001: human_anger, t0002: human_fear, t0003: monkey_anger, t0004: monkey_fear  --> plot cnn_output
# t0005: human_anger, t0006: human_fear, t0007: monkey_anger, t0008: monkey_fear  --> plot difference, stride3, highlight max
# t0009: human_anger, t0010: human_fear, t0011: monkey_anger, t0012: monkey_fear  --> plot difference, first, highlight max
# t0013: human_anger, t0014: human_fear, t0015: monkey_anger, t0016: monkey_fear  --> plot difference, first, reduce max
# t0017: human_anger  --> plot difference, stride3, reduce max
# t0100: human_anger  --> plot maximum
# t0104: human_anger  --> plot weighted average
# t0108: human_anger  --> plot 10 biggest values (maximum10)
config = load_config("norm_base_animate_cnn_response_t0001.json", path="configs/norm_base_config")

# load images
images,_ = load_data(config, train=config["dataset"])

# load model
# NormBase built untrained here; only its v4 feature extraction is used below
normbase = NormBase(config,(224,224,3))

# calculate vector and options for plot
if config["plot_option"]=='cnn_output':
    # plot cnn_response
    vector_plot = normbase.evaluate_v4(images, flatten=False)
elif config["plot_option"]=='cnn_output_difference':
    # take difference between response and reference, reference has different options
    response = normbase.evaluate_v4(images, flatten=False)
    if config["difference_option"]=='first':
예제 #19
0
"""

import os
import numpy as np
import matplotlib.pyplot as plt
from utils.load_config import load_config
from models.NormBase import NormBase
from utils.load_data import load_data

# the two data-distance experiment configs compared by this script
config_names = [
    "norm_base_data_distance_t0001.json", "norm_base_data_distance_t0002.json"
]
# filled with one fitted/loaded model per config in the loop below
models = []

for i, config_name in enumerate(config_names):
    config = load_config(config_name, path="configs/norm_base_config")
    save_name = config["sub_folder"]
    retrain = False

    # model
    try:
        if retrain:
            raise IOError("retrain = True")
        norm_base = NormBase(config,
                             input_shape=(224, 224, 3),
                             save_name=save_name)
    except IOError:
        norm_base = NormBase(config, input_shape=(224, 224, 3))

        norm_base.fit(load_data(config,
                                train=config["train_dim_ref_tun_ref"][0]),
"""

import os
import numpy as np
import matplotlib.pyplot as plt

from utils.load_config import load_config
from models.NormBase import NormBase

# t0001: 2-norm     t0002: 1-norm   t0003: simplified   t0004: direction-only   t0005: expressitivity-direction
# t0006: 2-norm-monkey_morph

do_reverse = False
do_normalize = False

config = load_config("norm_base_reproduce_ICANN_t0015.json",
                     path="configs/norm_base_config")
save_name = config["sub_folder"]
save_folder = os.path.join("models/saved", config['save_name'], save_name)
accuracy = np.load(os.path.join(save_folder, "accuracy.npy"))
it_resp = np.load(os.path.join(save_folder, "it_resp.npy"))
labels = np.load(os.path.join(save_folder, "labels.npy"))
norm_base = NormBase(config, input_shape=(224, 224, 3))

print("accuracy", accuracy)
print("it_resp.shape", it_resp.shape)
print("labels.shape", labels.shape)

# colors 0=Neutral=Black, 1=Threat=Yellow, 2=Fear=Blue, 3=LipSmacking=Red
colors = config['colors']
titles = config['condition']
seq_length = config['seq_length']
# toy example: lay a flat index out over a (row, col, feature-map) stack to
# see how np.reshape / np.unravel_index order the axes
test = np.arange(36)
n_feature_map = 4
feature_map_size = 3
t = test.reshape((feature_map_size, feature_map_size, n_feature_map))
# show each 3x3 feature map in turn
for fm_idx in range(n_feature_map):
    print(t[..., fm_idx])
# recover the (row, col, map) coordinates of flat index 17
x, y, z = np.unravel_index(17,
                           (feature_map_size, feature_map_size, n_feature_map))
#x, y, z = get_feature_map_index(17, n_feature_map, feature_map_size)
print("({}, {}, {})".format(x, y, z))

# load configuration
config_name = "norm_base_investigate_PCA_m0001.json"
config = load_config(config_name)

# fit models with each condition
avatars = ["human_orig", "monkey_orig", "all_orig"]
# avatars = ["human_orig"]
indexes = []
# one PCA variance threshold per avatar condition (same order as `avatars`)
# pca_threshold = [300, 300, 1500]
pca_threshold = [600, 600, 2000]
for i, avatar in enumerate(avatars):
    # modify condition according to loop
    config['train_avatar'] = avatar

    # define and train norm base model
    norm_base = NormBase(config, input_shape=(224, 224, 3))
    norm_base.pca.var_threshold = pca_threshold[i]
    norm_base.fit(load_data(config, train=True),
예제 #22
0
- use selection_array to reduce the array of positions (recommended by Tim)
- use highlight_intersect to reduce the feature maps
"""

import numpy as np
import matplotlib.pyplot as plt
import os

from utils.load_config import load_config
from utils.load_data import load_data
from models.NormBase import NormBase
from utils.plot_cnn_output import plot_cnn_output
from utils.calculate_position import calculate_position

# load config
config = load_config("norm_base_config_plot_frequency_time_t0001.json")

# load data --> 150 x 224 x 224 x 3
images,_ = load_data(config, train=1)

# load NormBase
norm_base = NormBase(config, (224,224,3))

# (train NormBase)--> maybe necessary, but not atm

# get_preds --> 150 x 512, x and y concatenated
positions = norm_base.get_preds(images)

# calculate fft over time and leave out constant term
# NOTE(review): the DC term (index 0) is NOT dropped here despite the
# comment — presumably sliced off later, outside this excerpt; confirm.
rfft = np.fft.rfft(positions, axis=0)
rfft_abs = np.abs(rfft)
import os

from plots_utils.plot_sequence import plot_sequence
from utils.load_config import load_config
from utils.load_data import load_data
from models.NormBase import NormBase

"""
test a face transfer using norm base mechanism

run: python -m projects.facial_shape_expression_recognition_transfer.03_face_part_semantic_feature_map_selection
"""

# load config
config_name = 'NB_morph_space_semantic_pattern_m0002.json'
config = load_config(config_name, path='configs/norm_base_config')

# flag gating the expensive training path below; False skips it entirely
full_train = False

# --------------------------------------------------------------------------------------------------------------------
# train model
if full_train:
    # declare model
    model = NormBase(config, input_shape=tuple(config['input_shape']))

    # load data
    data = load_data(config)

    # fit model
    face_neurons = model.fit(data)
예제 #24
0
import tornado.ioloop
from tornado import ioloop, web
from tornado.platform.asyncio import AsyncIOMainLoop

from handlers.add_packages_handler import AddPackagesHandler
from handlers.get_packages_handler import GetPackagesHandler
from handlers.hello_handler import HelloHandler
from handlers.package_info_handler import PackageInfoHandler
from package_management.package_manager import PackageManager
from package_management.paths_util import PathsUtil
from package_retention.retention_manager import RetentionManager
from security.privilege_validator import PrivilegeValidator
from utils import load_config
import os

# Module-level config shared by the handlers/setup code below (loaded once at import).
config = load_config.load_config()


def setup_logging():
    logger = logging.getLogger()
    formatter = logging.Formatter("%(asctime)s [%(levelname)s] - %(message)s")

    logger.setLevel(logging.DEBUG)

    logs_folder = config['logsFolder']
    os.makedirs(logs_folder, exist_ok=True)

    file_handler = RotatingFileHandler(os.path.join(logs_folder,
                                                    'zippero-server.log'),
                                       maxBytes=1024 * 1024 * 10,
                                       backupCount=3)
예제 #25
0
def server(argv):
    """Parse CLI args, merge YAML configs, wire up caches and handlers, and
    run the HTTPS Tornado server (blocks until the IOLoop stops)."""
    args = ctparser.parse_args(argv)
    # Start from the template config, then overlay the user-supplied file.
    confFile = 'conf/server.yaml' if len(args.conf) == 0 else os.path.join(
        odir, args.conf)
    config = load_config('conf/server.yaml.template')
    config_new = load_config(confFile)
    update_config(config, config_new)
    if len(args.override) > 0:
        for line in args.override.split(';'):
            # SECURITY(review): exec() on a CLI-supplied string executes
            # arbitrary code — acceptable only for fully trusted operators.
            exec('config.' + line)
    if args.port > 0:
        config.tornado.port = args.port

    signal.signal(signal.SIGINT, signal_handler)
    settings = {
        "template_path": os.path.join(os.path.dirname(__file__), "public"),
        "cookie_secret": config.tornado.secret,
        "xsrf_cookies": config.tornado.xsrf,
        "login_url": "/p/login",
        "static_path": "public"
    }
    # Imported inside the function — presumably to avoid circular imports;
    # confirm before hoisting to module level.
    import handler
    from auth import authFuncs
    from file import fileCache
    from model import modelCache
    from task import taskCache
    from user import userCache
    from utils.conn2db import conn2db
    # One shared DB connection feeds every cache; authFuncs sees all of them.
    conn = conn2db(config)
    cfile = fileCache(conn, config)
    cmodel = modelCache(conn, config)
    ctask = taskCache(conn, config)
    cuser = userCache(conn, config)
    cauth = authFuncs(conn, config, cfile, cuser, ctask, cmodel)

    # Shared handler settings; the D/S variants add a static-file root each.
    RSettings = {
        'authCache': cauth,
        'userCache': cuser,
        'taskCache': ctask,
        'modelCache': cmodel,
        'fileCache': cfile,
        'config': config.tornado
    }
    DSettings = copy(RSettings)
    DSettings['path'] = os.path.join(curdir, 'data')
    SSettings = copy(RSettings)
    SSettings['path'] = os.path.join(curdir, 'static')

    application = tornado.web.Application([
        (r'/p/login', handler.LoginHandler, RSettings),
        (r'/logout', handler.LogoutHandler),
        (r'(/request|/auth)$', handler.CommonRequestHandler, RSettings),
        (r'/data/(.*?)$', handler.AuthStaticFileHandler, DSettings),
        (r'/site/(.*?)$', handler.AuthStaticFileHandler, SSettings),
        (r'/p/(.*?)$', tornado.web.StaticFileHandler, {
            'path': os.path.join(curdir, 'public')
        }),
        (r'/(.*?)$', handler.DefaultRedirectHandler),
    ], **settings)

    # TLS termination with on-disk cert/key relative to curdir.
    server = tornado.httpserver.HTTPServer(
        application,
        ssl_options={
            "certfile": os.path.join(curdir, "ssl/auth.crt"),
            "keyfile": os.path.join(curdir, "ssl/auth.key")
        })

    # NOTE(review): the listen port is re-derived from raw sys.argv here,
    # ignoring both --port (args.port) and config.tornado.port set above —
    # confirm which source is intended to win.
    port = 8888 if len(sys.argv) <= 1 else int(sys.argv[1])
    server.listen(port)
    # Poll try_exit every 100 ms — presumably lets the SIGINT handler request
    # a clean IOLoop shutdown; confirm try_exit's behaviour.
    tornado.ioloop.PeriodicCallback(try_exit, 100).start()
    tornado.ioloop.IOLoop.instance().start()
run: python -m projects.dynamic_facial_expressions_ICANN.01_reproduce_ICANN_NormBase
"""

import numpy as np
import os

from utils.load_data import load_data
from utils.load_config import load_config
from models.NormBase import NormBase
from datasets_utils.expressivity_level import segment_sequence

# t0001: 2-norm     t0002: 1-norm   t0003: simplified   t0004: direction-only   t0005: expressitivity-direction
# t0006: 2-norm-monkey_morph

# config = load_config("norm_base_reproduce_ICANN_t0015.json", path="configs/norm_base_config")
config = load_config("NB_reproduce_ICANN_m0002.json",
                     path="configs/norm_base_config")
# config["tau_u"] = 3
# config["tau_v"] = 3
# override the loaded time constants for this experiment
config['tau_y'] = 2  # 15
config['tau_d'] = 2

# --------------------------------------------------------------------------------------------------------------------
# train model
# load training data
# segment stimuli (index 0) and labels (index 1) identically into
# seq_length-frame windows starting at train_seq_start_idx
train_data = load_data(config)
train_data[0] = segment_sequence(train_data[0], config['train_seq_start_idx'],
                                 config['seq_length'])
train_data[1] = segment_sequence(train_data[1], config['train_seq_start_idx'],
                                 config['seq_length'])
print("[LOAD] Shape train_data segmented", np.shape(train_data[0]))
print("[LOAD] Shape train_label segmented", np.shape(train_data[1]))
from models.NormBase import NormBase

# when True, force retraining instead of loading a saved model
retrain = False

# one experiment config per avatar condition, in order: human, monkey, both
config_names = [
    "norm_base_investigate_layer_t0001.json",
    "norm_base_investigate_layer_t0002.json",
    "norm_base_investigate_layer_t0003.json",
]

# load every configuration up front
configs = [load_config(name) for name in config_names]

# train models/load model
norm_base_list = []
for config in configs:
    try:
        if retrain:
            raise IOError("retrain = True")
        norm_base = NormBase(config,
                             input_shape=(224, 224, 3),
                             save_name=config["sub_folder"])
    except IOError:
        norm_base = NormBase(config, input_shape=(224, 224, 3))
        norm_base.fit(load_data(config,
                                train=config["train_dim_ref_tun_ref"][0]),
                      fit_dim_red=True,
예제 #28
0
"""
2021/01/18
This script plots the network response to basic shapes and the movement of them.
Look under load_data._load_basic_shapes for all the possibilities of basic shapes.
"""

import numpy as np
import tensorflow as tf
import os

from plots_utils.plot_cnn_output import plot_cnn_output
from utils.load_config import load_config
from utils.load_data import load_data

# load config
config = load_config("norm_base_basic_shape_t0002.json")

# make folder
# output folder is resolved relative to this script's location — confirm CWD
folder = os.path.join("../../models/saved", config["save_name"])
if not os.path.exists(folder):
    os.mkdir(folder)

# cnn
# ImageNet-pretrained VGG19 truncated at block3_pool: predictions are that
# layer's feature maps rather than classification logits
model = tf.keras.applications.VGG19(include_top=False,
                                    weights="imagenet",
                                    input_shape=(224, 224, 3))
model = tf.keras.Model(inputs=model.input,
                       outputs=model.get_layer("block3_pool").output)

# calculate and plot response
images = load_data(config, train=config["subset"])