Example #1
import numpy as np

import build_model
import prepare


# train_json, train_img_folder and mxlen are module-level settings that this
# snippet elides (Example #2 shows representative values)
def main(dataset="babl"):
    if dataset == "nlvr":
        data = prepare.load_data(train_json)
        data = prepare.tokenize_data(data, mxlen)
        imgs, ws, labels = prepare.load_images(train_img_folder,
                                               data,
                                               debug=True)
        data.clear()
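        # standardize the images to zero mean and unit variance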
        imgs_mean = np.mean(imgs)
        imgs_std = np.std(imgs - imgs_mean)
        imgs = (imgs - imgs_mean) / imgs_std

    else:
        (inputs_train, queries_train, answers_train,
         inputs_test, queries_test, answers_test) = prepare.get_babl_data()

    epochs = 100
    model = build_model.model()
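    # note: the fit call below uses the babl variables; with dataset="nlvr"
    # inputs_train/queries_train/answers_train are never defined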
    model.fit([inputs_train, queries_train],
              answers_train,
              validation_split=0.1,
              epochs=epochs)
    model.save('model')
Example #2
from keras.layers import Activation
from keras.layers.convolutional import Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam

import numpy as np  # used by the image normalization below

import gc
import prepare
import subprocess

mxlen = 32
embedding_dim = 50
lstm_unit = 128
MLP_unit = 128
epochs = 100

train_json = 'nlvr\\train\\train.json'
train_img_folder = 'nlvr\\train\\images'
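# load the NLVR sentences, tokenize them, and pair each one with its images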
data = prepare.load_data(train_json)
data = prepare.tokenize_data(data, mxlen)
imgs, ws, labels = prepare.load_images(train_img_folder, data, debug=True)
data.clear()
imgs_mean = np.mean(imgs)
imgs_std = np.std(imgs - imgs_mean)
imgs = (imgs - imgs_mean) / imgs_std

epochs = 100
batch_size = 64


def bn_layer(x, conv_unit):
    def f(inputs):
        md = Conv2D(x, (conv_unit, conv_unit), padding='same')(inputs)
        md = BatchNormalization()(md)
        # the source snippet is truncated here; a plausible completion:
        return Activation('relu')(md)

    return f
Example #3
import pickle
import subprocess

import numpy as np  # used by the normalization below
import prepare

mxlen = 32
embedding_dim = 50
lstm_unit = 64
MLP_unit = 128
epochs = 200
batch_size = 256
l2_norm = 1e-4

train_json = 'nlvr\\train\\train.json'
train_img_folder = 'nlvr\\train\\images'
test_json = 'nlvr\\dev\\dev.json'
test_img_folder = 'nlvr\\dev\\images'
data = prepare.load_data(train_json)
prepare.init_tokenizer(data)
data = prepare.tokenize_data(data, mxlen)
imgs, ws, labels = prepare.load_images(train_img_folder, data)
data.clear()

test_data = prepare.load_data(test_json)
test_data = prepare.tokenize_data(test_data, mxlen)
test_imgs, test_ws, test_labels = prepare.load_images(test_img_folder, test_data)
test_data.clear()

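# the normalization statistics are computed on the training images only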
imgs_mean = np.mean(imgs)
imgs_std = np.std(imgs - imgs_mean)
imgs = (imgs - imgs_mean) / imgs_std

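# the test images are standardized with the training mean/std to avoid leakage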
test_imgs = (test_imgs - imgs_mean) / imgs_std
Example #4
from prepare import load_data

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split

(feature, labels) = load_data()

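# hold out 10% of the samples as a test split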
x_train, x_test, y_train, y_test = train_test_split(feature,
                                                    labels,
                                                    test_size=0.1)
categories = [
    'Black Samurai', 'Blue Rim', 'Crown Tail', 'Cupang Sawah', 'Halfmoon'
]

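# a small CNN over 224x224 RGB images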
input_layer = tf.keras.layers.Input([224, 224, 3])

conv1 = tf.keras.layers.Conv2D(filters=32,
                               kernel_size=(5, 5),
                               padding='same',
                               activation='relu')(input_layer)
pool1 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv1)

conv2 = tf.keras.layers.Conv2D(filters=64,
                               kernel_size=(3, 3),
                               padding='same',
                               activation='relu')(pool1)
pool2 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv2)

conv3 = tf.keras.layers.Conv2D(filters=96,
                               kernel_size=(3, 3),
                               padding='same',
                               activation='relu')(pool2)  # assumed arguments; the source snippet is truncated here
Example #5
import time

import matplotlib.pyplot as plt

import prepare


# the source snippet begins mid-function; the signature below is an assumption
def plot_results_multiple(predicted_data, prediction_len):
    for i, data in enumerate(predicted_data):
        padding = [None for p in range(i * prediction_len)]
        plt.plot(padding + data, label='Prediction')
        plt.legend()
    plt.show()


# Main Run Thread
if __name__ == '__main__':
    global_start_time = time.time()
    epochs = 60
    seq_len = 30

    print('> Loading data... ')

    X_train, y_train, X_test, y_test = prepare.load_data(seq_len)

    print('> Data Loaded. Compiling...')

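    # [1, 30, 60, 1] is presumably input dim, two layer sizes, and output dim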
    model = prepare.build_model([1, 30, 60, 1])

    model.fit(X_train,
              y_train,
              batch_size=100,
              epochs=epochs,  # `nb_epoch` was renamed to `epochs` in Keras 2
              validation_split=0.05)

    # predictions = lstm.predict_sequences_multiple(model, X_test, seq_len, 2)
    # predictions = lstm.predict_sequence_full(model, X_test, seq_len)
    predictions = prepare.predict_point_by_point(model, X_test)

Example #6
import numpy as np
import pandas as pd
from patsy import dmatrix

from prepare import load_data
from tmp.analysis import get_path, pj

params = 'atvz'
traditional_alpha = 0.05
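# Bonferroni correction over 3 * 4 = 12 tests, presumably 3 effects per parameter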
bonf_alpha = traditional_alpha / float(3 * 4)
depends = ['v', 'a', 't', 'z']
include = ['sv', 'st', 'sz']
path = get_path(depends, include)
data = make_summary()  # make_summary's import is elided in the source snippet
data['group'] = dmatrix('0 + C(s, [[0], [1]])', {'s': data.group})
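# full design matrix with age, group, and their interaction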
dmat = dmatrix('age * group', data)
coeffs = dmat.design_info.column_names

print(load_data())

raw_samples = pd.read_csv(pj(path, 'all_samples.csv'), index_col=0)

for i, p in enumerate(params, 1):

    cols = ['%s_%s' % (p, c) for c in coeffs]
    if p == 'z':
        cols[0] += '_trans'
    print(cols)
    means = raw_samples[cols].mean(axis=0).values
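    # predicted parameter values: design matrix times posterior-mean coefficients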
    predicted = np.asarray(dmat * means).sum(axis=1)
    data[p] = predicted

    std_group = np.asarray(dmatrix('0 + C(s, [[0], [1]])', {'s': data.group})).std()
    std_age = data.age.std()
Example #7
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
from scipy.stats import linregress

from prepare import load_data
from tmp.analysis import get_path, pj

depends = ['v', 'a', 't', 'z']
include = ['sv', 'st']
path = get_path(depends, include)
data = load_data()
ppc_data = pd.read_csv(pj(path, 'ppc_data.csv'))

simulations = 250
results = {'real': []}
_results = {'simulation %i' % k: [] for k in range(simulations)}
results.update(_results)

for node, _ppc_data in ppc_data.groupby('node'):

    subj = int(node.split('.')[1])
    subj_data = data[data.subj_idx == subj]
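    # accuracy-code the RTs: correct responses positive, errors negative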
    corr_array = (subj_data.answer == subj_data.response).astype(int) * 2 - 1
    accuracy_coded_rts = corr_array * subj_data.rt
    ntrials = len(accuracy_coded_rts)
    ncorrect = len(accuracy_coded_rts[accuracy_coded_rts > 0])
    entry = {
        'subj': subj,
        # the source snippet is truncated here; presumably the counts computed
        # above are stored as well, e.g.:
        'ntrials': ntrials,
        'ncorrect': ncorrect,
    }
Example #8
import numpy as np
import pandas as pd
from patsy import dmatrix

import hddm

from prepare import load_data
from tmp.analysis import pj


def remake_model(path):

    data = load_data()
    data['group'] = dmatrix('0 + C(s, [[0], [1]])', {'s': data.group})
    data['stimulus'] = dmatrix('0 + C(s, [[1], [-1]])', {'s': data.answer})
    data = data[['subj_idx', 'age', 'group', 'stimulus', 'response', 'rt']]
    print('csvs look like this:')
    print(data.head())

    def v_link_func(x, d=data):
        # stimulus-coded drift: flip the sign of v according to the stimulus
        stim = np.asarray(dmatrix('0 + C(s, [[1], [-1]])',
                                  {'s': d.stimulus.loc[x.index]}))
        return x * stim

    depends_on, include = path.split('/')

    if include == 'none':
        _include = ['z']
    else:
        _include = ['z'] + include.split(',')

    if 'unconstrained' not in path:

        d = '%s ~ age * group'

        lfs = {
            'a': lambda a: a,
            't': lambda t: t,
            'v': v_link_func,
            'z': lambda z: 1 / (1 + np.exp(-z))  # inverse logit
        }
        models = [{'model': d % p, 'link_func': lfs[p]}
                  for p in depends_on.split(',')]

        print('constructing model ...')
        m = hddm.HDDMRegressor(
            data=data, models=models, include=_include,
            group_only_regressors=True, keep_regressor_trace=True,
            group_only_nodes='vatz'
        )

    else:

        print('constructing model ...')
        m = hddm.HDDMRegressor(
            data=data,
            models=[{'model': 'v ~ stimulus', 'link_func': v_link_func}],
            include=_include,  # _include already starts with 'z'
            group_only_regressors=False,
            keep_regressor_trace=True,
            group_only_nodes=_include,
            informative=False
        )

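    # a handful of samples just to instantiate the trace objects before they
    # are overwritten from disk below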
    m.sample(3)
    stochs = m.get_stochastics()

    dfs = [pd.read_csv(pj(path, 'samples_%i.csv' % i), index_col=0).loc[3000:]
           for i in range(4)]
    df = pd.concat(dfs)
    del dfs
    print(df.columns)

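    # overwrite each stochastic node's trace with the samples saved on disk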
    for node in stochs.node:
        print(node.__name__)
        node.trace._trace[0] = df[node.__name__.replace('age:group', 'group:age')].values

    m.gen_stats()
    m.print_stats(pj(path, 'stats.csv'), True)
    m.get_traces().to_csv(pj(path, 'all_samples.csv'))