def main(args):
	if args.model is not None:
		model_class = Model_lstm
		model = model_class()
	elif args.file is not None:
		model = tf.keras.models.load_model(f"models/{args.file}.h5")
	else:
		raise ValueError("No valid model specified")

	model.summary()
	utils.plot_model(model, args.file)
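
A minimal argparse sketch for the `args` object this example expects; the
--model and --file flag names are inferred from the attribute accesses above
and are assumptions, not part of the original listing:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--model", default=None)  # model class name to instantiate
parser.add_argument("--file", default=None)   # basename of a saved models/<file>.h5
main(parser.parse_args())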
Example #2
def main(args):
    model = None
    use_mfcc = False
    use_gabor = False

    # Select the model architecture requested on the command line
    if args.model == "model_1":
        model_class = Model_1
        model = model_class()
    elif args.model == "model_mfcc":
        use_mfcc = True
        model_class = Model_mfcc
        model = model_class()
    elif args.model == "model_lstm":
        model_class = Model_lstm
        model = model_class()

    if model is None:
        raise ValueError(f"Unknown model: {args.model}")

    checkpoint_prefix = os.path.join(Config.CHECKPOINTS_DIR, "ckpt_{epoch}")
    log_dir = os.path.join(Config.TENSORBOARD_LOGDIR, "fit",
                           datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))

    callbacks = [
        # Interrupt training if `val_loss` stops improving for over 2 epochs
        # tf.keras.callbacks.EarlyStopping(patience=10, monitor='val_loss'),
        # tf.keras.callbacks.ModelCheckpoint(checkpoint_prefix, save_best_only=True, period=2),
        # Write TensorBoard logs to `./logs` directory
        tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
    ]

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    trained_model = train(model=model,
                          epochs=args.epochs,
                          callbacks=callbacks,
                          use_mfcc=use_mfcc,
                          use_gabor=use_gabor)

    if args.save_model:
        # The model description can also be saved as a JSON
        # model.save_weights(f"{args.model}.h5", overwrite=False, save_format="HDF5")
        # new_model = model_class()

        # dataset, _ = create_training_dataset(batch_size=5, use_mfcc=use_mfcc, use_gabor=use_gabor)
        # new_model = init_model(new_model, dataset)
        # new_model.load_weights(f"{args.model}.h5")
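        # A minimal sketch of the JSON route mentioned above (an assumption,
        # not part of the original run):
        #   json_config = trained_model.to_json()
        #   rebuilt = tf.keras.models.model_from_json(json_config)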
        trained_model.save("latest.h5", overwrite=False)

    if args.print_summary:
        loaded_model = tf.keras.models.load_model("latest.h5")
        loaded_model.summary()
        utils.plot_model(loaded_model)
Example #3
DURATION = 250 * pq.ms

# # The JIT model is compiled
# The first model evaluation implicitly compiles the model to C code; from that point onwards each evaluation takes only fractions of a millisecond, which is fast for Python, as the timings below show.
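
# A self-contained stand-in for the compile-once behaviour described above
# (an illustrative sketch, not the notebook's actual model API):

import time
from functools import lru_cache

@lru_cache(maxsize=None)      # caches the "compiled" evaluator, like the implicit C compilation
def compile_model():
    time.sleep(0.5)           # stand-in for the one-off compilation cost
    return lambda: 0.0        # stand-in for the compiled evaluator

for trial in range(3):
    start = time.perf_counter()
    compile_model()()         # the first call pays the compile cost, later calls do not
    print('trial {}: {:.4f} s'.format(trial, time.perf_counter() - start))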

# In[2]:

IinRange = [60, 70, 85, 100]
params = {}
params['amplitude'] = 500 * pq.pA
params['delay'] = DELAY
params['duration'] = 600 * pq.ms
fig_title = 'Layer 5 regular spiking (RS) pyramidal cell (fig 8.12)'
plot_model(IinRange,
           reduced_cells,
           params,
           cell_key='RS',
           title=fig_title,
           timed=True)

# In[3]:

IinRange = [290, 370, 500, 550]
params = {}
params['delay'] = DELAY
params['duration'] = 600 * pq.ms
fig_title = 'Layer 5 intrinsic bursting (IB) pyramidal cell (fig 8.19)'
plot_model(IinRange, reduced_cells, params, cell_key='IB', title=fig_title)

# In[4]:

IinRange = [200, 300, 400, 600]
Example #4
import argparse
import glob
from pathlib import Path

import pandas as pd

# plot_model is assumed to come from the project's own plotting utilities


def pull_data(output_dir: str) -> pd.DataFrame:
    """ Pulls and concatenates separate csvs from model run"""
    data_files = glob.glob(f"{output_dir}/*.csv")
    df = pd.concat((pd.read_csv(f) for f in data_files), sort=True)
    return df


if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument("--output_path")
    parser.add_argument("--title")

    args = parser.parse_args()

    output_path = args.output_path  # Root of directory to read in inputs from
    title = args.title
    df = pull_data(output_path)

    # Make sure the diagnostics directory exists before writing into it
    (Path(output_path) / "diagnostics").mkdir(parents=True, exist_ok=True)

    plot_model(predicted=df.predicted,
               observed=df.observed,
               title=title,
               plot_output_path=Path(output_path) / f"diagnostics/{title}.png")

    # Save compiled data frame
    df.to_csv(Path(output_path) / "diagnostics/summarized.csv")
Example #5
        print('\nValidate model on {} unseen validation samples:'.format(
            X_val.shape[0]))
        val_score = model.evaluate(X_val, y_val, verbose=0)
        print('Val loss:', val_score[0])
        print('Val accuracy:', val_score[1])
    return model, model_results, history


print("Load data")
data = load_data()
X_train, y_train, X_test, y_test, X_val, y_val = get_shuffled_splitted_data()
print("Start training")
model, model_results, history = train()
utils.plot_history(model_results)
plt.savefig('alexnet_model_history.png')

# Fix the random seed for reproducible results
np.random.seed(1)
y_val_pred = model.predict(X_val, batch_size=32, verbose=0)
y_val_pred = np.round(y_val_pred).astype(int)
is_lgg = y_val.argmax(axis=1) == 0
utils.plot_predicted_samples(4, X_val[is_lgg], y_val[is_lgg],
                             y_val_pred[is_lgg], 'Validation set - LGG',
                             (WIDTH, HEIGHT))
plt.savefig('alexnet_samples_lgg.png')
utils.plot_predicted_samples(4, X_val[~is_lgg], y_val[~is_lgg],
                             y_val_pred[~is_lgg],
                             'Validation set - HGG', (WIDTH, HEIGHT))
plt.savefig('alexnet_samples_hgg.png')
utils.plot_model(model, 'skull_classification_model_alexnet.png', show=False)
Example #6
lr_scheduler = LearningRateScheduler(lr_sch)
lr_reducer = ReduceLROnPlateau(monitor='val_my_metric',
                               factor=0.2,
                               patience=5,
                               mode='max',
                               min_lr=1e-3)

checkpoint = ModelCheckpoint('model.h5',
                             monitor='val_loss',
                             verbose=0,
                             save_best_only=True,
                             mode='auto')

model_details = model.fit(data_train,
                          box_train,
                          batch_size=128,
                          epochs=700,
                          shuffle=True,
                          validation_split=0.1,  # must be < 1, or no data is left for training
                          callbacks=[lr_scheduler, lr_reducer, checkpoint],
                          verbose=1)

model.save('model.h5')

scores = model.evaluate(data_test, box_test, verbose=1)
print('Test loss : ', scores[0])
print('Test accuracy : ', scores[1])

plot_model(model_details)
Example #7
def fit_model(sess, model, t, Y, Nw=10, num_iter=500, print_every=10,
              eta=5e-3, dec_step=20, dec_rate=0.99, plot_=True):
    """ Fits the NPDE model to a dataset and returns the fitted object
    
    Args:
        sess: TensowFlow session needed for initialization and optimization
        t: Python array of numpy vectors storing observation times
        Y: Python array of numpy matrices storing observations. Observations
             are stored in rows.
        Nw: Integer number of samples used for optimization in SDE model
        num_iter: Integer number of optimization steps
        num_iter: Integer interval of optimization logs to be printed
        eta: Float step size used in optimization, must be carefully tuned
        dec_step: Float decay interval of the step size
        dec_rate: Float decay rate of the step size
        plot_: Boolean for plotting the model fit. Valid only for demo
        
    Returns:
        npde: Fitted model
    """
    print('Building loss function...')
    x0 = np.vstack([Y_[0] for Y_ in Y])
    if model.name == 'npode':
        with tf.name_scope("cost"):
            X = model.forward(x0,t)
            ll = []
            for i in range(len(X)):
                mvn = tf.contrib.distributions.MultivariateNormalFullCovariance(loc=X[i],covariance_matrix=tf.diag(model.sn))
                ll.append(tf.reduce_sum(mvn.log_prob(Y[i])))
            ll = tf.reduce_logsumexp(ll)
            ode_prior = model.build_prior()
            cost = -(ll + ode_prior)

    elif model.name == 'npsde':
        with tf.name_scope("cost"):
            Xs = model.forward(x0,t,Nw=Nw)
            ll = 0
            for i in range(len(Y)):
                mvn = tf.contrib.distributions.MultivariateNormalFullCovariance(loc=Y[i],covariance_matrix=tf.diag(model.sn))
                ll_i = tf.stack([mvn.log_prob(Xs[i][j,:,:]) for j in range(Xs[i].shape[0])]) # Nw x D
                ll_i = tf.reduce_sum(tf.log(tf.reduce_mean(tf.exp(ll_i),axis=0)))
                ll += ll_i
            sde_prior = model.build_prior()
            cost = -(ll + sde_prior)

    else:
        raise NotImplementedError("model.name should be either 'npode' or 'npsde', not {:s}\n".format(model.name))

    print('Adam optimizer being initialized...')
    with tf.name_scope("adam"):
        global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
        expdec = tf.train.exponential_decay(eta,global_step,dec_step,dec_rate,staircase=True)
        optimizer = tf.train.AdamOptimizer(expdec).minimize(cost,global_step)

    sess.run(tf.global_variables_initializer())
    # global_vars = tf.global_variables()
    # sess.run(tf.variables_initializer(var_list=[v for v in global_vars if 'adam' in v.name]))

    print('Optimization starts.')
    print('{:>16s}'.format("iteration")+'{:>16s}'.format("objective"))
    for i in range(1,num_iter+1):
        _cost,_ = sess.run([cost,optimizer])
        if i==1 or i%print_every==0 or i==num_iter:
            print('{:>16d}'.format(i)+'{:>16.3f}'.format(_cost))
    print('Optimization ends.')

    if plot_:
        print('Plotting...')
        plot_model(model,t,Y,Nw=50)

    return model
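
A hypothetical usage sketch for fit_model; how the model object is built and
where t and Y come from are assumptions, not part of this listing:

# sess = tf.Session()
# npde = ...  # an NPODE/NPSDE instance whose .name is 'npode' or 'npsde'
# fitted = fit_model(sess, npde, t, Y, Nw=10, num_iter=500, print_every=10)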
Example #8
def npde_fit(sess,
             t,
             Y,
             model='sde',
             sf0=1.0,
             ell0=[2, 2],
             sfg0=1.0,
             ellg0=[1e5],
             W=6,
             ktype="id",
             whiten=True,
             Nw=50,
             fix_ell=False,
             fix_sf=False,
             fix_Z=False,
             fix_U=False,
             fix_sn=False,
             fix_ellg=False,
             fix_sfg=False,
             fix_Zg=True,
             fix_Ug=False,
             num_iter=500,
             print_int=10,
             eta=5e-3,
             dec_step=20,
             dec_rate=0.99,
             plot_=True):
    """ Fits the NPDE model to a dataset and returns the fitted object
    
    Args:
        sess: TensorFlow session needed for initialization and optimization
        t: Python array of numpy vectors storing observation times
        Y: Python array of numpy matrices storing observations. Observations
             are stored in rows.
        model: 'sde' or 'ode'
        sf0: Float initial value of the signal variance of drift GP
        ell0: Python/numpy array of floats for the initial value of the 
            lengthscale of drift GP
        sfg0: Float initial value of the signal variance of diffusion GP 
        ellg0: Python/numpy array of a single float for the initial value of the 
            lengthscale of diffusion GP
        W: Integer denoting the width of the inducing point grid. If the problem
            dimension is D, total number of inducing points is W**D
        ktype: Kernel type. We have made experiments only with Kronecker kernel,
            denoted by 'id'. The other kernels are not supported.
        whiten: Boolean. Currently we perform the optimization only in the 
            white domain
        Nw: Integer number of samples used for optimization in SDE model
        fix_ell: Boolean - whether drift GP lengthscale is fixed or optimized
        fix_sf: Boolean - whether drift GP signal variance is fixed or optimized
        fix_Z: Boolean - whether drift GP inducing locations are fixed or optimized
        fix_U: Boolean - whether drift GP inducing vectors are fixed or optimized
        fix_sn: Boolean - whether noise variance is fixed or optimized
        fix_ellg: Boolean - whether diffusion GP lengthscale is fixed or optimized
        fix_sfg: Boolean - whether diffusion GP signal variance is fixed or optimized
        fix_Zg: Boolean - whether diffusion GP inducing locations are fixed or optimized
        fix_Ug: Boolean - whether diffusion GP inducing vectors are fixed or optimized
        num_iter: Integer number of optimization steps
        print_int: Integer interval of optimization logs to be printed
        eta: Float step size used in optimization, must be carefully tuned
        dec_step: Float decay interval of the step size
        dec_rate: Float decay rate of the step size
        plot_: Boolean for plotting the model fit. Valid only for demo
        
    Returns:
        npde: Fitted model
    """

    print('Model being initialized...')

    def init_U0(Y=None, t=None, kern=None, Z0=None, whiten=None):
        Ug = (Y[1:, :] - Y[:-1, :]) / np.reshape(t[1:] - t[:-1], (-1, 1))
        tmp = NPODE(Y[0, :].reshape((1, -1)),
                    t,
                    Y,
                    Z0=Y[:-1, :],
                    U0=Ug,
                    sn0=0,
                    kern=kern,
                    jitter=0.2,
                    whiten=False,
                    fix_Z=True,
                    fix_U=True,
                    fix_sn=True)
        U0 = tmp.f(X=Z0)
        if whiten:
            Lz = tf.cholesky(kern.K(Z0))
            U0 = tf.matrix_triangular_solve(Lz, U0, lower=True)
        U0 = sess.run(U0)
        return U0

    D = len(ell0)
    Nt = len(Y)
    x0 = np.zeros((Nt, D))
    Ys = np.zeros((0, D))
    for i in range(Nt):
        x0[i, :] = Y[i][0, :]
        Ys = np.vstack((Ys, Y[i]))
    maxs = np.max(Ys, 0)
    mins = np.min(Ys, 0)
    grids = []
    for i in range(D):
        grids.append(np.linspace(mins[i], maxs[i], W))
    vecs = np.meshgrid(*grids)
    Z0 = np.zeros((0, W**D))
    for i in range(D):
        Z0 = np.vstack((Z0, vecs[i].T.flatten()))
    Z0 = Z0.T

    tmp_kern = OperatorKernel(sf0, ell0, ktype="id", fix_ell=True, fix_sf=True)

    U0 = np.zeros(Z0.shape, dtype=np.float64)
    for i in range(len(Y)):
        U0 += init_U0(Y[i], t[i], tmp_kern, Z0, whiten)
    U0 /= len(Y)

    sn0 = 0.5 * np.ones(D)
    Ug0 = np.ones([Z0.shape[0], 1]) * 0.01

    ell0 = np.asarray(ell0, dtype=np.float64)
    ellg0 = np.asarray(ellg0, dtype=np.float64)

    kern = OperatorKernel(sf0=sf0,
                          ell0=ell0,
                          ktype=ktype,
                          fix_ell=fix_ell,
                          fix_sf=fix_sf)

    if model == 'ode':
        npde = NPODE(x0=x0,
                     t=t,
                     Y=Y,
                     Z0=Z0,
                     U0=U0,
                     sn0=sn0,
                     kern=kern,
                     whiten=whiten,
                     fix_Z=fix_Z,
                     fix_U=fix_U,
                     fix_sn=fix_sn)
        with tf.name_scope("cost"):
            X = npde.forward()
            ll = []
            for i in range(len(X)):
                mvn = tf.contrib.distributions.MultivariateNormalFullCovariance(
                    loc=X[i], covariance_matrix=tf.diag(npde.sn))
                ll.append(tf.reduce_sum(mvn.log_prob(Y[i])))
            ll = tf.reduce_logsumexp(ll)
            ode_prior = npde.build_prior()
            cost = -(ll + ode_prior)

    elif model == 'sde':
        diffus = BrownianMotion(sf0=sfg0,
                                ell0=ellg0,
                                U0=Ug0,
                                Z0=Z0,
                                whiten=whiten,
                                fix_sf=fix_sfg,
                                fix_ell=fix_ellg,
                                fix_Z=fix_Zg,
                                fix_U=fix_Ug)
        npde = NPSDE(x0=x0,
                     t=t,
                     Y=Y,
                     Z0=Z0,
                     U0=U0,
                     sn0=sn0,
                     kern=kern,
                     diffus=diffus,
                     whiten=whiten,
                     fix_Z=fix_Z,
                     fix_U=fix_U,
                     fix_sn=fix_sn)
        with tf.name_scope("cost"):
            Xs = npde.forward(Nw=Nw)
            ll = 0
            for i in range(len(Y)):
                mvn = tf.contrib.distributions.MultivariateNormalFullCovariance(
                    loc=Y[i], covariance_matrix=tf.diag(npde.sn))
                ll_i = tf.stack([
                    mvn.log_prob(Xs[i][j, :, :]) for j in range(Xs[i].shape[0])
                ])  # Nw x D
                ll_i = tf.reduce_sum(
                    tf.log(tf.reduce_mean(tf.exp(ll_i), axis=0)))
                ll += ll_i
            sde_prior = npde.build_prior()
            cost = -(ll + sde_prior)

    else:
        raise NotImplementedError(
            "model parameter should be either 'ode' or 'sde', not {:s}\n".
            format(model))

    print('Adam optimizer being initialized...')
    global_step = tf.Variable(0,
                              dtype=tf.int32,
                              trainable=False,
                              name='global_step')
    expdec = tf.train.exponential_decay(eta,
                                        global_step,
                                        dec_step,
                                        dec_rate,
                                        staircase=True)

    optimizer = tf.train.AdamOptimizer(expdec).minimize(cost, global_step)

    sess.run(tf.global_variables_initializer())

    print('Optimization starts.')
    print('{:>16s}'.format("iteration") + '{:>16s}'.format("objective"))
    for i in range(1, num_iter + 1):
        _cost, _ = sess.run([cost, optimizer])
        if i == 1 or i % print_int == 0 or i == num_iter:
            print('{:>16d}'.format(i) + '{:>16.3f}'.format(_cost))
    print('Optimization ends.')

    if plot_:
        print('Plotting...')
        plot_model(npde, Nw=50)

    return npde
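
A hypothetical end-to-end call for npde_fit under the defaults above; t and Y
(a list of time vectors and a list of observation matrices) are assumed to be
prepared by the caller:

# with tf.Session() as sess:
#     npde = npde_fit(sess, t, Y, model='sde', num_iter=500, print_int=10)
#     # npde is the fitted NPSDE object returned at the end of the function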