Example #1
                  x_dist=x_dist,
                  batchnorm=batchnorm,
                  mc_samples=mc_samps,
                  l2_reg=l2_reg,
                  learning_paradigm=learning_paradigm,
                  name=model_token,
                  ckpt=model_token,
                  loss_ratio=loss_ratio)
 #
 if model_name == 'gm_dgm':
     model = gm_dgm(n_x,
                    n_y,
                    n_z,
                    n_hidden,
                    x_dist=x_dist,
                    batchnorm=batchnorm,
                    mc_samples=mc_samps,
                    l2_reg=l2_reg,
                    learning_paradigm=learning_paradigm,
                    name=model_token,
                    ckpt=model_token,
                    loss_ratio=loss_ratio)
 #
 if model_name == 'gmm_vae':
     model = gmm_vae(n_x,
                     n_y,
                     n_w,
                     n_z,
                     n_hidden,
                     x_dist=x_dist,
                     batchnorm=batchnorm,
                     mc_samples=mc_samps,
                     l2_reg=l2_reg,
                     learning_paradigm=learning_paradigm,
                     name=model_token,
                     ckpt=model_token,
                     loss_ratio=loss_ratio)
Example #2
import os
import numpy as np
import tensorflow as tf  # TensorFlow 1.x API (set_random_seed, reset_default_graph)
from sklearn.metrics import confusion_matrix

verbose = 3

Data.reset_counters()
results = []
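# Repeat the experiment num_runs times; each run reseeds the RNGs and rebuilds the TF graph.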
for i in range(num_runs):
    print("Starting work on run: {}".format(i))
    Data.reset_counters()
    np.random.seed(2)
    tf.set_random_seed(2)
    tf.reset_default_graph()
    model_token = token+'-'+str(i)+'---'

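    # Instantiate the model selected by model_name.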
    if model_name == 'm2':
        model = m2(n_x, n_y, n_z, n_hidden, x_dist=x_dist, batchnorm=batchnorm, mc_samples=mc_samps, l2_reg=l2_reg, learning_paradigm=learning_paradigm, name=model_token, ckpt=model_token)
    if model_name == 'gm_dgm':
        model = gm_dgm(n_x, n_y, n_z, n_hidden, x_dist=x_dist, batchnorm=batchnorm, alpha=alpha, mc_samples=mc_samps, l2_reg=l2_reg, learning_paradigm=learning_paradigm, name=model_token, ckpt=model_token, prior=prior[0:n_y]/float(sum(prior[0:n_y])), loss_ratio=loss_ratio, output_dir=output_dir)

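    # Attach the loss that matches the learning paradigm.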
    if learning_paradigm in ('semisupervised', 'semi-unsupervised'):
        model.loss = model.compute_loss()
    elif learning_paradigm == 'unsupervised':
        model.loss = model.compute_unsupervised_loss()
    elif learning_paradigm == 'supervised':
        model.loss = model.compute_supervised_loss()

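    # Train, then save the learning curve, confusion matrix and test predictions for this run.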
    model.train(Data, n_epochs, l_bs, u_bs, lr, eval_samps=eval_samps, binarize=binarize, verbose=verbose)
    results.append(model.curve_array)
    np.save(os.path.join(output_dir, 'curve_' + token + '_' + str(i) + '.npy'), model.curve_array)
    y_pred_test = model.predict_new(Data.data['x_test'])[0]
    conf_mat = confusion_matrix(Data.data['y_test'].argmax(1), y_pred_test.argmax(1))
    np.save(os.path.join(output_dir, 'conf_mat_' + token + '_' + str(i) + '.npy'), conf_mat)
    np.savez(os.path.join(output_dir, 'y_preds_labels_' + token + '_' + str(i) + '.npz'),
             y_true=Data.data['y_test'].argmax(1),
             y_pred=y_pred_test.argmax(1),
             y_labels=y_test[1])
Example #3
    if model_name == 'm2':
        model = m2(n_x,
                   n_y,
                   n_z,
                   x_dist=x_dist,
                   mc_samples=mc_samps,
                   alpha=alpha,
                   l2_reg=l2_reg,
                   learning_paradigm=learning_paradigm,
                   name=model_token,
                   ckpt=model_token,
                   output_dir=output_dir)
    if model_name == 'gm_dgm':
        model = gm_dgm(n_x,
                       n_y,
                       n_z,
                       x_dist=x_dist,
                       mc_samples=mc_samps,
                       alpha=alpha,
                       l2_reg=l2_reg,
                       learning_paradigm=learning_paradigm,
                       name=model_token,
                       ckpt=model_token,
                       prior=prior[0:n_y] / float(sum(prior[0:n_y])),
                       output_dir=output_dir)

    if learning_paradigm in ('semisupervised', 'semi-unsupervised'):
        model.loss = model.compute_loss()
    elif learning_paradigm == 'unsupervised':
        model.loss = model.compute_unsupervised_loss()
    elif learning_paradigm == 'supervised':
        model.loss = model.compute_supervised_loss()

    model.train(Data,
                n_epochs,
                l_bs,
                u_bs,
                lr,
                eval_samps=eval_samps,
                binarize=binarize,
                verbose=1)