Code Example #1
import logging
import os

import numpy as np
import torch

# initialize_logger, get_freer_gpu, load_and_intialize, train, plot_loss,
# causal_effect_estimation_and_plotting, evaluate and save_csv are
# project-local helpers assumed importable from the repository.


def main(params):
    """ Main function for the experiments of "Causal effect estimation using neural autoregressive density estimators".
    """
    # Initialize the results logger.
    logger = initialize_logger('./results/training_logger.log')

    # use GPU if available
    if params["cuda"] and torch.cuda.is_available():
        params["device"] = torch.device(f'cuda:{get_freer_gpu()}')
    else:
        params["device"] = "cpu"

    # Set the random seed for reproducible experiments
    torch.manual_seed(params["random_seed"])
    if params["cuda"]:
        torch.cuda.manual_seed(params["random_seed"])

    # Set up the experiment name (it must contain all the hyper-parameters we are searching over):
    if "name" not in params:
        params["name"] = f'{params["model"]}_' + f'OPTIM={params["optimizer"]}_' + \
                            f'LR={params["learn_rate"]}_'.replace(".", "-") + f'ACT={params["activation"]}_' + \
                            f'ARCH={str(params["architecture"]).replace("[", "").replace("]", "").replace(", ", "-")}_' + \
                            f'POLY={params["polynomials"]}'

    # Create the results folder for that particular experiment:
    os.makedirs(f'./results/{params["name"]}', exist_ok=True)

    # Load the data and initialise the optimizer
    data, train_loader, model, loss_fn, optimizer = load_and_intialize(params)

    # Train the NN
    cum_loss = train(model, optimizer, loss_fn, train_loader, params)
    plot_loss(np.asarray(cum_loss), params)

    # Initialise the results dictionary
    results = {}

    # Evaluate
    model.eval()
    results["final_loss"] = cum_loss[-1]
    results["causal_effect"] = causal_effect_estimation_and_plotting(model.to("cpu").float(), params, data)
    results["evaluation"] = evaluate(params, results, data)

    # Log the estimated causal effect
    logging.info(f'The estimated causal effect is: {results["causal_effect"]}')

    # Save the results
    save_dict = {**params, **results}
    # Write the results and architecture in the result.csv file
    save_csv('./results/results.csv', save_dict)
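For reference, main() reads all of its hyper-parameters from a single params dict. A minimal sketch of a call follows; the keys mirror the ones read in the function body, while the values are purely illustrative assumptions, not the repository's defaults.

# Illustrative params; every key below is read inside main().
params = {
    "model": "NADE",            # assumed model identifier
    "optimizer": "adam",
    "learn_rate": 1e-3,
    "activation": "relu",
    "architecture": [32, 32],
    "polynomials": 0,
    "cuda": False,
    "random_seed": 42,
}
main(params)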
Code Example #2
from gensim.models import CoherenceModel
import pyLDAvis
import pyLDAvis.gensim  # renamed to pyLDAvis.gensim_models in pyLDAvis >= 3.0

# train() is a project-local helper assumed importable from the repository.


def visualize():
    lda_model, corpus, data_lemmatized, dictionary = train()

    # Perplexity: a measure of how well the model fits the data (lower is better).
    print('\nPerplexity: ', lda_model.log_perplexity(corpus))

    # Coherence score
    coherence_model_lda = CoherenceModel(model=lda_model, texts=data_lemmatized,
                                         dictionary=dictionary, coherence='c_v')
    coherence_lda = coherence_model_lda.get_coherence()
    print('\nCoherence Score: ', coherence_lda)

    # Visualize the topics. enable_notebook() only takes effect inside Jupyter,
    # and a bare `vis` expression displays nothing, so return it instead.
    pyLDAvis.enable_notebook()
    vis = pyLDAvis.gensim.prepare(lda_model, corpus, dictionary)
    return vis
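Outside a notebook, the prepared visualization can be written to a standalone HTML file instead of relying on inline rendering. A minimal sketch; the output filename is an assumption.

# Save the LDA visualization so it can be opened in any browser.
vis = visualize()
pyLDAvis.save_html(vis, 'lda_topics.html')  # filename is illustrative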
Code Example #3
import os

import torch
from tqdm import trange

# get_freer_gpu, load_and_intialize, train,
# causal_effect_estimation_and_plotting, bootstrap_statistics, bootstrap_plot
# and save_csv are project-local helpers assumed importable from the repository.


def bootstrap_estimation(params):
    """ Runs bootstrap to estimate the confidence intervals of causal effects.

    Args:
        params: Parameters of the experiment; params["num_bootstrap"] sets the
            number of bootstrap samples.
    """
    # Set up the experiment name (it must contain all the hyper-parameters we are searching over):
    if "name" not in params:
        params["name"] = f'bootstrap_{params["model"]}_' + f'OPTIM={params["optimizer"]}_' + \
                            f'LR={params["learn_rate"]}_'.replace(".", "-") + f'ACT={params["activation"]}_' + \
                            f'ARCH={str(params["architecture"]).replace("[", "").replace("]", "").replace(", ", "-")}_' + \
                            f'POLY={params["polynomials"]}'

    # Create the results folder for that particular experiment:
    os.makedirs(f'./results/{params["name"]}', exist_ok=True)

    # use GPU if available
    if params["cuda"] and torch.cuda.is_available():
        params["device"] = torch.device(f'cuda:{get_freer_gpu()}')
    else:
        params["device"] = "cpu"

    # Set the random seed for reproducible experiments
    torch.manual_seed(params["random_seed"])
    if params["cuda"]:
        torch.cuda.manual_seed(params["random_seed"])

    params["plot"] = False

    bootstrap_estimate = []
    for b in trange(params["num_bootstrap"], desc="Bootstrap sample"):
        params["bootstrap_seed"] = b
        data, train_loader, model, loss_fn, optimizer = load_and_intialize(
            params)
        _ = train(model, optimizer, loss_fn, train_loader, params)
        bootstrap_estimate.append(
            causal_effect_estimation_and_plotting(model, params, data))

    results = bootstrap_statistics(bootstrap_estimate)
    bootstrap_plot(results, data, params)

    # Save the results
    save_dict = {**params, **results}
    # Write the results and architecture in the result.csv file
    save_csv('./results/bootstrap_results.csv', save_dict)
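An illustrative call, reusing the params dict sketched after Code Example #1 and adding the one extra key this function reads; the value is an assumption.

params["num_bootstrap"] = 100  # illustrative bootstrap sample count
bootstrap_estimation(params)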
Code Example #4
async def train_model():
    # train() is a blocking, project-local call; running it directly inside an
    # async handler stalls the event loop for the whole training run.
    train()

    return {'Result': 'model.pkl produced'}
Code Example #5
async def train_model():
    # Same caveat as above: the blocking train(False) call stalls the event
    # loop. The boolean flag is a project-local argument to train().
    train(False)

    return {'result': 'model trained'}
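Both handlers look like FastAPI/Starlette-style endpoints (an assumption; no route decorators are shown), and both run blocking training inside an async function. A minimal sketch that keeps the event loop responsive by offloading the call, assuming Python 3.9+:

import asyncio

async def train_model():
    # Run the blocking, project-local train() in a worker thread so the
    # event loop can keep serving requests while the model trains.
    await asyncio.to_thread(train)

    return {'result': 'model trained'}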
Code Example #6
import logging

import keras
import numpy as np
from sklearn.utils.class_weight import compute_class_weight

# batch_size, epochs, the data splits (train_X, train_y, ...),
# tensorboard_callback, NextItemPredictor, train, predict_10, sps and
# item_coverage are assumed to be defined in earlier notebook cells.

lstm_size = 256
emb_size = 200
logging.info('Batch {}. Epochs {}. LSTM {}. Emb {}'.format(batch_size, epochs,
                                                           lstm_size, emb_size))
# %%
model = NextItemPredictor(lstm_size, emb_size)
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
)
# %%
class_weight = compute_class_weight(class_weight='balanced',
                                    classes=np.unique(train_y), y=train_y)
es = keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=1)
train(model,
      train_X=train_X,
      train_y=train_y,
      validation_X=validation_X,
      validation_y=validation_y,
      class_weight=class_weight,
      batch_size=batch_size,
      epochs=epochs,
      callbacks=[es, tensorboard_callback],
      description='batch{}-epochs{}-lstm{}'.format(batch_size, epochs,
                                                   lstm_size))
# %%
y_pred = predict_10(model, test_X)
# %%
sps(test_y, y_pred)
# %%
item_coverage(test_y, y_pred)
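One caveat on the class weights: compute_class_weight returns a NumPy array ordered like np.unique(train_y), while keras Model.fit expects class_weight as a {class_index: weight} dict. If the project-local train() forwards the value to fit (an assumption about its contract), a conversion like this sketch is needed:

# Map each class index to its balanced weight; train_y is the label array
# from the snippet above.
classes = np.unique(train_y)
weights = compute_class_weight(class_weight='balanced', classes=classes,
                               y=train_y)
class_weight = dict(zip(classes, weights))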
Code Example #7
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter

# load_data, twod_convmodel (and the commented-out DoubleLayer / convmodel)
# and train are project-local helpers assumed importable from the repository.

window_size = 1024
sample_spacing = 256
channels = 23
bs = 200
lr = 0.05  # earlier runs: lr = 0.0005 -> 67%, lr = 0.00075 -> 64%
epochs = 50
train_ratio = 0.8
data_type = 3  # renamed from `type` to avoid shadowing the builtin
cnn = 1

if cnn:
    overlap = 0.5
    seconds = 30

eeg_processed_folder = "/home/jmsvanrijn/Documents/Afstuderen/Code/low-power-epilepsy-detection/data/processed/"

writer = SummaryWriter()
train_loader, valid_loader = load_data(eeg_processed_folder, window_size, bs,
                                       data_type, train_ratio)

# Inputs may need z-score normalization first: z = (x - u) / s
# epilepsy_model = DoubleLayer(window_size, window_size, channels).double()
# epilepsy_model = convmodel(window_size, 1).double()
epilepsy_model = twod_convmodel().double()
optimizer = optim.Adam(epilepsy_model.parameters(), lr=lr)
criterion = nn.BCEWithLogitsLoss()
train(epilepsy_model, train_loader, valid_loader, epochs, criterion, optimizer,
      writer)

writer.close()
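The normalization note above, z = (x - u) / s, can be realized per EEG channel before training. A minimal sketch, assuming batches shaped (batch, channels, window_size); this is not the repository's preprocessing.

import torch

def zscore_per_channel(x, eps=1e-8):
    """Z-score each channel of a batch of EEG windows: z = (x - u) / s."""
    u = x.mean(dim=-1, keepdim=True)   # per-channel mean over the window
    s = x.std(dim=-1, keepdim=True)    # per-channel std over the window
    return (x - u) / (s + eps)         # eps guards against flat channels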