Example #1
import random


def train(data_points, epochs, rate, lam):
    """Train a model with stochastic gradient descent.

    Args:
        data_points (list): each element is {'features': list of floats, 'label': 0 or 1}
        epochs (int): number of epochs to perform
        rate (float): learning rate
        lam (float): regularization parameter

    Returns:
        list: model weights

    """
    model = initialize_model(len(data_points[0]['features']))
    random_index_ls = list(range(len(data_points)))
    for _ in range(epochs):
        random.shuffle(random_index_ls)  # shuffle the visiting order of the training examples
        for i in random_index_ls:
            model = update(model, data_points[i], rate, lam)
    return model


if __name__ == '__main__':
    from data import accuracy, get_train_test_data

    train_dp, test_dp = get_train_test_data()
    EPOCHS = 5
    RATE = 0.01  # learning rate
    LAM = 0.001  # regularization parameter
    trained_model = train(train_dp, EPOCHS, RATE, LAM)
    predictions = [predict(trained_model, dp) for dp in test_dp]
    acc = accuracy(test_dp, predictions)
    print(acc)
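The excerpt calls `initialize_model`, `update`, and `predict` without showing them. Below is a minimal sketch of what they might look like if the model is L2-regularized logistic regression trained by SGD; that loss choice, and everything in the sketch, is an assumption rather than part of the original:

import math


def initialize_model(n_features):
    # One weight per feature; all-zero initialization is a simple, common choice. (Assumed helper.)
    return [0.0] * n_features


def predict(model, data_point):
    # sigmoid(z) >= 0.5 exactly when z >= 0, so thresholding z suffices.
    z = sum(w * x for w, x in zip(model, data_point['features']))
    return 1 if z >= 0.0 else 0


def update(model, data_point, rate, lam):
    # One SGD step on the logistic loss with an L2 penalty. (Assumed loss.)
    z = sum(w * x for w, x in zip(model, data_point['features']))
    z = max(min(z, 60.0), -60.0)    # clamp so exp() cannot overflow
    p = 1.0 / (1.0 + math.exp(-z))  # predicted probability of label 1
    error = p - data_point['label']
    return [w - rate * (error * x + lam * w)
            for w, x in zip(model, data_point['features'])]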
Example #2
f2 = open(args.log_val, 'a')
f2.write("Var starts \n")
f2.close()


# os.environ['CUDA_VISIBLE_DEVICES']="0"

BATCH_SIZE = 3
EPOCH = args.epoch
NAME = args.name
LR = 1e-4

model = create_model()

train_generator, test_generator = get_train_test_data(BATCH_SIZE)
optimizer = keras.optimizers.Adam(lr=LR, amsgrad=True)  # 'lr' was renamed 'learning_rate' in newer Keras releases


# Training session details
runID = str(int(time.time())) + '-n' + \
        str(len(train_generator)) + '-e' + \
        str(EPOCH) + '-bs' + str(BATCH_SIZE) + '-lr' + \
        str(LR) + '-' + NAME
outputPath = './models/'
runPath = outputPath + runID
pathlib.Path(runPath).mkdir(parents=True, exist_ok=True)
print('Output: ' + runPath)

# model.compile(loss=[loss_function, loss_function, loss_function, loss_function,
#                     None, None, None, None], optimizer=optimizer,
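The commented-out compile call above is cut off in the excerpt. A plausible completed form, assuming a multi-output model where only the first four outputs contribute to the loss (Keras accepts None entries for outputs that should be skipped); `loss_function` is taken from the fragment, everything else is an assumption:

# model.compile(loss=[loss_function, loss_function, loss_function, loss_function,
#                     None, None, None, None],
#               optimizer=optimizer)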
Example #3
def test_pytorch_model(activation: str, eta: float, momentum: float, plots: bool, n_runs: int):
    """
    Test pytorch implementation with the selected activation function and learning parameters, eventually plotting
    the learning curves.
    :param activation: activation function to be used in the network
    :param eta: SGD learning rate
    :param momentum: SGD momentum factor
    :param plots: whether produce plots as in the report
    :param n_runs: number of runs for performance estimation
    """

    tot_loss = []
    tot_err = []
    tot_err_train = []
    tot_err_test = []
    epochs = 75
    batch_size = 100

    # Evaluate the PyTorch implementation over n_runs
    print("Starting training on the PyTorch implementation over {} runs".format(n_runs))
    for i in range(n_runs):
        train_data, train_targets, test_data, test_targets = get_train_test_data(1000)
        print("Building model {}...".format(i))
        # fmt: off
        if activation == "relu":
            model = nn.Sequential(
                nn.Linear(2, 25),
                nn.ReLU(),
                nn.Linear(25, 25),
                nn.ReLU(),
                nn.Linear(25, 25),
                nn.ReLU(),
                nn.Linear(25, 2),
            )
        else:
            model = nn.Sequential(
                nn.Linear(2, 25),
                nn.Tanh(),
                nn.Linear(25, 25),
                nn.Tanh(),
                nn.Linear(25, 25),
                nn.Tanh(),
                nn.Linear(25, 2),
            )
        # fmt: on
        # Train pytorch and record run losses and errors
        losses, errors = train_pytorch(model, train_data, train_targets, test_data, test_targets, epochs, batch_size, eta, momentum,)

        tot_loss.append(losses)
        tot_err.append(errors)

        print("Training on model {} finished, computing accuracy on train and test...".format(i))

        train_err = compute_errors(model, train_data, train_targets, batch_size)
        test_err = compute_errors(model, test_data, test_targets, batch_size)
        tot_err_train.append(train_err)
        tot_err_test.append(test_err)

        del model

    if plots:
        # Creating plots and saving them to pdf files
        print("-------------------------------------------------------")
        print("Saving requested plots for loss and errors")
        loss_save = "losstot_pytorch_{act}_{n}runs".format(act=activation, n=n_runs)
        err_save = "err_pytorch_{act}_{n}runs".format(act=activation, n=n_runs)
        plot_over_epochs(tot_loss, epochs, "Pytorch Loss", loss_save)
        plot_over_epochs(tot_err, epochs, "Pytorch Errors", err_save)

    # Computing and printing the mean loss at each epoch over the runs
    mean_train = torch.mean(torch.Tensor([val["train"] for val in tot_loss]), 0)
    mean_test = torch.mean(torch.Tensor([val["test"] for val in tot_loss]), 0)
    for e in range(epochs):
        print("Epoch {}, average train loss: {}, average test loss: {}".format(e, mean_train[e], mean_test[e]))

    # Computing mean and standard deviation of the error counts over the runs
    mean_err_train = torch.mean(torch.Tensor(tot_err_train))
    mean_err_test = torch.mean(torch.Tensor(tot_err_test))
    std_err_train = torch.std(torch.Tensor(tot_err_train))
    std_err_test = torch.std(torch.Tensor(tot_err_test))
    print("-------------------------------------------------------")
    print("Final error count and standard deviation on train and test for the PyTorch implementation:")
    print("Train -> Mean Error = {}, Standard deviation = {}".format(mean_err_train, std_err_train))
    print("Test -> Mean Error = {}, Standard deviation = {}".format(mean_err_test, std_err_test))

    return
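The ReLU and Tanh branches above differ only in the activation module; a small factory function (a refactoring sketch, not part of the original) removes the duplication. The same idea applies verbatim to the myNN versions in Examples #4 and #11:

from torch import nn


def build_model(activation: str) -> nn.Sequential:
    # Pick the activation class once, then instantiate it between the linear layers.
    act = nn.ReLU if activation == "relu" else nn.Tanh
    return nn.Sequential(
        nn.Linear(2, 25), act(),
        nn.Linear(25, 25), act(),
        nn.Linear(25, 25), act(),
        nn.Linear(25, 2),
    )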
Example #4
def test_selected_model(activation: str, eta: float, momentum: float, plots: bool, n_runs: int):
    """
    Test our implementation with the selected activation function and learning parameters, optionally plotting
    the learning curves and final result.
    :param activation: activation function to be used in the network
    :param eta: SGD learning rate
    :param momentum: SGD momentum factor
    :param plots: whether to produce plots as in the report
    :param n_runs: number of runs for performance estimation
    """

    # Randomly pick which run's model will produce the xy-grid prediction plots
    plot_model = random.randint(0, n_runs - 1)

    tot_loss = []
    tot_err = []
    tot_err_train = []
    tot_err_test = []
    epochs = 75
    batch_size = 100

    # Do the training
    print("Starting training on our implementation over {} runs".format(n_runs))
    for i in range(n_runs):
        # Get random data and create selected network
        train_data, train_targets, test_data, test_targets = get_train_test_data(1000)
        print("Building model {}...".format(i))
        # fmt: off
        if activation == "relu":
            model = myNN.Sequential(
                myNN.Linear(2, 25),
                myNN.ReLU(),
                myNN.Linear(25, 25),
                myNN.ReLU(),
                myNN.Linear(25, 25),
                myNN.ReLU(),
                myNN.Linear(25, 2),
            )
        else:
            model = myNN.Sequential(
                myNN.Linear(2, 25),
                myNN.Tanh(),
                myNN.Linear(25, 25),
                myNN.Tanh(),
                myNN.Linear(25, 25),
                myNN.Tanh(),
                myNN.Linear(25, 2),
            )
        # fmt: on
        # Train the network, producing xy plots if required
        if plots and i == plot_model:
            print(
                "You chose to produce prediction visualizations on an xy grid every 50 epochs.\n"
                "Model {} was randomly chosen for these plots. This training run will take longer...".format(i)
            )
            losses, errors = train_myNN(
                model, train_data, train_targets, test_data, test_targets, epochs, batch_size, eta, momentum, plots, activation,
            )
        else:
            losses, errors = train_myNN(model, train_data, train_targets, test_data, test_targets, epochs, batch_size, eta, momentum, False,)

        # Add loss to list of loss
        tot_loss.append(losses)
        tot_err.append(errors)

        print("Training on model {} finished, computing accuracy on train and test...".format(i))

        # Compute error for train and test
        train_err = compute_errors(model, train_data, train_targets, batch_size)
        test_err = compute_errors(model, test_data, test_targets, batch_size)
        tot_err_train.append(train_err)
        tot_err_test.append(test_err)

        del model

    if plots:
        # Creating plots and saving them to pdf files
        print("-------------------------------------------------------")
        print("Saving requested plots for loss and errors")
        loss_save = "losstot_{act}_{n}runs".format(act=activation, n=n_runs)
        err_save = "err_{act}_{n}runs".format(act=activation, n=n_runs)
        plot_over_epochs(tot_loss, epochs, "Loss", loss_save)
        plot_over_epochs(tot_err, epochs, "Errors", err_save)

    # Computing and printing the mean loss at each epoch over the runs
    mean_train = torch.mean(torch.Tensor([val["train"] for val in tot_loss]), 0)
    mean_test = torch.mean(torch.Tensor([val["test"] for val in tot_loss]), 0)
    for e in range(epochs):
        print("Epoch {}, average train loss: {}, average test loss: {}".format(e, mean_train[e], mean_test[e]))

    # Computing mean and standard deviation of the error counts over the runs
    mean_err_train = torch.mean(torch.Tensor(tot_err_train))
    mean_err_test = torch.mean(torch.Tensor(tot_err_test))
    std_err_train = torch.std(torch.Tensor(tot_err_train))
    std_err_test = torch.std(torch.Tensor(tot_err_test))
    print("-------------------------------------------------------")
    print("Final error count and standard deviation on train and test:")
    print("Train -> Mean Error = {}, Standard deviation = {}".format(mean_err_train, std_err_train))
    print("Test -> Mean Error = {}, Standard deviation = {}".format(mean_err_test, std_err_test))

    return
Example #5
args = parser.parse_args()

# Inform about multi-gpu training
if args.gpus == 1:
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpuids
    print('Will use GPU ' + args.gpuids)
else:
    print('Will use ' + str(args.gpus) + ' GPUs.')

# Create the model
model = create_model(existing=args.checkpoint)

# Data loaders
if args.data == 'nyu':
    train_generator, test_generator = get_train_test_data(
        args.bs, data_zipfile='nyu_data.zip', max_depth=1000.0)
elif args.data == 'wire':
    train_generator, test_generator = get_train_test_data(
        args.bs, data_zipfile='wire_data.zip', max_depth=1000.0)

# Training session details
runID = str(int(time.time())) + '-n' + str(len(train_generator)) + '-e' + str(
    args.epochs) + '-bs' + str(args.bs) + '-lr' + str(
        args.lr) + '-' + args.name
outputPath = './models/'
runPath = outputPath + runID
pathlib.Path(runPath).mkdir(parents=True, exist_ok=True)
print('Output: ' + runPath)

# (optional steps)
if True:
Example #6
from data import get_train_test_data
from features import *
from settings import *
from perceptron import *

from classifier.nn import Perceptron

import tensorflow as tf



if __name__ == '__main__':
    train_text, test_text = get_train_test_data(DATA_CATEGORIES)
    vocab = get_vocab(train_text.data + test_text.data)
    n_input = len(vocab)
    word2index = get_word_2_index(vocab)

    # TF1-style graph API (use tf.compat.v1 under TensorFlow 2)
    input_tensor = tf.placeholder(tf.float32, [None, n_input], name="input")
    output_tensor = tf.placeholder(tf.float32, [None, len(DATA_CATEGORIES)], name="output")

    nn = Perceptron(n_input, len(DATA_CATEGORIES), N_HIDDEN, SIZE_HIDDEN)
    prediction = nn.predict(input_tensor)

    # Test model (graph definition only; the training session is elided in this excerpt)
    correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(output_tensor, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    total_test_data = len(test_text.target)
    batch_x_test, batch_y_test = get_batch(test_text.data, test_text.target, word2index, n_input, 0, total_test_data)
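The excerpt stops before the accuracy tensor is ever evaluated. Under the TF1 graph API used above, evaluation would normally continue the __main__ block inside a session, roughly as follows (a sketch; the training loop is elided and the fetches are assumptions):

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # ... training loop elided ...
        test_acc = sess.run(accuracy, feed_dict={input_tensor: batch_x_test,
                                                 output_tensor: batch_y_test})
        print("Test accuracy:", test_acc)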
Example #7
                    help='A name to attach to the training session')
parser.add_argument('--checkpoint',
                    type=str,
                    default='',
                    help='Start training from an existing model.')
parser.add_argument(
    '--full',
    dest='full',
    action='store_true',
    help='Full training with metrics, checkpoints, and image samples.')

args = parser.parse_args()

model = create_model(existing=args.checkpoint)
if args.data == 'nyu':
    train_generator, test_generator = get_train_test_data(args.bs)

# Training session details
runID = str(int(time.time())) + '-n' + str(len(train_generator)) + '-e' + str(
    args.epochs) + '-bs' + str(args.bs) + '-lr' + str(
        args.lr) + '-' + args.name
outputPath = './models/'
runPath = outputPath + runID
pathlib.Path(runPath).mkdir(parents=True, exist_ok=True)
print('Output: ' + runPath)

optimizer = Adam(lr=args.lr, amsgrad=True)

model.compile(loss=depth_loss_function, optimizer=optimizer)

callbacks = []
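The fragment ends just as the callback list is created. A typical continuation for a Keras script of this shape (an assumption, not taken from the source) registers logging and checkpointing before fit is called:

from keras.callbacks import ModelCheckpoint, TensorBoard  # hypothetical continuation

callbacks.append(TensorBoard(log_dir=runPath))
callbacks.append(ModelCheckpoint(runPath + '/model.h5', save_best_only=True))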
Example #8
		sim = t[1]

		# if user v has rated item i
		if i in item_ratings[v]:
			num += sim * (item_ratings[v][i] - np.mean(ratings[v]))
			deno += sim
	
	if deno != 0:
		r_hat += num/deno

	return r_hat



if __name__ == "__main__":
	df_train, df_test = data.get_train_test_data()
	# print(len(df_train['movieId'].unique()))

	df_subset = df_train[:250]
	# print(df_subset)
	users = df_subset['userId'].unique()

	seq, ratings, items, item_ratings = generate_seq(df_subset, users)
	# print(item_ratings[1][1210])

	print("In lcsis")
	lcsis, total, common_count, common_items = compute_lcsis(seq, users, ratings, items)

	print("In acsis")
	acsis = compute_acsis(seq, users, ratings)
Example #9
parser.add_argument('--mode',
                    type=str,
                    default='predict',
                    help='one of three modes: train/test/predict')
parser.add_argument('--embedding_random',
                    type=str2bool,
                    default=True,
                    help='use random character embeddings (True) or pretrained ones (False); random by default')
parser.add_argument('--update_embedding',
                    type=str2bool,
                    default=True,
                    help='update the embeddings during training (default: True)')

args = parser.parse_args()

train_data, test_data = get_train_test_data(args.embedding_random,
                                            args.max_len)
vocab, word2id, embeddings = get_embedding(args.embedding_random,
                                           args.embedding_dim)

configs = tf.ConfigProto()
configs.gpu_options.allow_growth = True
configs.gpu_options.per_process_gpu_memory_fraction = 0.2
# paths setting
paths = {}
output_path = config.output_path
if not os.path.exists(output_path):
    os.makedirs(output_path)
summary_path = os.path.join(output_path, "summaries")
paths['summary_path'] = summary_path
if not os.path.exists(summary_path):
    os.makedirs(summary_path)
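The exists-check-plus-makedirs pairs above can race if two processes start at once; since Python 3.2, os.makedirs can do the same in one call (a drop-in alternative, not a change to the original):

os.makedirs(summary_path, exist_ok=True)  # replaces the os.path.exists() guard plus os.makedirs()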
Example #10
		# file.close()
		# file = open('store/mae.txt', 'rb')
		# pickle.dump(mae, file)
		# # mae = pickle.load(file)
		# file.close()
		

		[precision, recall, F_score] = precision_recall_calculation(predictions, threshold=3.5)
		print("\n" + "-"*50)
		print("alpha = ", alpha, "; K = ", K)
		print("RMSE:", rmse)
		print("MAE:", mae)
		print("Precision: ", precision)
		print("Recall: ", recall)
		print("F-Score: ",F_score)
		print("-"*50)
		# print(str(rmse) + "\t" + str(mae) + "\t" + str(precision) + "\t" + str(recall) + "\t" + str(F_score))


if __name__ == "__main__":
	# We do not want the data to be resampled every time; otherwise the predictions would not be comparable across runs.
	mode = "Test"
	df_train, df_test = data.get_train_test_data(new_sample=False)
	# Hyperparameters
	alpha = 0.8
	K = 1
	interest_sequence(df_train, df_test, mode, alpha, K)
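`precision_recall_calculation` is not shown in the excerpt. A minimal sketch of what it might compute, assuming `predictions` is a list of (true_rating, predicted_rating) pairs and a rating at or above `threshold` counts as relevant; both the pair layout and that notion of relevance are assumptions:

def precision_recall_calculation(predictions, threshold=3.5):
	# Relevant = true rating clears the threshold; recommended = predicted rating does.
	tp = sum(1 for true_r, est in predictions if true_r >= threshold and est >= threshold)
	fp = sum(1 for true_r, est in predictions if true_r < threshold and est >= threshold)
	fn = sum(1 for true_r, est in predictions if true_r >= threshold and est < threshold)
	precision = tp / (tp + fp) if tp + fp else 0.0
	recall = tp / (tp + fn) if tp + fn else 0.0
	f_score = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
	return [precision, recall, f_score]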
Example #11
def select_best_hyper(
    activation: str,
    etas: list,
    momentums: list,
    n_runs: int = 10,
    epochs: int = 75,
    batch_size: int = 100,
    verbose: bool = True,
) -> dict:
    """
    Find the best hyperparameters for the myNN implementation by grid search
    :param activation: activation function to create the model
    :param etas: list of learning rates to test
    :param momentums: list of momentums to test
    :param n_runs: number of runs to estimate performances
    :param epochs: number of epochs after which to stop
    :param batch_size: dimension of each batch
    :param verbose: print logging
    :return: dictionary with best parameters
    """
    best_err = sys.float_info.max
    best_params = {"eta": 0, "momentum": 0}

    for eta in etas:
        for momentum in momentums:
            tot_err = 0
            for _ in range(n_runs):
                # Create net, train it and compute accuracy on test data
                # fmt: off
                if activation == "relu":
                    model = myNN.Sequential(
                        myNN.Linear(2, 25),
                        myNN.ReLU(),
                        myNN.Linear(25, 25),
                        myNN.ReLU(),
                        myNN.Linear(25, 25),
                        myNN.ReLU(),
                        myNN.Linear(25, 2),
                    )
                else:
                    model = myNN.Sequential(
                        myNN.Linear(2, 25),
                        myNN.Tanh(),
                        myNN.Linear(25, 25),
                        myNN.Tanh(),
                        myNN.Linear(25, 25),
                        myNN.Tanh(),
                        myNN.Linear(25, 2),
                    )

                # A new train/test set is drawn for each run to avoid overfitting to a single dataset
                train_data, train_targets, test_data, test_targets = get_train_test_data(
                    1000)
                train_myNN(model, train_data, train_targets, test_data,
                           test_targets, epochs, batch_size, eta, momentum)
                # fmt: on

                err = compute_errors(model, test_data, test_targets,
                                     batch_size)
                tot_err += err
                del model
            err_run = tot_err / n_runs
            if verbose:
                print("Eta = {}, momentum = {}, avg_err = {}".format(
                    eta, momentum, err_run))
            # Keep the parameters if the average error beats the current best
            if err_run < best_err:
                best_err = err_run
                best_params["eta"] = eta
                best_params["momentum"] = momentum
                if verbose:
                    print(
                        "New best combination: Eta = {}, momentum = {}, avg_err = {}"
                        .format(eta, momentum, err_run))

    print("Best result found! Eta = {}, momentum = {}, avg_err = {}".format(
        best_params["eta"], best_params["momentum"], best_err))
    return best_params
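A hypothetical invocation, with illustrative grids that are not taken from the source:

best = select_best_hyper("relu",
                         etas=[1e-3, 1e-2, 1e-1],
                         momentums=[0.0, 0.5, 0.9],
                         n_runs=5)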