Exemplo n.º 1
0
def _print_params(net):
    """Print every parameter tensor of each layer in *net*."""
    for layer in net.layers:
        for par in layer.param():
            print(par)


def _print_param_grads(net):
    """Print every parameter-gradient tensor of each layer in *net*."""
    for layer in net.layers:
        for par_grad in layer.param_grad():
            print(par_grad)


def run_bigger_example():
    """Build a 2-layer all-ones linear net, run one forward/backward/update
    cycle against an MSE loss, and print the tensors at each stage.

    NOTE(review): relies on the project types Tensor, Linear, Network and
    MSE being in scope; their semantics are assumed from usage here.
    """
    x = Tensor([1, 2, 3])
    y = Tensor([7, 10])
    print(x.shape, y.shape)

    # Two all-ones linear layers: 3 -> 3 followed by 3 -> 2.
    linear1 = Linear(x.shape[0], x.shape[0], weight_init='ones')
    linear2 = Linear(x.shape[0], y.shape[0], weight_init='ones')

    net_2layer = Network([linear1, linear2])
    pred_2layer = net_2layer.forward(x)

    print("pred_2layer is ")
    print(pred_2layer)
    mse = MSE()
    loss = mse.forward(pred_2layer, y)
    print("loss for 2 layer net is ")
    print(loss)
    # Should be 2*(18-7) = 22
    loss_grad = mse.backward()
    print("loss_grad for 2layer net is ")
    print(loss_grad)
    print("Printing params Grad before ")
    _print_param_grads(net_2layer)

    print("now setting param grad to zero")
    net_2layer.zero_grad()
    print("Printing params Grad after ")
    _print_param_grads(net_2layer)
    print("Printing params before backward")
    _print_params(net_2layer)
    print("Doing backward pass")
    net_2layer.backward(loss_grad)
    print("Printing params after backward")
    _print_params(net_2layer)
    print("Printing params Grad")
    _print_param_grads(net_2layer)
    print("Doing param update")
    net_2layer.grad_step(lr=1e-3)
    print("Printing params after update")
    _print_params(net_2layer)
Exemplo n.º 2
0
    def __init__(self, coefficient):
        """
        Parameters:
            :param coefficient: (str) coefficient's name

        Raises:
            NotImplementedError: if *coefficient* is neither 'Gini' nor 'MSE'.
        """

        if coefficient == 'Gini':
            self.coefficient = Gini()
        elif coefficient == 'MSE':
            self.coefficient = MSE()
        else:
            # Log the argument itself: self.coefficient is never assigned on
            # this path, so the old reference raised AttributeError before
            # the intended error could even be logged.
            logger.error('Invalid coefficient %s', coefficient)
            raise NotImplementedError
Exemplo n.º 3
0
class Criterion:
    """
    This class chooses and computes the chosen coefficient for two data sets

    Attributes:
        :param coefficient: (str) coefficient's name
    """
    def __init__(self, coefficient):
        """
        Parameters:
            :param coefficient: (str) coefficient's name

        Raises:
            NotImplementedError: if *coefficient* is neither 'Gini' nor 'MSE'.
        """

        if coefficient == 'Gini':
            self.coefficient = Gini()
        elif coefficient == 'MSE':
            self.coefficient = MSE()
        else:
            # Log the argument itself: self.coefficient is never assigned on
            # this path, so the old reference raised AttributeError before
            # the intended error could even be logged.
            logger.error('Invalid coefficient %s', coefficient)
            raise NotImplementedError

    def compute(self, subset1, subset2):
        """
        Returns weighted sum of :param subset1 and :param subset2 chosen coefficients

        Parameters:
            :param subset1: (DataSet) one of the data sets to compute the coefficient from
            :param subset2: (DataSet) one of the data sets to compute the coefficient from
        """

        # Weight each subset's coefficient by its sample count so the result
        # is the size-weighted average over both subsets.
        size1 = subset1.X.shape[0]
        size2 = subset2.X.shape[0]

        return (self.coefficient.compute(subset1) * size1 +
                self.coefficient.compute(subset2) * size2) / (size1 + size2)
Exemplo n.º 4
0
# Split the income/happiness columns into an 80/20 train/test partition.
training_x, training_y, test_x, test_y = Dataset.split_dataset(ds.data['income'], ds.data['happiness'], 0.8)

# Create and fit a Linear Regression model
lr_model = LinearRegression()
lr_model.fit(training_x, training_y)

# Create and fit the alternative "B" Linear Regression implementation
lrb_model = LinearRegressionB()
lrb_model.fit(training_x, training_y)

# Predict test set
lr_results = lr_model.predict(test_x)
lrb_results = lrb_model.predict(test_x)

# Evaluate MSE error
mse = MSE()
lr_mse = mse(test_y, lr_results)
lrb_mse = mse(test_y, lrb_results)
print("Linear regression MSE: " + str(round(lr_mse, 4)))
print("Linear regression B MSE: " + str(round(lrb_mse, 4)))

# Plot results
plt.figure(2, figsize=(8, 8))
plt.title("Test results")
plt.scatter(test_x, test_y, label='Test dataset')
plt.plot(test_x, lr_results, label='Linear Regression')
plt.plot(test_x, lrb_results, label='Linear Regression B')
plt.legend()
plt.show()

# NOTE(review): np.sum() with no arguments raises TypeError — this line looks
# unfinished; confirm what var_mean was intended to compute.
var_mean = np.sum()
Exemplo n.º 5
0
    def remove(self, module):
        """Remove *module* from this container — not implemented yet."""
        raise NotImplementedError('implement remove to sequential!')

    def forward(self, inputs, outputs):
        """Propagate *inputs* through every layer in order.

        *outputs* is the target signal; it is stashed on ``self.real`` and
        handed to each layer alongside the running activation, which is kept
        on ``self.outputs``. Returns the final activation.
        """
        self.outputs, self.real = inputs, outputs
        for stage in self.layers:
            self.outputs = stage.forward(self.outputs, self.real)
        return self.outputs

    def backward(self, *args, **kwargs):
        """Back-propagate through the layers in reverse order.

        NOTE(review): the upstream gradient is seeded with ``self.outputs``
        (the last forward activation) rather than an explicit loss gradient —
        this presumes the final layer (e.g. the MSE layer added in __main__)
        derives the true gradient itself; confirm against the layer
        implementations.
        """
        self.grad_input = self.outputs
        # Walk the layers back-to-front, threading the gradient through each.
        for layer in self.layers[::-1]:
            self.grad_input = layer.backward(self.grad_input)
        return self.grad_input


if __name__ == "__main__":
    # Smoke test: a 3 -> 10 linear layer followed by an MSE "layer",
    # trained for 10k iterations on a single fixed sample.
    model = Sequential()
    model.add(Linear(3, 10))
    #model.add(Sigmoid(10))
    model.add(MSE(10))
    X = np.array([[3., 4., 5.]]).T
    y = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).T
    # xrange is Python 2 only and raises NameError on Python 3; range
    # behaves identically here.
    for i in range(10000):
        model.forward(X, y)
        model.backward()

    print(model.forward(X, y))
    #print(model.layers[-1].Loss)
Exemplo n.º 6
0
# y = np.exp(x * np.pi / 180.)

# Corrupt the target with zero-mean Gaussian noise (sigma = 0.1).
# NOTE(review): x and y are defined earlier in the file, outside this excerpt.
noise = np.random.normal(0, .1, y.shape)
noisy_y = y + noise

x_train = (x - x.mean()) / x.std()   # Normalization
y_train = noisy_y

# Polynomial regression
p_grade = 10
until_p_grade = False  # True: include all polynomial grades until p_grade | False: Only calculates p_grade
limit_grade = p_grade if until_p_grade else 0

# One row per solver (4 solvers), one column per grade tried.
res = np.zeros((4, limit_grade + 1, len(x)))
mse = np.zeros((4, limit_grade + 1))
mse_eval = MSE()

for i in range(limit_grade + 1):

    # Either sweep grades 0..p_grade or evaluate p_grade alone.
    grade = i if until_p_grade else p_grade

    # Analytic Solution
    pr = PolynomialRegression(grade)

    # Numerical Solutions
    n_epochs = 2000
    learning_rate = 0.2
    reg_factor = 0.0001

    gr = GradientDescent(learning_rate, n_epochs, grade, reg_factor)
    st = StochasticGradientDescent(learning_rate, n_epochs, grade, reg_factor)
Exemplo n.º 7
0
                predictedDay3 = np.zeros(24)

        # Ensemble prediction: weighted sum of the three model predictions.
        predictedDay = newW[0] * predictedDay1 + newW[1] * predictedDay2+ newW[2] * predictedDay3

    # denormalize:

    # NOTE(review): rescaling by the first hour's ratio and by the prediction's
    # own L2 norm only approximately inverts an earlier normalisation — confirm
    # against the normalisation step (outside this excerpt).
    scalingFactor = finalDayToTestNonNorm[0] / finalDayToTest[0]
    predictedDay=predictedDay*np.linalg.norm(predictedDay)
    predictedDay = predictedDay*scalingFactor
    finalDayToTest = finalDayToTest*scalingFactor
    fileToSave = fileToSave + "_L1norm"

    # Error metrics between the actual and predicted 24-hour profiles.
    mae = MAE(finalDayToTest,predictedDay)
    mape = MAPE(finalDayToTest,predictedDay)
    mre = MRE(finalDayToTest,predictedDay)
    mse = MSE(finalDayToTest,predictedDay)
    errorDfEnsemble[day] = [mae, mape, mre, mse]
    # Plot predicted vs actual hourly load for the day.
    width = 12
    height = 10
    dpi = 70
    fig = plt.figure(figsize=(width, height), dpi=dpi)
    ax = fig.add_subplot(111)
    plt.plot(predictedDay, 'r-', label = 'predicted')
    plt.plot(finalDayToTest, label = "actual")
    plt.xticks(np.arange(0, 24, 1),fontsize=15)
    plt.yticks(fontsize=15)
    plt.xlim([0,23])
    plt.xlabel('Hours',fontsize=15)
    plt.ylabel('Load, MW',fontsize=15)
    plt.title("Predicted for "+str(finalDayDate.strftime('%d %b, %Y'))+" with ensamble of "+ str(numModels)+ " models, days trained = "+str(len(trainSet)),fontsize=18,y=1.03)
    plt.legend(loc='upper left')
Exemplo n.º 8
0
        AnnotatedCorpora(output_file).write_conllu(working_dir + work_space +
                                                   '/' + args.tagger_output)

    if args.mode == "iterative_train_and_test":

        # Export the raw training data in every format the pipeline needs:
        # plain text, CoNLL-U, and the yaset CoNLL variant.
        raw_data_file = working_dir + "/train.raw.txt"
        raw_data_txt_file = working_dir + work_space + "/train.raw.txt"
        AnnotatedCorpora(raw_data_file).write_raw(raw_data_txt_file)
        raw_data_conllu_file = working_dir + work_space + "/train.raw.conllu"
        AnnotatedCorpora(raw_data_file).write_conllu(raw_data_conllu_file)
        raw_data_yaset_file = working_dir + work_space + "/train.raw.conll"
        AnnotatedCorpora(raw_data_file).write_yaset_format(raw_data_yaset_file)

        # Tools for the iterative loop: morphology splitter, embedding
        # builder, and tagger, all rooted at the workspace directory.
        morfessor = Morfessor(working_dir + work_space)
        mse = MSE(working_dir + work_space, iterative=True)
        yaset = Yaset(working_dir + work_space, iterative=True)

        morpho_file, train_vocab_file = morfessor.make_morpho(raw_data_file)

        if args.use_mimick == 'True':
            # NOTE(review): train/dev/test_data_file come from earlier in the
            # file, outside this excerpt.
            full_vocab_file = mse.create_vocab([train_data_file, \
                                                dev_data_file, \
                                                test_data_file, \
                                                raw_data_conllu_file], \
                                               train_vocab_file)

        retagged_raw_conllu_file = raw_data_conllu_file

        for i in range(1, int(args.iter_number) + 1):
Exemplo n.º 9
0
def lbfPredict(X,dates,dateToPredict, method, fileToSave,windowSize=5,numClusters=3,numDaysToTrain=365, normalize = False):
    """Predict one day's 24-hour load profile by clustering similar past days.

    Clusters the ``numDaysToTrain`` days before ``dateToPredict``, finds past
    occurrences of the cluster-label sequence of the last ``windowSize`` days,
    and averages the days that followed those occurrences. Saves a plot of
    prediction vs actual to ``fileToSave`` and returns the error metrics.

    Parameters:
        :param X: 2-D array, one row per day (indexed by hour — 24 columns,
            judging by the 0..23 plot axis; confirm).
        :param dates: array of day timestamps aligned with the rows of X.
        :param dateToPredict: the target day.
        :param method: 'k-means', 'k-medoids' or 'hierarchical'.
        :param fileToSave: base path for the output figure.
        :param windowSize: label-sequence length to match (shrinks on no match).
        :param numClusters: number of clusters.
        :param numDaysToTrain: size of the training window in days.
        :param normalize: if True, L2-normalise each day's row first.

    Returns:
        (mae, mape, mre, mse) tuple of error metrics.
    """

    if normalize:
        # Keep the un-normalised target day so we can rescale back later.
        daysBeforeDate = len(dates[dates < dateToPredict])
        finalDayToTestNonNorm = X[daysBeforeDate, :]
        X = X / np.linalg.norm(X, axis=-1)[:, np.newaxis]

    daysBeforeDate = len(dates[dates < dateToPredict])

    # we train on 11 months
    daysToTrain = X[daysBeforeDate - numDaysToTrain:daysBeforeDate]

    finalDayToTest = X[daysBeforeDate, :]
    finalDayDate = dates[daysBeforeDate]

    # NOTE(review): if method matches none of the three options below,
    # `labels` is never assigned and the code fails with NameError.
    if method == 'k-means':
        # train kmeans
        model = KMeans(n_clusters=numClusters, random_state=0).fit(daysToTrain)
        labels = model.labels_
    if method == 'k-medoids':
        # train k-medoids
        model = KMedoids(n_clusters=numClusters, random_state=0).fit(daysToTrain)
        labels = model.labels_
    if method == 'hierarchical':
        labels = AgglomerativeClustering(n_clusters=numClusters, affinity='euclidean', linkage='ward').fit_predict(daysToTrain)

    # find a label for a day before the day of interest
    dayBefore = labels[-1]

    # find sequence of 5 days preceding the day of interest
    sequenceToLook = labels[numDaysToTrain - windowSize:numDaysToTrain]

    # find the first indeces of the sequences
    indecesFound = searchSequenceNumpy(labels, sequenceToLook)

    # Shrink the window until the sequence occurs somewhere in the history.
    # NOTE(review): if windowSize reaches 0 the sequence becomes empty;
    # whether searchSequenceNumpy then returns matches (terminating the
    # loop) is not visible here — confirm it cannot loop forever.
    while np.size(indecesFound) == 0:
        windowSize = windowSize - 1
        # find sequence of 5 days preceding the day of interest
        sequenceToLook = labels[numDaysToTrain - windowSize:numDaysToTrain]

        # find the first indeces of the sequences
        indecesFound = searchSequenceNumpy(labels, sequenceToLook)

    arrayOfSimilarDaysIdx = []

    # Collect the day *after* each matched window as a "similar day".
    # NOTE(review): range(len - 1) deliberately (?) skips the last match —
    # presumably the query window itself at the end of the history; confirm.
    for k in range(len(indecesFound) - 1):
        tmp = indecesFound[k] + windowSize + 1
        # we don't want the last 5 days to appear in the subeset of similar days
        if tmp.item(0) < len(daysToTrain) - windowSize:
            arrayOfSimilarDaysIdx.append(tmp.item(0))

    similarDays = daysToTrain[arrayOfSimilarDaysIdx]

    # Average the similar days (or take the single one found).
    if np.size(arrayOfSimilarDaysIdx) > 1:
        predictedDay = np.mean(similarDays, 0)
    else:
        predictedDay = similarDays[0]


    # denormalize:
    if normalize:
        # NOTE(review): rescaling by the first hour's ratio and by the
        # prediction's own L2 norm only approximately inverts the earlier
        # row normalisation — confirm intent.
        scalingFactor = finalDayToTestNonNorm[0] / finalDayToTest[0]
        predictedDay=predictedDay*np.linalg.norm(predictedDay)
        predictedDay = predictedDay*scalingFactor
        finalDayToTest = finalDayToTest*scalingFactor
        fileToSave = fileToSave + "_L1norm"


    # Error metrics between actual and predicted profiles.
    mae = MAE(finalDayToTest, predictedDay)
    mape = MAPE(finalDayToTest, predictedDay)
    mre = MRE(finalDayToTest, predictedDay)
    mse = MSE(finalDayToTest, predictedDay)

    # plot for one day, nyc
    w = 10
    h = 10
    d = 70
    fig = plt.figure(figsize=(w, h), dpi=d)
    ax = fig.add_subplot(111)
    plt.plot(predictedDay, 'r-', label='predicted')
    plt.plot(finalDayToTest, label="actual")
    plt.xticks(np.arange(0, 24, 1), fontsize=15)
    plt.yticks(fontsize=15)
    plt.xlim([0, 23])
    plt.xlabel('Hours', fontsize=15)
    plt.ylabel('Load, MW', fontsize=15)
    plt.title("Predicted for " + str(finalDayDate.strftime('%d %b, %Y')) + ", "+method+", k = " + str(
        numClusters) + ", days trained = " + str(len(daysToTrain)) + ", days avg = " + str(
        np.size(arrayOfSimilarDaysIdx)), fontsize=16, y=1.03)
    plt.legend(loc='upper left')
    # Annotate the metrics in the top-left corner of the axes.
    ax.text(0.02, 0.87, 'mae = ' + str(np.round(mae, 3)), transform=ax.transAxes)
    ax.text(0.02, 0.84, 'mape = ' + str(np.round(mape, 3)), transform=ax.transAxes)
    ax.text(0.02, 0.81, 'mre = ' + str(np.round(mre, 3)), transform=ax.transAxes)
    ax.text(0.02, 0.78, 'mse = ' + str(np.round(mse, 3)), transform=ax.transAxes)

    fig.savefig(fileToSave, bbox_inches='tight',)
    return (mae, mape,mre,mse)
Exemplo n.º 10
0
import numpy as np

from nn import nn
from sigmoid import sigmoid
from MSE import MSE

# Two-layer MLP (10 -> 5 -> 2) with sigmoid activations; 0.1 is presumably
# the learning rate passed to each nn layer — confirm against nn.__init__.
fc1 = nn(10, 5, 0.1)
sig1 = sigmoid()
fc2 = nn(5, 2, 0.1)
sig2 = sigmoid()
mse = MSE()
x = np.random.randn(10)  # generate random training input
t = np.random.randn(2)  # generate random teacher (target) signal
for i in range(100):
    # Forward pass: fc1 -> sigmoid -> fc2 -> sigmoid, then MSE loss.
    out = sig2.forward(fc2.forward(sig1.forward(fc1.forward(x))))
    loss = mse.forward(out, t)
    print(loss)
    # Backward pass in reverse layer order, then parameter updates.
    grad = mse.backward(out, t)
    fc1.backward(sig1.backward(fc2.backward(sig2.backward(grad))))
    fc1.update()
    fc2.update()
Exemplo n.º 11
0
    # NOTE(review): x is defined earlier in the file, outside this excerpt.
    y = Tensor([7, 10])
    print(x.shape, y.shape)

    #linear_a = Linear(x.shape[1], 4, weight_init='ones')
    #linear_b = Linear(x.shape[0], y.shape[0], weight_init='ones')
    #relu = Relu()
    #net_2layer = Network([linear_a], 2)#, relu, linear_b])
    #print(x.view(-1, 2).shape)
    #print(net_2layer.forward(x.view(-1, 2)))

    # Two all-ones linear layers: 3 -> 3 followed by 3 -> 2.
    linear1 = Linear(x.shape[0], x.shape[0], weight_init='ones')
    linear2 = Linear(x.shape[0], y.shape[0], weight_init='ones')

    net_2layer = Network([linear1, linear2], 1)

    mse = MSE()

    # Training hyper-parameters.
    lr = 1e-3
    num_iter = 200

    # Track the loss curve (filled in later, outside this excerpt).
    timesteps = []
    loss_at_timesteps = []

    for it in range(num_iter):

        # Standard step: clear grads, forward, compute loss, backprop.
        net_2layer.zero_grad()
        pred_2layer = net_2layer.forward(x)
        loss = mse.forward(pred_2layer, y)
        print("At iteration ", str(it), " the loss is ", loss)
        loss_grad = mse.backward()
        net_2layer.backward(loss_grad)
Exemplo n.º 12
0
# NOTE(review): weight_init, num_hidden and train_input are defined earlier
# in the file, outside this excerpt.
bias_init = 'zero'
layers = []

# Input layer 2 -> 25 with ReLU, then (num_hidden - 1) hidden 25 -> 25 ReLU
# blocks, then a 25 -> 2 output layer squashed by Tanh.
linear = Linear(2, 25, weight_init=weight_init, bias_init=bias_init)
layers.append(linear)
layers.append(Relu())
for i in range(num_hidden - 1):
    layers.append(Linear(25, 25, weight_init=weight_init, bias_init=bias_init))
    layers.append(Relu())
layers.append(Linear(25, 2, weight_init=weight_init, bias_init=bias_init))
layers.append(Tanh())
net_2layer = Network(layers, train_input.shape[0])

# Choose loss

mse = MSE()

# Choose parameters

lr = 0.05
num_iter = 1000

# Track the loss curve during training.
timesteps = []
loss_at_timesteps = []

# Train model

for it in range(num_iter):

    net_2layer.zero_grad()