Example #1
def evaluate(args):
    dataset = Dataset(args.dataset)
    model = LinearRegression()
    print('Loading parameters from', args.model_parameters, '\n')
    model.load_parameters(args.model_parameters)
    display_model_metrics(model, dataset)
    display_sklearn_metrics(dataset)
Example #2
def predict(args):
    model = LinearRegression()
    print('Loading parameters from', args.model_parameters, '\n')
    model.load_parameters(args.model_parameters)
    print('Model:', model, '\n')
    x = []
    for index in range(len(model.weights)):
        x.append(float(input(f'Enter feature n.{index + 1}: ')))
    x = Dataset.preprocess(x, model.x_max, model.x_min)
    print('Predicted value:', model.predict(x))
Example #3
def main():
    # command-line arguments
    args = argparse.ArgumentParser()
    args.add_argument("--lr", type=float, default=0.0001)
    args.add_argument("--cuda", type=bool, default=True)
    args.add_argument("--num_epochs", type=int, default=50000)
    args.add_argument("--model_name", type=str, default="5")
    args.add_argument("--batch", type=int, default=2)
    args.add_argument("--mode", type=str, default="train")
    args.add_argument("--prediction_dir", type=str, default="prediction")
    args.add_argument("--print_iter", type=int, default=20000)

    config = args.parse_args()

    lr = config.lr
    cuda = config.cuda
    num_epochs = config.num_epochs
    model_name = config.model_name
    batch = config.batch
    mode = config.mode
    prediction_dir = config.prediction_dir
    print_iter = config.print_iter
    nIn = 12
    nOut = 1

    model = LinearRegression(nIn, nOut)
    device = torch.device('cuda') if cuda else torch.device('cpu')

    # check the number of model parameters
    print("------------------------------------------------------------")
    total_params = sum(p.numel() for p in model.parameters())
    print("num of parameters : ", total_params)
    trainable_params = sum(p.numel() for p in model.parameters()
                           if p.requires_grad)
    print("num of trainable parameters :", trainable_params)
    print("------------------------------------------------------------")

    if mode == 'train':
        print('train start')
        train_loader = dataloader.data_loader(DATASET_PATH,
                                              batch,
                                              phase='train')
        val_loader = dataloader.data_loader(DATASET_PATH, 1, phase='val')
        params = [p for p in model.parameters() if p.requires_grad]
        optimizer = torch.optim.Adam(params=params, lr=lr)
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                       step_size=40,
                                                       gamma=0.1)
        train(num_epochs, model, device, train_loader, val_loader, optimizer,
              lr_scheduler, prediction_dir, print_iter)
    elif mode == 'test':
        print('test start')
        test_loader = dataloader.data_loader(DATASET_PATH, 1, phase='test')
        load_model(model_name, model)
        test(model, device, test_loader, prediction_dir)
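With the defaults above, running the script with no command-line arguments starts training (--mode defaults to "train"); passing --mode test instead loads the saved parameters via load_model and runs the test branch.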
Example #4
    def estimate(self, ModelFile):
        if self.conf['model'] not in self.MODELS:
            print >> sys.stderr, 'error: invalid model - "%s"' % self.conf[
                'model']
            sys.exit(1)

        param = cPickle.load(open(ModelFile))
        sample_data = linecache.getline(self.input_file, 1).strip().split(',')

        if self.conf['model'] == 'linear-regression':
            model_lr = LinearRegression(self.conf,
                                        len(sample_data),
                                        param=param)
            model_lr.estimate(self.input_file)
Example #5
def randomLine():
    model = LinearRegression()

    a = random.randrange(-5, 11, 1)
    b = random.randrange(-5, 6, 1)

    print('Target: b =', b, 'a = ', a)

    for i in range(0, 500):
        X = random.randrange(-4, 7, 1)
        y = b + a * X + np.random.randn()
        model.addSample(X, y)

    alpha = 0.001
    numOfSteps = 100
    loop = 50

    for i in range(0, loop):
        # for printing the values of theta and the cost, during the 0th iteration.
        if i == 0:
            X, y = model.getSamples()
            initial_cost = model.getLoss(model.theta, X, y)
            print(
                "Current Hypothesis:", "%s + %s x ," %
                (round(model.theta[0][0], 2), round(model.theta[1][0], 2)),
                "cost =", round(initial_cost, 4))
    ################################################################################

        theta, cost = model.fit(alpha, numOfSteps)
        print("Current Hypothesis:", model.__str__(), ",cost =",
              round(cost, 4))
Example #6
def setLine():
    model = LinearRegression()

    for i in range(0, 1000):
        model.addSample(i, i)

    alpha = 0.000000003
    numOfSteps = 100
    loop = 50

    for i in range(0, loop):
        if i == 0:
            # for printing the values of theta and the cost, during the 0th iteration.
            X, y = model.getSamples()
            initial_cost = model.getLoss(model.theta, X, y)
            print(
                "Current Hypothesis:", "%s + %s x ," %
                (round(model.theta[0][0], 2), round(model.theta[1][0], 2)),
                "cost =", round(initial_cost, 4))
            #########################################################################

        theta, cost = model.fit(alpha, numOfSteps)
        # print(theta,round(cost,2))
        # print(model.getTheta0())
        # print(model.getTheta1())
        # print(model.getIteration())
        # print(model.getHypothesis())
        print("Current Hypothesis:", model.__str__(), ",cost =",
              round(cost, 4))
Example #7
def setLine():
    model = LinearRegression()

    for i in range(0, 1000):
        model.addSample(i, i)

    alpha = 0.000000003
    numOfSteps = 100
    loop = 50

    for i in range(0, loop):
        theta, cost = model.fit(alpha, numOfSteps)
        print("Current Hypothesis:", model.__str__(), ",cost =", round(cost, 4))

        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        x = np.linspace(0, 1000)
        ax.spines['right'].set_color('none')
        ax.spines['top'].set_color('none')
        ax.xaxis.set_ticks_position('bottom')
        ax.yaxis.set_ticks_position('left')
        plt.plot(x, x, '-b', label='y=x')
        plt.plot(x, round(theta[0][0], 2) + round(theta[1][0], 2) * x, '-.g', label=model.__str__())
        plt.legend(loc='upper left')
        plt.show()
Example #8
def randomDimension(features):
    model = LinearRegression()

    # X = np.ndarray(shape = (features, 1))
    X = [None] * features

    r = np.ndarray(shape = (features, 1))
    theta = np.ndarray(shape = (features+1, 1))
    #
    # data = np.ndarray(shape = (5000,features))

    # randomize the target coefficients (one per feature, plus the bias in theta[-1])
    for f in range(0, theta.shape[0]):
        theta[f] = random.randrange(-100, 101, 1)

    for i in range(0, 5000):
        X = []
        r = 0.0
        for f in range(0, features):
            rn = random.uniform(0.0, 1.0)
            X.append(rn)
            r += X[f] * theta[f]
        # add the bias term and some integer noise
        r = r + theta[-1] + random.randrange(-20, 20, 1)
        model.addSample(X, r)



    alpha = 0.000000003
    numOfSteps = 1000
    loop = 100

    for i in range(0, loop):
        theta, cost = model.fit(alpha, numOfSteps)
        print("theta values", theta,"cost", round(cost, 4))
Example #9
def train(dataset, args):
    n_weights = np.size(dataset.x, 1)
    model = LinearRegression(n_weights=n_weights)
    optimizer = Optimizer(dataset)
    losses = []
    for _ in range(args.epochs):
        losses.append(mse(model, dataset))
        optimizer.step(model, args.learning_rate)
    print('Model:', model, '\n')
    print('Loss:', mse(model, dataset))
    sklearn_model = SklearnLinearRegression().fit(dataset.x, dataset.y)
    print('Loss with sklearn:',
          mean_squared_error(dataset.y, sklearn_model.predict(dataset.x)), '\n')
    if args.plot:
        plot(model, n_weights, losses, dataset)
    return model
Example #10
def main():
    model = LinearRegression(1, 1).to(device)
    x_train, y_train = get_training_data()
    train(model, x_train, y_train)

    model_save_path = "./AzureMLModel"
    save_pytorch_cloudpickle_model(model,
                                   path=model_save_path,
                                   local_dependencies=["."])
    loaded_generic_model = load_generic_model(model_save_path)
    df = pd.DataFrame({"x": [[10.0], [11.0], [12.0]]})
    predict_result = loaded_generic_model.predict(df)
    assert predict_result.shape[0] == df.shape[0]

    loaded_pytorch_model = loaded_generic_model.raw_model
    assert isinstance(loaded_pytorch_model, torch.nn.Module)
Example #11
def randomLine():
    model = LinearRegression()

    a = random.randrange(-5, 11, 1)
    b = random.randrange(-5, 6, 1)

    print('Target: b =', b, 'a = ', a)

    for i in range(0, 500):
        X = random.randrange(-4, 7, 1)
        y = b + a * X + np.random.randn()
        model.addSample(X, y)


    # for diagrams
    X, y, z = model.getSamples()
    temp = z

    alpha = 0.001
    numOfSteps = 100
    loop = 50

    for i in range(0, loop):
        theta, cost = model.fit(alpha, numOfSteps)
        print("Current Hypothesis:", model.__str__(), ",cost =",
              round(cost, 4))

        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.spines['left'].set_position('center')
        ax.spines['bottom'].set_position('center')
        ax.spines['right'].set_color('none')
        ax.spines['top'].set_color('none')
        ax.xaxis.set_ticks_position('bottom')
        ax.yaxis.set_ticks_position('left')
        plt.scatter(temp, y)
        plt.plot(temp,
                 round(model.theta[0][0], 2) +
                 round(model.theta[1][0], 2) * temp,
                 '-.b',
                 label=model.__str__())
        plt.legend(loc='upper left')
        plt.show()
Example #12
    def learning(self):
        if self.conf['model'] not in self.MODELS:
            print >> sys.stderr, 'error: invalid model - "%s"' % self.conf[
                'model']
            sys.exit(1)

        if self.conf['model'] == 'linear-regression':
            model_lr = LinearRegression(self.conf,
                                        len(self.feature_name),
                                        param=self.fine_tune_param,
                                        loss_file=self.loss_file)
            model_lr.learning(self.train_data, self.train_Y, self.test_data,
                              self.test_Y)
            model_lr.print_model(self.feature_name)
Example #13
def main():
    init_params = {"inputSize": 1, "outputSize": 1}
    model = LinearRegression(**init_params).to(device)
    x_train, y_train = get_training_data()
    train(model, x_train, y_train)

    model_save_path = "./AzureMLModel"
    local_dependencies = ["."]

    save_pytorch_state_dict_model(model,
                                  init_params=init_params,
                                  path=model_save_path,
                                  local_dependencies=local_dependencies)
    loaded_generic_model = load_generic_model(model_save_path)
    df = pd.DataFrame({"x": [[10.0], [11.0], [12.0]]})
    predict_result = loaded_generic_model.predict(df)
    assert predict_result.shape[0] == df.shape[0]

    loaded_pytorch_model = loaded_generic_model.raw_model
    assert isinstance(loaded_pytorch_model, torch.nn.Module)
Example #14
def setPlane():
    model = LinearRegression()

    for i in range(0, 1000):

        model.addSample([i, 2 * i], 5 * i)
        model.addSample([2 * i, i], 4 * i)

    alpha = 0.000000006
    numOfSteps = 1000
    loop = 10

    for i in range(0, loop):
        theta, cost = model.fit(alpha, numOfSteps)
        # print(round(theta[0][0],2),round(theta[1][0],2),round(theta[2][0],2), round(cost, 4))
        # print(model.getTheta0())
        # print(model.getTheta1())
        # print(model.getTheta2())
        # print(model.getIteration())
        # print(model.getHypothesis())
        print("Current Hypothesis:", model.__str__(), ",cost =",
              round(cost, 4))
Example #15
def randomPlane():
    model = LinearRegression()

    a = random.randrange(-100, 101, 1)
    b = random.randrange(-100, 101, 1)
    c = random.randrange(-100, 101, 1)

    print('Target: c =', c, 'a = ', a, 'b = ', b)

    for i in range(0, 5000):
        x1 = random.randrange(0, 2, 1)
        x2 = random.randrange(0, 2, 1)
        y = (a * x1) + (b * x2) + (c) + random.randrange(-20, 20, 1)
        model.addSample([x1, x2], y)


    alpha = 0.0001
    numOfSteps = 1000
    loop = 100

    for i in range(0, loop):
        theta, cost = model.fit(alpha, numOfSteps)
        print("Current Hypothesis:", model.__str__(), ",cost =", round(cost, 4))
Example #16
from script import trainingInput, trainingOutput, testInput, testOutput
from model import LinearRegression
import torch

print(trainingInput)
print(trainingOutput)

print(trainingInput.size())
print(trainingOutput.size())

inputSize = 18
outputSize = 1

model = LinearRegression(inputSize, outputSize)
print(model)

criterion = torch.nn.MSELoss(reduction='sum')  # equivalent to the deprecated size_average=False
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

for epoch in range(500):

    # Forward pass: Compute predicted y by passing
    # x to the model
    predictedOutput = model(trainingInput)

    # Compute and print loss
    loss = criterion(predictedOutput, trainingOutput)
    print(epoch, loss.item())

    # Zero gradients, perform a backward pass,
    # and update the weights.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
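testInput and testOutput are imported above but never used in this excerpt; a minimal held-out evaluation under the same setup (a sketch, not part of the original snippet) could follow the loop:

# Hypothetical evaluation step, assuming testInput/testOutput match the model's input size.
with torch.no_grad():
    testPrediction = model(testInput)
    print('Test loss:', criterion(testPrediction, testOutput).item())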
Example #17
from model import Perceptron, LinearRegression

# MAIN SCRIPT:

# --- Classification ---
# Generates some linearly separable data and applies the perceptron
# Results are plotted after instantiation, one instance, one epoch, and completion
# xs, ys = generate_data(binary=True)
# myPerceptron = Perceptron()
# plot_data(xs, ys, myPerceptron)
# myPerceptron.train(xs[0], ys[0])
# plot_data(xs, ys, myPerceptron)
# myPerceptron.fit(xs, ys, max_epochs=1)
# plot_data(xs, ys, myPerceptron)
# myPerceptron.fit(xs, ys)
# plot_data(xs, ys, myPerceptron, final=True)

# # --- Linear Regression ---
# # Generates some linear data and applies linear regression
# # Results are plotted after instantiation, one instance, one epoch, and convergence
xs, ys = generate_data(binary=False)
myLinearRegression = LinearRegression()
plot_data(xs, ys, myLinearRegression)
myLinearRegression.train(xs[0], ys[0])
plot_data(xs, ys, myLinearRegression)
myLinearRegression.fit(xs, ys, max_epochs=1)
plot_data(xs, ys, myLinearRegression)
myLinearRegression.fit(xs, ys)
plot_data(xs, ys, myLinearRegression, final=True)
print(myLinearRegression)
Example #18
    parser.add_argument("--n_layer", type=int, default=2)
    parser.add_argument("--num_nodes", type=int, default=17531)

    # result args
    parser.add_argument("--result_folder", type=str)
    parser.add_argument("--gpu", type=str, default="0")

    args = parser.parse_args()

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    if not os.path.exists(args.result_folder):
        os.mkdir(args.result_folder)

    args.log = os.path.join(args.result_folder, args.log)

    src_id, dst_id = load_graph()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if args.model == "lr":
        model = LinearRegression(3, args.hid_c, args.h_step)
    elif args.model == "norm_lr":
        model = NormalizeLR(3, args.hid_c, args.h_step)
    else:
        model = PredictModel(args.model, src_id, dst_id, 3, args.hid_c,
                             args.h_step, args.n_layer, device)

    train_main(model, args, True)
    # test_main(args.log, model)
Example #19
import numpy as np
from util import read_data
from model import LinearRegression
from optimizer import SGD
from functions import least_error_square

# Method 1: gradient descent
data = read_data('../data/count_by_date.csv')
x = [[int(v)] for v in data['INDEX']]
t = [[int(v)] for v in data['CNT']]
x, t = np.array(x), np.array(t)
model = LinearRegression()
optimizer = SGD(lr=0.08)
epoch = 10000

for e in range(epoch):
    loss = model.forward(x, t)  # loss value
    model.backward()
    optimizer.update(model.params, model.grads)
    m, b = model.get_params()  # slope, bias
    # print(loss, m[0][0], b[0][0])

print(loss, m[0][0], b[0][0])
print(model.predict(np.array([[i] for i in range(31, 61)])))  # predict for INDEX 31 through 60

# Method 2: least-squares method
data = read_data('../data/count_by_date.csv')
x = [[int(v), 1] for v in data['INDEX']]
t = [[int(v)] for v in data['CNT']]
x, t = np.array(x), np.array(t)
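The method-2 snippet is cut off at this point. As a rough sketch only (this continuation is not part of the original example), the bias column of ones built into x above would let the closed-form least-squares fit be computed directly:

# Hypothetical continuation: closed-form least-squares fit via np.linalg.lstsq.
w, _, _, _ = np.linalg.lstsq(x, t, rcond=None)  # w = [[slope], [bias]]
print(w[0][0], w[1][0])
# predict for INDEX 31 through 60, mirroring method 1
print(np.array([[i, 1] for i in range(31, 61)]) @ w)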