def train_test_linear_regression(train_test_data, show_plot=False, save_model=False):
    # Commented out, was doing this to compare different methods of cross validation
    # # can use cross_validate instead of cross_val_score to evaluate more metrics than just R^2
    # scoring = ["r2", "neg_mean_squared_error", "neg_root_mean_squared_error"]
    # # 1) time series split
    # tscv = TimeSeriesSplit(n_splits=5)
    # print(tscv)
    # # Below shows the timeseries splits if we want to see them
    # for train, test in tscv.split(X_train):
    #     print("%s %s" % (train, test))
    # ts_scores = cross_validate(LinearRegression(), X_train, y_train, cv=tscv, scoring=scoring)
    # print(ts_scores)
    # print("Loss: {0:.3f} (+/- {1:.3f})".format(ts_scores["test_r2"].mean(), ts_scores["test_r2"].std()))
    # print("RMSE: {0:.3f} (+/- {1:.3f})".format(ts_scores["test_neg_root_mean_squared_error"].mean(), ts_scores["test_neg_root_mean_squared_error"].std()))
    # print("Min RMSE: {0:.3f}".format(min(ts_scores["test_neg_root_mean_squared_error"])))
    # # 2) K-fold
    # kfcv = KFold(n_splits=5)
    # print(kfcv)
    # kf_scores = cross_validate(LinearRegression(), X_train, y_train, cv=kfcv, scoring=scoring)
    # print(kf_scores)
    # print("Loss: {0:.3f} (+/- {1:.3f})".format(kf_scores["test_r2"].mean(), kf_scores["test_r2"].std()))
    # print("RMSE: {0:.3f} (+/- {1:.3f})".format(kf_scores["test_neg_root_mean_squared_error"].mean(), kf_scores["test_neg_root_mean_squared_error"].std()))
    # print("Min RMSE: {0:.3f}".format(min(kf_scores["test_neg_root_mean_squared_error"])))
    model = models.LinearRegression()
    best_params = model.get_best_params(train_test_data)
    model.train(train_test_data.X_train, train_test_data.y_train, save_model=save_model)
    model.display_metrics(train_test_data, show_plot=show_plot, selected_params=best_params)
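# A minimal standalone sketch of the cross-validation comparison that the
# commented-out block above performs. It assumes scikit-learn is available and
# uses synthetic data in place of the project's X_train / y_train, which are
# not defined in this snippet.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import TimeSeriesSplit, KFold, cross_validate

rng = np.random.default_rng(0)
X_train = rng.normal(size=(200, 3))  # placeholder features
y_train = X_train @ np.array([1.0, -2.0, 0.5]) + rng.normal(scale=0.1, size=200)

scoring = ["r2", "neg_root_mean_squared_error"]
for name, cv in [("time series split", TimeSeriesSplit(n_splits=5)),
                 ("k-fold", KFold(n_splits=5))]:
    scores = cross_validate(LinearRegression(), X_train, y_train, cv=cv, scoring=scoring)
    print("%s  R^2: %.3f (+/- %.3f)  RMSE: %.3f (+/- %.3f)"
          % (name,
             scores["test_r2"].mean(), scores["test_r2"].std(),
             -scores["test_neg_root_mean_squared_error"].mean(),
             scores["test_neg_root_mean_squared_error"].std()))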
def create_net(num_classes, dnn='resnet20', **kwargs):
    ext = None
    if dnn in ['resnet20', 'resnet56', 'resnet110']:
        net = models.__dict__[dnn](num_classes=num_classes)
    elif dnn == 'resnet50':
        net = torchvision.models.resnet50(num_classes=num_classes)
    elif dnn == 'resnet101':
        net = torchvision.models.resnet101(num_classes=num_classes)
    elif dnn == 'resnet152':
        net = torchvision.models.resnet152(num_classes=num_classes)
    elif dnn == 'densenet121':
        net = torchvision.models.densenet121(num_classes=num_classes)
    elif dnn == 'densenet161':
        net = torchvision.models.densenet161(num_classes=num_classes)
    elif dnn == 'densenet201':
        net = torchvision.models.densenet201(num_classes=num_classes)
    elif dnn == 'inceptionv4':
        net = models.inceptionv4(num_classes=num_classes)
    elif dnn == 'inceptionv3':
        net = torchvision.models.inception_v3(num_classes=num_classes)
    elif dnn == 'vgg16i':  # vgg16 for imagenet
        net = torchvision.models.vgg16(num_classes=num_classes)
    elif dnn == 'googlenet':
        net = models.googlenet()
    elif dnn == 'mnistnet':
        net = MnistNet()
    elif dnn == 'fcn5net':
        net = models.FCN5Net()
    elif dnn == 'lenet':
        net = models.LeNet()
    elif dnn == 'lr':
        net = models.LinearRegression()
    elif dnn == 'vgg16':
        net = models.VGG(dnn.upper())
    elif dnn == 'alexnet':
        # net = models.AlexNet()
        net = torchvision.models.alexnet()
    elif dnn == 'lstman4':
        net, ext = models.LSTMAN4(datapath=kwargs['datapath'])
    elif dnn == 'lstm':
        # model = lstm(embedding_dim=args.hidden_size, num_steps=args.num_steps, batch_size=args.batch_size,
        #              vocab_size=vocab_size, num_layers=args.num_layers, dp_keep_prob=args.dp_keep_prob)
        net = lstmpy.lstm(vocab_size=kwargs['vocab_size'], batch_size=kwargs['batch_size'])
    else:
        errstr = 'Unsupported neural network %s' % dnn
        logger.error(errstr)
        raise ValueError(errstr)
    return net, ext
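# A hedged usage sketch for create_net above. It assumes torchvision and the
# project's local models/lstmpy modules are importable; the 'lstm' and
# 'lstman4' branches additionally require vocab_size/batch_size or datapath
# keyword arguments, while the torchvision branches only need num_classes.
net, ext = create_net(num_classes=1000, dnn='resnet50')
print(type(net).__name__, ext)  # e.g. "ResNet", None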
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model', type=str, choices=models.available_models, dest='model_name')
    args = parser.parse_args()
    if args.model_name == "LinearRegression":
        model = models.LinearRegression(1, 1, learning_rate=0.005)
        x_train = np.array([[2.3], [4.4], [3.7], [6.1], [7.3], [2.1], [5.6], [7.7], [8.7], [4.1],
                            [6.7], [6.1], [7.5], [2.1], [7.2], [5.6], [5.7], [7.7], [3.1]], dtype=np.float32)
        y_train = np.array([[3.7], [4.76], [4.], [7.1], [8.6], [3.5], [5.4], [7.6], [7.9], [5.3],
                            [7.3], [7.5], [8.5], [3.2], [8.7], [6.4], [6.6], [7.9], [5.3]], dtype=np.float32)
        t_x = Tensor(x_train)
        t_y = Tensor(y_train)
        tensor_dataset = TensorDataset(t_x, t_y)
        data_loader = DataLoader(tensor_dataset, batch_size=32)
        model.run(data_loader, model)
    elif args.model_name == "LogisticRegression":
        X_train, y_train = load_iris(return_X_y=True)
        t_x, t_y = torch.tensor(X_train, dtype=torch.float), torch.tensor(y_train, dtype=torch.long)
        tensor_dataset = TensorDataset(t_x, t_y)
        num_classes = len(set(y_train))
        input_dim = X_train.shape[1]
        data_loader = DataLoader(tensor_dataset, batch_size=32, shuffle=True)
        model = models.LogisticRegression(input_dim, num_classes, learning_rate=0.01, epoch=1000)
        model.run(data_loader, model)
    elif args.model_name == "Convolution2D":
        n_epoch = 5
        tr_batch_size, ts_batch_size = 32, 1024
        data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
        tr_mnist = MNIST(root=os.path.realpath('../../dataset/mnist'), train=True, transform=data_transform, download=False)
        ts_mnist = MNIST(root=os.path.realpath('../../dataset/mnist'), train=False, transform=data_transform, download=False)
        train_loader = DataLoader(tr_mnist, batch_size=tr_batch_size, shuffle=True)
        test_loader = DataLoader(ts_mnist, batch_size=ts_batch_size, shuffle=True)
        conv_net = models.Convolution2D(n_epoch=n_epoch, log_per_batch=1000, train_batch_size=tr_batch_size)
        conv_net.run(train_loader, test_loader)
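# Context note for main() above: the snippet appears to assume roughly the
# following imports. The local `models` package is project-specific, so that
# import is a guess; the rest are standard PyTorch/torchvision/scikit-learn.
import argparse
import os
import numpy as np
import torch
from torch import Tensor
from torch.utils.data import TensorDataset, DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, ToTensor, Normalize
from sklearn.datasets import load_iris
import models  # assumption: the project's own models package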
def create_net(num_classes, dnn='resnet20', **kwargs):
    ext = None
    if dnn in ['resnet20', 'resnet56', 'resnet110']:
        net = models.__dict__[dnn](num_classes=num_classes)
    elif dnn == 'resnet50':
        # net = models.__dict__['resnet50'](num_classes=num_classes)
        net = torchvision.models.resnet50(num_classes=num_classes)
    elif dnn == 'inceptionv4':
        net = models.inceptionv4(num_classes=num_classes)
    elif dnn == 'inceptionv3':
        net = torchvision.models.inception_v3(num_classes=num_classes)
    elif dnn == 'vgg16i':  # vgg16 for imagenet
        net = torchvision.models.vgg16(num_classes=num_classes)
    elif dnn == 'googlenet':
        net = models.googlenet()
    elif dnn == 'mnistnet':
        net = MnistNet()
    elif dnn == 'fcn5net':
        net = models.FCN5Net()
    elif dnn == 'lenet':
        net = models.LeNet()
    elif dnn == 'lr':
        net = models.LinearRegression()
    elif dnn == 'vgg16':
        net = models.VGG(dnn.upper())
    elif dnn == 'alexnet':
        net = torchvision.models.alexnet()
    elif dnn == 'lstman4':
        net, ext = models.LSTMAN4(datapath=kwargs['datapath'])
    elif dnn == 'lstm':
        net = lstmpy.lstm(vocab_size=kwargs['vocab_size'], batch_size=kwargs['batch_size'])
    else:
        errstr = 'Unsupported neural network %s' % dnn
        logger.error(errstr)
        raise ValueError(errstr)
    return net, ext
t_features, t_targets = gen_data.gaussian_data(1000)
pred_targets = np.zeros(t_targets.shape)
for i in range(2000):
    pred_targets[i] = clf.predict(t_features[i])
print(pred_targets)
print("accuracy is %f" % (len(np.where(pred_targets[:] == t_targets)[0]) / 2000.0))
'''

'''regression'''
weights = np.array([1.5, 2])
features, targets = gen_data.linear_data(weights, 1000, d=2)
clf = models.LinearRegression()
clf.fit_normal_equation(features, targets, add_intercept=True)

t_features, t_targets = gen_data.linear_data(weights, 200, d=2)
pred_targets = np.zeros(t_targets.shape)
for i in range(200):
    pred_targets[i] = clf.predict(t_features[i])

n = pred_targets.size
pairs = np.hstack((pred_targets.reshape(n, 1), t_targets.reshape(n, 1)))
print(pairs)
print("loss is %f" % (np.mean(np.square(pred_targets - t_targets)) / np.mean(t_targets)))
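# A minimal NumPy sketch of what fit_normal_equation above presumably does:
# solve the least-squares problem w = (X^T X)^{-1} X^T y after prepending a
# bias column. gen_data.linear_data is project-specific, so synthetic linear
# data is generated inline here instead.
import numpy as np

rng = np.random.default_rng(0)
true_w = np.array([1.5, 2.0])
X = rng.uniform(-1.0, 1.0, size=(1000, 2))
y = X @ true_w + rng.normal(scale=0.05, size=1000)

Xb = np.hstack([np.ones((X.shape[0], 1)), X])   # add intercept column
w = np.linalg.solve(Xb.T @ Xb, Xb.T @ y)        # normal equation
pred = Xb @ w
print("weights:", w)
print("mse: %f" % np.mean(np.square(pred - y)))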