def RBM():
    """Train a BernoulliRBM on the small demo dataset and print its output.

    Loads ``../data/smaller.dta`` (space-delimited; first 3 columns are
    features, 4th column is the target), fits a Bernoulli RBM, then prints
    the transformed features and the estimator parameters.

    Returns:
        None. All results are written to stdout.
    """
    filename = "../data/smaller.dta"
    # Use a context manager so the file handle is closed even if loadtxt
    # raises (the original opened the file and never closed it).
    with open(filename, 'rt') as raw_data:
        data = np.loadtxt(raw_data, delimiter=" ")
    X = data[:, :3]
    Y = data[:, 3]
    print(X)
    print(Y)

    print("training on RBM")
    rbm = BernoulliRBM(random_state=0, verbose=True)
    rbm.learning_rate = 0.06
    rbm.n_iter = 20
    rbm.n_components = 100
    # NOTE(review): BernoulliRBM is unsupervised; sklearn's fit() accepts but
    # ignores the second argument, so Y has no effect here — confirm intent.
    rbm.fit(X, Y)
    predictions = rbm.transform(X)  # latent (hidden-unit) representation, not class predictions
    params = rbm.get_params()
    print("predictions = ", predictions)
    print("rbm = ", rbm)
    print("params = ", params)
# --- RBM + Logistic Regression pipeline on the bank-loan data ---
# 'ub_data' and the feature matrix 'X' are defined earlier in this file.
y = ub_data['Personal Loan']

# BernoulliRBM expects float-typed input; scale() standardizes the features.
X = X.astype("float32")
X = scale(X)

# Construct the training/testing split (70% train / 30% test).
(trainX, testX, trainY, testY) = train_test_split(X, y, test_size=0.3,
                                                  random_state=42)

# Initialize the RBM + Logistic Regression pipeline.
logistic = LogisticRegression()
# BernoulliRBM parameters:
#   n_components  : int, number of binary hidden units
#   learning_rate : float, rate for weight updates
#   n_iter        : int, number of iterations/sweeps over the training set
#   random_state  : seeds the random permutations generator
#   verbose       : int, verbosity level
rbm = BernoulliRBM(n_components=2, learning_rate=0.1, n_iter=10,
                   random_state=None, verbose=10)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
classifier.fit(trainX, trainY)

# get_params(deep=True) also returns the parameters of contained
# sub-estimators. (Was a Python 2 print statement — a SyntaxError under
# Python 3 and inconsistent with the print() calls used elsewhere here.)
print(rbm.get_params(deep=True))

# score_samples computes a pseudo-likelihood: the free energy on X and on a
# randomly corrupted copy of X, returning log(sigmoid(difference)).
# print(rbm.score_samples(trainX))

print("using RBM features:\n%s\n"
      % (classification_report(testY, classifier.predict(testX))))