def main():
    # Generate and split data
    # Try playing with the arguments
    all_data = data.generate_data_gauss(numSamples=1000, noise=0.5)
    train_data, valid_data = data.split_data(all_data, val_factor=0.3)
    # Set show to True if you want to see the generated dataset
    data.plot_data(train_data, valid_data, show=False)

    # Directory to save summaries to.
    # From your conda environment run
    #     tensorboard --logdir ../tf_playground/output
    # to see training details
    output = utils.get_output_dir()

    # Create model
    # Go to the model.py file to make changes to the model
    model = Model()

    # Let's train
    # Try changing the number of epochs and batch_size
    trainer = Trainer(train_data=train_data, valid_data=valid_data,
                      model=model, epochs=10, batch_size=2, output=output)
    trainer.train()
    trainer.save_final_accuracy()
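# A conventional entry-point guard (an addition, not in the original; it
# assumes data, utils, Model, and Trainer are imported from this repo's
# data.py, utils.py, model.py, and trainer.py modules):
if __name__ == '__main__':
    main()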
def simpleLinearRegression(data):
    ## Linear Regression
    plot_data(x=data['sqft_living'], y=data['price'],
              filename='price_sqft.png')
    linear_regression(data=data, x='sqft_living', y='price',
                      filename='linear_regression.png')
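# A minimal usage sketch (the CSV filename and the pandas loading step are
# assumptions; the 'sqft_living' and 'price' columns are the ones the
# function expects, as in the King County house sales data):
import pandas as pd

house_data = pd.read_csv('kc_house_data.csv')  # hypothetical path
simpleLinearRegression(house_data)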
def _save_val_predictions_plot(self):
    fig = plot_data(self.val_predictions)
    img = utils.plot_to_image(fig)
    summary = tf.Summary(value=[
        tf.Summary.Value(
            tag="Val predictions",
            image=tf.Summary.Image(encoded_image_string=img,
                                   height=6, width=6))
    ])
    self.valid_summary_writer.add_summary(summary, self._epochs_training)
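# Note that tf.Summary above is the TF1 protobuf API. A minimal TF2-style
# sketch of the same step is below, assuming utils.plot_to_image returns a
# 4-D [1, H, W, C] image tensor and that valid_summary_writer was created
# with tf.summary.create_file_writer (both assumptions):
def _save_val_predictions_plot_tf2(self):
    fig = plot_data(self.val_predictions)
    img = utils.plot_to_image(fig)
    with self.valid_summary_writer.as_default():
        tf.summary.image("Val predictions", img, step=self._epochs_training)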
def read_excel_data():
    global measurements_dict, selected_option
    files = []
    while True:
        # Poll the measurements directory for new .xlsx files
        _files = [f for f in os.listdir('measurements') if f.endswith('.xlsx')]
        if _files != files:
            files = _files
            measurements_dict = {}
            for f in files:
                df = read_data('measurements/%s' % f)
                X = df.to_numpy()
                fig = plot_data(X)
                tab, df_tab = table_html(X)
                measurements_dict[f] = [tab, df_tab, fig]
                if selected_option is None:
                    selected_option = f
        time.sleep(1)
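# Since read_excel_data() loops forever, it is typically run off the main
# thread; a minimal sketch using the standard library (the threading setup
# is an assumption -- the original caller is not shown):
import threading

watcher = threading.Thread(target=read_excel_data, daemon=True)
watcher.start()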
from model import Perceptron, LinearRegression

# MAIN SCRIPT:

# --- Classification ---
# Generates some linearly separable data and applies the perceptron.
# Results are plotted after instantiation, one instance, one epoch, and completion.
# xs, ys = generate_data(binary=True)
# myPerceptron = Perceptron()
# plot_data(xs, ys, myPerceptron)
# myPerceptron.train(xs[0], ys[0])
# plot_data(xs, ys, myPerceptron)
# myPerceptron.fit(xs, ys, max_epochs=1)
# plot_data(xs, ys, myPerceptron)
# myPerceptron.fit(xs, ys)
# plot_data(xs, ys, myPerceptron, final=True)

# --- Linear Regression ---
# Generates some linear data and applies linear regression.
# Results are plotted after instantiation, one instance, one epoch, and convergence.
xs, ys = generate_data(binary=False)
myLinearRegression = LinearRegression()
plot_data(xs, ys, myLinearRegression)
myLinearRegression.train(xs[0], ys[0])
plot_data(xs, ys, myLinearRegression)
myLinearRegression.fit(xs, ys, max_epochs=1)
plot_data(xs, ys, myLinearRegression)
myLinearRegression.fit(xs, ys)
plot_data(xs, ys, myLinearRegression, final=True)
print(myLinearRegression)
def test(self, ld_te, loss_fn=None, iw_fn=None, ld_name='', verbose=False):
    """Compute the precision and coverage of the pseudo-labeling function."""
    # compute precision
    prec_vec = []
    n_conf = 0
    n = 0
    for x, y in ld_te:
        prec_i, n_conf_i = self.loss_fn_test(
            x, y,
            lambda x: self.model_pred(x, training=False)['logits'],
            self.model, model_iw=iw_fn, reduce='none')
        prec_vec.append(prec_i)
        n_conf += n_conf_i
        n += y.shape[0]
    prec = tf.math.reduce_mean(tf.cast(tf.concat(prec_vec, 0), tf.float32))

    if verbose:
        ## print
        print('[test%s] T = %f, precision = %.2f%%, size = %d/%d = %.2f%%' % (
            ld_name if ld_name == '' else ' on %s' % ld_name,
            self.model.T if hasattr(self.model, 'T') else -1,
            prec * 100.0, n_conf, n,
            float(n_conf) / float(n) * 100.0))

        ## visualize for 2d data
        x_list = []
        y_list = []
        show = True
        for x, y in ld_te:
            if x.shape[-1] != 2 or any(y > 1):
                show = False
                break
            conf = self.model(x)
            i_conf = conf == 1
            x_list.append(x)
            y_list.append(y + 2 * tf.cast(i_conf, tf.int64))
        if show:
            x_list = tf.concat(x_list, 0).numpy()
            y_list = tf.concat(y_list, 0).numpy()
            plot_data(
                x_list, y_list,
                markers=['s', 's', 's', 's'],
                colors=['r', 'g', 'k', 'k'],
                facecolors=['r', 'g', 'r', 'g'],
                alphas=[0.5, 0.5, 1.0, 1.0],
                labels=[r'$-$', r'$+$',
                        r'$-$' + ' (conf)', r'$+$' + ' (conf)'],
                markersize=4,
                linewidth=2,
                classifier=lambda x: tf.nn.softmax(
                    self.model_pred(tf.constant(x, dtype=tf.float32),
                                    training=False)['logits'], -1).numpy(),
                fn=os.path.join(self.params.save_root,
                                'conf_examples_%s' % ld_name),
            )
    return prec, n_conf, n
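# A hedged usage sketch (learner, x_te, and y_te are hypothetical names;
# ld_te only needs to yield (x, y) batches, e.g. a tf.data.Dataset over
# 2-D points so the confidence plot above is produced):
ds_te = tf.data.Dataset.from_tensor_slices((x_te, y_te)).batch(100)
prec, n_conf, n = learner.test(ds_te, ld_name='test', verbose=True)
print('precision = %.4f, coverage = %d/%d' % (prec.numpy(), n_conf, n))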