Example #1
def main():
    # Load the banknote train/test splits, then map the 0 labels to -1
    # so labels live in {-1, +1} as column vectors
    train_ex, train_labels = prepare_continous_data('./bank-note/train.csv')
    test_ex, test_labels = prepare_continous_data('./bank-note/test.csv')
    train_labels = np.array([[-1] if l == 0 else [l] for l in train_labels])
    test_labels = np.array([[-1] if l == 0 else [l] for l in test_labels])

    epochs = 100
    lr = .0001
    # Hyperparameter grids to sweep: prior variances for the MAP objective
    # and candidate values of the schedule parameter d
    variance = [0.01, 0.1, 0.5, 1, 3, 5, 10, 100]
    dp = [100 / 873, 500 / 873, 700 / 873]

    for v in variance:
        for d in dp:
            print('Variance: {:.2f}, \t Parameter d: {:.6f}'.format(v, d))
            LogisticClassifier = LogisticRegressionClassifier(lr,
                                                              v,
                                                              d,
                                                              mode='map')
            for epoch in range(1, epochs + 1):
                X, Y = shuffle_data(train_ex, train_labels)

                loss = LogisticClassifier.train_dataset(X, Y, epoch)
                # print('Epoch: {}\n\t Loss: {:.6f}'.format(epoch, loss[0]))

            _, train_err = LogisticClassifier.test_dataset(
                train_ex, train_labels)
            _, test_err = LogisticClassifier.test_dataset(
                test_ex, test_labels)
            print('Train error: {:.6f}'.format(train_err))
            print('Test error: {:.6f}'.format(test_err))
Example #2
def python_model():
    # Load the banknote train/test splits and map the 0 labels to -1
    train_ex, train_labels = prepare_continous_data('./bank-note/train.csv')
    test_ex, test_labels = prepare_continous_data('./bank-note/test.csv')
    train_labels = np.array([[-1] if l == 0 else [l] for l in train_labels])
    test_labels = np.array([[-1] if l == 0 else [l] for l in test_labels])

    epochs = 25
    lr = .001
    dp = [100 / 873, 500 / 873, 700 / 873]
    nodes = [5, 10, 25, 50, 100]
    num_features = 5  # four input features plus a bias term

    for hidden_nodes in nodes:
        for d in dp:
            print('Number of hidden nodes: {}\t Parameter d: {}'.format(
                hidden_nodes, d))
            network = NeuralNetwork(num_features, hidden_nodes, lr, d)
            for epoch in range(1, epochs + 1):
                X, Y = shuffle_data(train_ex, train_labels)
                network.train_dataset(X, Y, epoch)
                # print('Epoch: {}\n\t Train Loss: {:.6f}'.format(epoch, loss[0,0]))
            train_err = network.test_dataset(train_ex, train_labels)
            test_err = network.test_dataset(test_ex, test_labels)

            print('Train Error: {:.6f}'.format(train_err))
            print('Test Error: {:.6f}'.format(test_err))
Example #3
def run_base_perceptron(train_ex, train_labels, test_ex, test_labels, T, lr):
	perceptron = BasePerceptron(lr)
	for t in range(T):
		shuffled_x, shuffled_y = shuffle_data(train_ex, train_labels)
		perceptron.train_dataset(shuffled_x, shuffled_y)

	predictions, err = perceptron.test_dataset(test_ex, test_labels)
	print("STANDARD PERCEPTRON:")
	print("error: ", err)
	print("weights: ")
	print(perceptron.weights)
Example #4
def run_averaged_perceptron(train_ex, train_labels, test_ex, test_labels, T, lr):
	a_perceptron = AveragedPerceptron(lr)
	for t in range(T):
		shuffled_x, shuffled_y = shuffle_data(train_ex, train_labels)
		a_perceptron.train_dataset(shuffled_x, shuffled_y)

	predictions, err = a_perceptron.test_dataset(test_ex, test_labels)

	print("AVERAGED PERCEPTRON:")
	print("error: ", err)
	print("weights: ")
	print(a_perceptron.weights)
Example #5
def run_voted_perceptron(train_ex, train_labels, test_ex, test_labels, T, lr):
	v_perceptron = VotedPerceptron(lr)
	for t in range(T):
		shuffled_x, shuffled_y = shuffle_data(train_ex, train_labels)
		v_perceptron.train_dataset(shuffled_x, shuffled_y)

	predictions, err = v_perceptron.test_dataset(test_ex, test_labels)

	print("VOTED PERCEPTRON:")
	print("error: ", err)
	print("voted weights: ")
	for (c, w) in v_perceptron.m_weights:
		print("counts: ", c)
		print("weight vector: \n", w)
	print("number of votes: ", v_perceptron.m)
Example #6
def run_primal_svm(train_ex, train_labels, test_ex, test_labels, epochs, lr,
                   C):
    for c in C:
        print("Training Primal SVM with C = {:.8f}".format(c))
        primal_svm = PrimalSVM(c, lr)

        for epoch in range(epochs):
            x_s, y_s = shuffle_data(train_ex, train_labels)
            primal_svm.train_dataset(x_s, y_s)
            # print(primal_svm.weights)
            # Anneal the step size: lr_t = lr / (1 + (lr / 100) * t)
            primal_svm.lr = lr / (1 + (lr / 100) * epoch)

        # Evaluate once after the final epoch
        _, train_err = primal_svm.test_dataset(train_ex, train_labels)
        _, test_err = primal_svm.test_dataset(test_ex, test_labels)

        print("Train Error: ", train_err)
        print("Test Error: ", test_err)
        print('Weights:')
        for i in range(len(primal_svm.weights)):
            print(primal_svm.weights[i][0])
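
The epoch loop above anneals the step size with the schedule lr_t = lr / (1 + (lr / 100) * t). The same schedule as a standalone sketch (the constant 100 is taken from the code; the function name is hypothetical):

def decayed_lr(lr0, epoch):
    # Starts at lr0 and decays roughly like 1/epoch for large epoch
    return lr0 / (1 + (lr0 / 100) * epoch)

# decayed_lr(0.01, 0) == 0.01; decayed_lr(0.01, 100) == 0.01 / 1.01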
Example #7
def run_dual_svm(train_ex,
                 train_labels,
                 test_ex,
                 test_labels,
                 epochs,
                 lr,
                 C,
                 kernel=None):
    for c in C:
        print("Training Dual SVM with C = {:.8f}".format(c))
        dual_svm = DualSVM(c, lr, kernel=kernel)

        x_s, y_s = shuffle_data(train_ex, train_labels)
        y_s = y_s.reshape(-1)  # flatten labels to 1-D for the dual solver
        dual_svm.train_dataset(x_s, y_s)
        _, train_err = dual_svm.test_dataset(train_ex, train_labels)
        _, test_err = dual_svm.test_dataset(test_ex, test_labels)

        print("Train Error: ", train_err)
        print("Test Error: ", test_err)
Example #8
    def get_images_labels_batch(self):

        images = []
        labels = []
        for index in range(self.batch_size):
            self.current_index += 1

            # Avoid index overflow: wrap around and reshuffle for the new epoch
            if self.current_index > self.file_num_train - 1:
                self.current_index %= self.file_num_train

                self.images, self.labels = shuffle_data(samples=self.images,
                                                        labels=self.labels)

            images.append(self.images[self.current_index])
            labels.append(self.labels[self.current_index])

        images = np.stack(images)
        labels = np.stack(labels)

        return images, labels
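
This method is a cursor-based batch fetcher: it advances an index one example at a time and, when the index runs past the end of the training set, wraps it around and reshuffles so the next epoch is visited in a fresh order. The same pattern as a self-contained function (hypothetical names; the class above keeps the cursor, images, and labels as state on self):

import numpy as np

def next_batch(images, labels, cursor, batch_size):
    # Advance the cursor one example at a time, wrapping and reshuffling
    # whenever it passes the last index, exactly as in the method above
    xs, ys = [], []
    for _ in range(batch_size):
        cursor += 1
        if cursor > len(images) - 1:
            cursor %= len(images)
            perm = np.random.permutation(len(images))
            images, labels = images[perm], labels[perm]
        xs.append(images[cursor])
        ys.append(labels[cursor])
    # Return the (possibly reshuffled) arrays so the caller can carry them
    return np.stack(xs), np.stack(ys), cursor, images, labels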
Example #9
    def load_data(self, b_unfold_label):
        file_path = self.file_path
        f = h5py.File(file_path, "r")
        self.images = np.array(f['images'])
        self.labels = np.array(f['labels'])
        f.close()

        def resize(x):
            # The pre-read hdf5 file stores channels as BGR; reorder to RGB
            x = x[:, :, [2, 1, 0]]
            return cv2.resize(x, (224, 224))

        # resize the image to 224 for the pretrained model
        self.images = np.array(list(map(resize, self.images)))

        # Normalize pixel values to [0, 1] and move channels first (NCHW)
        self.images = self.images / 255.0
        self.images = np.transpose(self.images, (0, 3, 1, 2))
        # self.images = self.normalize(self.images)

        assert np.max(self.images) < 5.0 and np.min(self.images) > -5.0

        # shift the labels to start from 0
        self.labels -= np.min(self.labels)

        if b_unfold_label:
            self.labels = unfold_label(labels=self.labels,
                                       classes=len(np.unique(self.labels)))
        assert len(self.images) == len(self.labels)

        self.file_num_train = len(self.labels)
        print('data num loaded:', self.file_num_train)

        if self.stage == 'train':
            self.images, self.labels = shuffle_data(samples=self.images,
                                                    labels=self.labels)
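
load_data also depends on an unfold_label helper that is not shown. Given the classes=len(np.unique(self.labels)) argument, it is presumably a one-hot encoder; a hypothetical sketch consistent with that call:

import numpy as np

def unfold_label(labels, classes):
    # Expand integer labels in [0, classes) into one-hot rows
    one_hot = np.zeros((len(labels), classes), dtype=np.float32)
    one_hot[np.arange(len(labels)), labels] = 1.0
    return one_hot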
Example #10
def fit_Regressor(para, root_dir, output_directory):
	k, f, b, l, d, n_bins, noise_scale = para
	data = load_data_w_sim_noise(root_dir, scale=noise_scale)
	shuffle_data(data)

	process = processdata(data)
	process.shift_traces(nbins=n_bins)
	process.linear_normalisation()
	process.split_data()
	process.convert_data()

	x_train = process.x_train
	y_train = process.y_train
	x_test = process.x_test
	y_test = process.y_test

	save_test_data_2_hdf5(x_test)

	print(len(x_test), len(y_test))
	print(len(x_train), len(y_train))
	input_shape = (1000, 1, 1)

	# Regressor_name is expected to be defined at module level
	Regressor = create_Regressor(Regressor_name, input_shape=input_shape, para=para, output_directory=output_directory)

	Regressor.fit(x_train, y_train, para, outfile=output_directory)

	model = keras.models.load_model(f"{output_directory}/best_model.hdf5")

	y_pred = model.predict(x_test)
	print(len(y_pred))
	print(len(y_test))
	y_pred = np.squeeze(y_pred)
	y_test = np.squeeze(y_test)
	x_test = np.squeeze(x_test)
	e_pred = calculate_energy(y_pred, process.shifts)
	e_real = calculate_energy(y_test, process.shifts)
	e_pred_snr = calculate_signal2noise(y_pred)
	e_real_snr = calculate_signal2noise(y_test)

	plt.hist([e_pred_snr, e_real_snr], bins=30, label=["SNR Prediction", "SNR True"])
	plt.legend(loc='upper left')
	plt.xlim(0,150)
	plt.savefig(f"{output_directory}/SNR.png")
	plt.close()
	diff = (e_pred - e_real) / e_real
	# Keep only the relative errors whose predicted SNR clears each threshold
	snr_3 = [y for x, y in zip(e_pred_snr, diff) if x > 3]
	snr_2 = [y for x, y in zip(e_pred_snr, diff) if x > 2]

	history = np.genfromtxt(f"{output_directory}/history.csv", delimiter=',', names=True)
	plot_energy_distribution(snr_3, path_outfile=output_directory, label="snr>3")
	plot_energy_distribution(snr_2, path_outfile=output_directory, label="snr>2")
	plot_energy_distribution(diff, path_outfile=output_directory, label=None)

	plot_history(history, output_directory)
	plot_traces(x_test, y_test, y_pred, output_directory, title="result")
Example #11
    def __init__(self, config):
        super(TFRecordsDensenet, self).__init__(config)

        utils.remove_dirs([config.dataset_path_train_aug])
        utils.create_dirs([
            config.tfrecords_path_train, config.tfrecords_path_val,
            config.tfrecords_path_test, config.dataset_path_train_aug
        ])

        ## Generate augmented dataset
        # self.data_augmentation_v1(self.config.dataset_path_train)
        # self.data_augmentation_v2()

        ## Read dataset
        image_paths_orig_train, labels_orig_train = self.read_dataset(
            self.config.dataset_path_train)
        image_paths_train_aug, labels_train_aug = self.read_dataset(
            self.config.dataset_path_train_aug)
        image_paths_train = image_paths_orig_train + image_paths_train_aug
        labels_train = labels_orig_train + labels_train_aug

        image_paths_val, labels_val = self.read_dataset(
            self.config.dataset_path_val)

        image_paths_test, labels_test = self.read_dataset(
            self.config.dataset_path_test)

        ## Shuffle data
        image_paths_train, labels_train = utils.shuffle_data(
            image_paths_train, labels_train)
        image_paths_val, labels_val = utils.shuffle_data(
            image_paths_val, labels_val)
        image_paths_test, labels_test = utils.shuffle_data(
            image_paths_test, labels_test)

        ## For debugging on smaller dataset
        if config.debug_train_images_count != 0:
            image_paths_train = image_paths_train[:config.debug_train_images_count]
            labels_train = labels_train[:config.debug_train_images_count]
        if config.debug_val_images_count != 0:
            image_paths_val = image_paths_val[:config.debug_val_images_count]
            labels_val = labels_val[:config.debug_val_images_count]
        if config.debug_test_images_count != 0:
            image_paths_test = image_paths_test[:config.debug_test_images_count]
            labels_test = labels_test[:config.debug_test_images_count]

        ## Convert train dataset to TFRecord
        self.dataset_to_tfrecords(image_paths_train,
                                  labels_train,
                                  output_path=self.config.tfrecords_path_train)

        ## Convert val dataset to TFRecord
        self.dataset_to_tfrecords(image_paths_val,
                                  labels_val,
                                  output_path=self.config.tfrecords_path_val)

        ## Convert test dataset to TFRecord
        self.dataset_to_tfrecords(image_paths_test,
                                  labels_test,
                                  output_path=self.config.tfrecords_path_test)