Example #1
    def rounds(self):
        # Stop once every input file has been processed.
        if self.current_file_index >= len(self.files):
            return
        # print("===> Now Operate on File: " + str(self.files[self.current_file_index]))
        self.round_no += 1
        train_data = load_data(self.files[self.current_file_index])
        self.current_file_index += 1
        # Process the file in batches of roughly 10% of its rows; max(1, ...)
        # guards against a zero-sized batch (and an infinite loop) on tiny files.
        ROUND_BATCH = max(1, int(len(train_data) * 0.1))
        DATA_LENGTH = len(train_data)
        next_line = 0
        while next_line < DATA_LENGTH:
            self.process_one_round(train_data, next_line, ROUND_BATCH)
            next_line += ROUND_BATCH

        self.merge_CS()
        self.make_CS()
        # self.merge_CS()  # changed to merge first, then make
        self.information_summary()
        displaySet(self.DS_set)
        self.rounds()  # recurse to process the next file
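
The batching loop in rounds() is the core of the streaming behaviour. Here is a minimal, self-contained sketch of that pattern, assuming the data is just a list of rows and using a caller-supplied process_batch callback in place of process_one_round:

def iterate_in_batches(train_data, process_batch, batch_fraction=0.1):
    """Feed train_data to process_batch in chunks of ~batch_fraction of its rows."""
    batch_size = max(1, int(len(train_data) * batch_fraction))  # never zero
    next_line = 0
    while next_line < len(train_data):
        process_batch(train_data[next_line:next_line + batch_size])
        next_line += batch_size

# Usage: print each ~10% chunk of a 25-row dataset.
iterate_in_batches(list(range(25)), print)
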
Example #2
    def initialisation(self):
        # print("===> Now Operate on File: " + str(self.files[self.current_file_index]))
        self.round_no += 1
        train_data = load_data(self.files[self.current_file_index])
        self.current_file_index += 1
        # Hold out a sample of the first file; the remainder is processed in batches below.
        train_data, sample = sample_data(train_data, self.sample_ratio,
                                         self.beta)
        # max(1, ...) guards against a zero-sized batch on very small files.
        ROUND_BATCH = max(1, int(len(train_data) * 0.1))
        DATA_LENGTH = len(train_data)
        # Cluster the sample with K-Means and seed the DS set from the result.
        X = sample
        kmeans = _KMeans(K=self.K, tol=1, compulsory=True, n_init=15)
        Y = kmeans.fit(X)
        self.DS_set = initialise_DS(X, Y, self.K)
        # print("**** Initialisation Begin ****")
        displaySet(self.DS_set)

        next_line = 0
        while next_line < DATA_LENGTH:
            self.process_one_round(train_data, next_line, ROUND_BATCH)
            next_line += ROUND_BATCH

        self.make_CS()
        self.information_summary()
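
Stripped of the project-specific helpers, the initialisation amounts to: hold out a sample, cluster it, and seed the DS set from the clusters. The sketch below shows that flow with numpy and scikit-learn's KMeans standing in for sample_data, _KMeans, and initialise_DS, whose implementations are not shown in this snippet:

import numpy as np
from sklearn.cluster import KMeans

def initialise_from_sample(train_data, sample_ratio, K):
    data = np.asarray(train_data, dtype=float)
    # Hold out a random sample for the initial clustering.
    n_sample = max(K, int(len(data) * sample_ratio))
    idx = np.random.choice(len(data), size=n_sample, replace=False)
    sample, remaining = data[idx], np.delete(data, idx, axis=0)
    # Cluster the sample and group its points by label (a simple DS set).
    labels = KMeans(n_clusters=K, n_init=15).fit_predict(sample)
    ds_set = {k: sample[labels == k] for k in range(K)}
    return remaining, ds_set
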
Example #3
# NOTE: the imports, signature, and docstring header below are inferred from
# the names used in the body; the snippet itself omits them.
import numpy as np
import matplotlib.pyplot as plt

def print_mislabeled_images(classes, X, y, p):
    """
    Plot the images whose prediction disagrees with the true label.
    classes -- label names (byte strings)
    X -- image data, one flattened 64x64x3 image per column
    y -- true labels
    p -- predictions
    """
    a = p + y
    mislabeled_indices = np.asarray(np.where(a == 1))  # entries equal to 1 mark mismatches
    plt.rcParams['figure.figsize'] = (40.0, 40.0)  # set default size of plots
    num_images = len(mislabeled_indices[0])
    for i in range(num_images):
        index = mislabeled_indices[1][i]

        plt.subplot(2, num_images, i + 1)
        plt.imshow(X[:, index].reshape(64, 64, 3), interpolation='nearest')
        plt.axis('off')
        plt.title("Prediction: " + classes[int(p[0, index])].decode("utf-8")
                  + " \n Class: " + classes[y[0, index]].decode("utf-8"))

# load_data, L_layer_model, and predict are assumed to be provided by a
# helper module that is not part of this snippet.
train_x_orig, train_y, test_x_orig, test_y, classes = load_data()

# Reshape the training and test examples: one flattened image per column.
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T   # the "-1" flattens the remaining dimensions
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T

# Standardize the data to have feature values between 0 and 1.
train_x = train_x_flatten / 255.
test_x = test_x_flatten / 255.


# 12288 = 64 * 64 * 3 input features, three hidden layers, one output unit.
layers_dims = [12288, 7, 7, 5, 1]

parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations=5000,
                           print_cost=True, model_name='model_v1', save=True)

predictions_train = predict(train_x, train_y, parameters)
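
The reshape-and-scale step above is easy to get wrong; a quick sanity check on a small synthetic batch (shapes only, no real images) looks like this:

import numpy as np

# Stand-in for train_x_orig: 5 RGB images of size 64x64.
fake_images = np.random.randint(0, 256, size=(5, 64, 64, 3))

# Flatten each image into a column and scale to [0, 1].
flat = fake_images.reshape(fake_images.shape[0], -1).T / 255.
print(flat.shape)               # (12288, 5): one 64*64*3 column per example
print(flat.min(), flat.max())   # values now lie in [0, 1]
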
Example #4
def load_all_data(paths):
    """Load every file in paths and concatenate the results into one list."""
    train_data = []
    for input_file_path in paths:
        train_data.extend(load_data(input_file_path))
    return train_data
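
A typical call just passes the list of input files. The paths below are hypothetical, and load_data is assumed to return a list of rows per file:

paths = ["data/part_0.csv", "data/part_1.csv", "data/part_2.csv"]
train_data = load_all_data(paths)
print(len(train_data), "rows loaded in total")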