Example #1
 def train(self):
     logdir = self.model_dir + datetime.datetime.now().strftime(
         "%Y%m%d-%H%M%S")
     hr_images_test, lr_images_test, hr_images_train, lr_images_train = images_loader_mini(
         self.input_dir, self.scale)
     y_train_hr = np.array(hr_images_train[:500000])
     x_train_lr = np.array(lr_images_train[:500000])
     y_test_hr = np.array(hr_images_test[:15000])
     x_test_lr = np.array(lr_images_test[:15000])
     y_train_hr = normalize(y_train_hr)
     y_test_hr = normalize(y_test_hr)
     x_train_lr = normalize(x_train_lr)
     x_test_lr = normalize(x_test_lr)
     model = SRDeepCNN(self.channels, self.scale).build_model()
     model.compile(loss=content_loss,
                   optimizer=get_optimizer(),
                   metrics=[metrics.mse, metrics.categorical_accuracy])
     tensorboard_callback = keras.callbacks.TensorBoard(
         log_dir=logdir,
         batch_size=self.batch_size,
         write_graph=True,
         write_images=True,
         write_grads=True)
     loss_history = model.fit(x_train_lr,
                              y_train_hr,
                              batch_size=self.batch_size,
                              epochs=self.epochs,
                              verbose=1,
                              validation_data=(x_test_lr, y_test_hr),
                              callbacks=[tensorboard_callback])
     save_model(model, loss_history, self.model_dir)
     plot_generated_test(self.output_dir, model, y_test_hr, x_test_lr)
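Examples #1 and #7 call normalize and denormalize helpers that the snippets do not define. A minimal sketch of what such a pair might look like, assuming pixels are mapped between uint8 [0, 255] and float [0, 1] (the project's actual helpers may use a different range):

import numpy as np

def normalize(images):
    # map uint8 pixels [0, 255] to float32 values in [0, 1]
    return images.astype(np.float32) / 255.0

def denormalize(images):
    # map float values in [0, 1] back to uint8 pixels [0, 255]
    return (np.clip(images, 0.0, 1.0) * 255.0).astype(np.uint8)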
Example #2
def train():
    algorithm = "GMM"

    n_cluster = 7

    xy = np.loadtxt('faults.csv', delimiter=',')  # 1941 rows
    x_data = xy[:, :-n_cluster]

    if use_normalize:
        normalize(x_data)

    pandas_data = pd.DataFrame(data=x_data)

    if algorithm == "K-Means":
        result = KMeans(n_clusters=n_cluster,
                        init='k-means++',
                        max_iter=300,
                        random_state=random_Seed).fit(pandas_data).labels_
    elif algorithm == "GMM":
        gmm = GaussianMixture(n_components=n_cluster, random_state=random_Seed)
        gmm_label = gmm.fit(pandas_data).predict(pandas_data)
        result = gmm_label
    else:
        return

    pandas_data['cluster'] = result

    # fit PCA on the feature columns only, not on the appended cluster labels
    pca = PCA(n_components=2)
    pca_transformed = pca.fit_transform(pandas_data.drop(columns=['cluster']))

    pandas_data['pca_x'] = pca_transformed[:, 0]
    pandas_data['pca_y'] = pca_transformed[:, 1]
    print(pandas_data.head(3))

    marker_ind = []

    for i in range(n_cluster):
        marker_ind.append(pandas_data[pandas_data['cluster'] == i].index)

    for i in range(n_cluster):
        plt.scatter(x=pandas_data.loc[marker_ind[i], 'pca_x'],
                    y=pandas_data.loc[marker_ind[i], 'pca_y'],
                    marker=marker[i])

    plt.xlabel('PCA 1')
    plt.ylabel('PCA 2')
    plt.title(
        '7 Clusters Visualization by 2 PCA Components({0})'.format(algorithm))

    plt.show()
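Note that Examples #2, #5, #9, and #11 call normalize as a bare statement, which only works if the helper modifies its argument in place; sklearn.preprocessing.normalize, by contrast, returns a new array. A minimal in-place sketch consistent with those call sites, assuming per-column min-max scaling and an optional exclude_cols parameter as used in Example #11 (the real helpers may scale differently):

import numpy as np

def normalize(x_data, exclude_cols=()):
    # min-max scale each column to [0, 1] in place, skipping excluded columns
    for col in range(x_data.shape[1]):
        if col in exclude_cols:
            continue
        lo, hi = x_data[:, col].min(), x_data[:, col].max()
        if hi > lo:
            x_data[:, col] = (x_data[:, col] - lo) / (hi - lo)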
Example #3
    def predict(self, image_matrix):
        X = image_matrix.reshape((1, image_matrix.size))
        mu = np.loadtxt('mu.csv')
        sigma = np.loadtxt('sigma.csv')
        X, mu, sigma = normalize(X, mu, sigma)
        hidden_size = self.config['hidden_size']
        input_size = self.config['input_size']
        num_labels = self.config['num_labels']
        theta1 = self.nn_params[:((hidden_size) * (input_size + 1))].reshape(
            (hidden_size, input_size + 1))
        theta2 = self.nn_params[((hidden_size) * (input_size + 1)):].reshape(
            (num_labels, hidden_size + 1))

        a1 = insert_bias(X)

        z2 = theta1.dot(a1.T)
        a2 = sigmoid(z2)

        a2 = insert_bias(a2.T)
        print(theta2.shape)

        z3 = theta2.dot(a2.T)
        h = sigmoid(z3)
        print(h)
        return h
Example #4
    def _solve(self, rule, query):
        ops = []
        facts = []
        for f in rule:
            if f.isalpha():
                facts.append(f)
            else:
                ops.append(f)

        # only the "and" operator is handled here
        if ops[0] == self.config.op["and"]:
            res = op_and(self.data[facts[0]], self.data[facts[1]])
            res = normalize(res)

        for i, op in enumerate(ops[1:]):
            if op == self.config.op["and"]:
                res = op_and(res, self.data[facts[i + 2]])
                res = normalize(res)

        self.data[query] = res
Example #5
def main():
    X_person, y_person = [], []
    X_org, y_org = [], []
    X_loc, y_loc = [], []
    for inp in input_list:  # renamed from `input` to avoid shadowing the built-in
        entities, person_mat, org_mat, loc_mat = compute_features(inp)
        normalize(person_mat)
        normalize(org_mat)
        normalize(loc_mat)
        Xys = generate_diff_data(entities, person_mat, org_mat, loc_mat)
        X_person += Xys[0][0]
        y_person += Xys[0][1]
        X_org += Xys[1][0]
        y_org += Xys[1][1]
        X_loc += Xys[2][0]
        y_loc += Xys[2][1]

    print "Training Person Ranks..."
    clf_person = svm.SVC(C=1, kernel='linear')
    clf_person.fit(X_person, y_person)
    print "Training Error = %.2f%%" % (training_error(X_person, y_person, clf_person) * 100)
    print "Training Organization Ranks..."
    clf_org = svm.SVC(C=1, kernel='linear')
    clf_org.fit(X_org, y_org)
    print "Training Error = %.2f%%" % (training_error(X_org, y_org, clf_org) * 100)
    print "Training Location Ranks..."
    clf_loc = svm.SVC(C=1, kernel='linear')
    clf_loc.fit(X_loc, y_loc)
    print "Training Error = %.2f%%" % (training_error(X_loc, y_loc, clf_loc) * 100)
    print "Storing Classifiers..."
    pickle.dump((clf_person, clf_org, clf_loc), open("classifiers.dat", "w"))
Example #6
def ml_ranker(entities, person_mat, org_mat, loc_mat):
    clf_person, clf_org, clf_loc = pickle.load(open("classifiers.dat", "rb"))
    data_set = [(person_mat, clf_person), (org_mat, clf_org), (loc_mat, clf_loc)]
    ret = []
    scores = []
    for data, clf in data_set:
        normalize(data)
        score = [0] * len(data)
        for i in range(len(data) - 1):
            for j in range(i + 1, len(data)):
                # predict expects a 2D array, hence the wrapping list
                if clf.predict([dist_vector(data[i], data[j])]) == [1]:
                    score[i] += 1
                else:
                    score[j] += 1
                if clf.predict([dist_vector(data[j], data[i])]) == [-1]:
                    score[i] += 1
                else:
                    score[j] += 1
        result = sorted(range(len(data)), key=lambda i: -score[i])
        ret.append(result)
        scores.append(score)
    return ret, scores
Example #7
    def test(self):
        if not self.model_path.endswith('.h5'):
            print('Error: incompatible model file')
            exit()
        model = load_model(filepath=self.model_path, custom_objects={'content_loss': content_loss})

        if isfile(self.input_path):
            image = data.imread(self.input_path)
            image = set_image_alignment(image, 2)
            if image.shape[0] >= HEIGHT_LIMITED or image.shape[1] >= WIDTH_LIMITED:
                image_batch, batch_shape = split_for_test(image)
                image_batch = normalize(image_batch)
                image_generated_batch = model.predict([image_batch])
                image_generated = build_image_from_batch(image_generated_batch, batch_shape)
            else:
                image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
                image = normalize(image)
                image_generated = model.predict([image])
                image_generated = image_generated.reshape(
                    (image_generated.shape[1], image_generated.shape[2], image_generated.shape[3]))

            image = denormalize(image_generated)
            save_image(self.output_path, image)
Example #8
    def train(self, X, y, cv, test_set, cv_y, test_y):
        self.clear()
        col = self.config['input_size']
        cv = np.reshape(cv, (len(cv) // col, col))
        test_set = np.reshape(test_set, (len(test_set) // col, col))

        X, mu, sigma = normalize(X)

        # save X, mu and sigma
        np.savetxt('X.csv', X, delimiter=',')
        np.savetxt('mu.csv', mu, delimiter=',')
        np.savetxt('sigma.csv', sigma, delimiter=',')

        self.nn_params = optimize.fmin_cg(f, self.nn_params, args=(self, X, y), maxiter=50,
            fprime=fprime)

        # save nn_parameters
        hidden_size = self.config['hidden_size']
        input_size = self.config['input_size']
        num_labels = self.config['num_labels']
        theta1 = self.nn_params[:((hidden_size) * (input_size + 1))].reshape(
            (hidden_size, input_size + 1))
        theta2 = self.nn_params[((hidden_size) * (input_size + 1)):].reshape(
            (num_labels, hidden_size + 1))
        np.savetxt('Theta1.csv', theta1, delimiter=',')
        np.savetxt('Theta2.csv', theta2, delimiter=',')

        # test the model on the training set
        p = predict1(theta1, theta2, X)
        _accuracy = accuracy(p, y)
        l_fscores = list_of_fscores(p, y, num_labels)
        fscore = total_fscore(l_fscores)

        # test the model on the cross-validation set

        X1, mu1, sigma1 = normalize(np.array(cv), mu, sigma)
        p_cv = predict1(theta1, theta2, X1)
        accuracy_cv = accuracy(p_cv, cv_y)
        l_fscores_cv = list_of_fscores(p_cv, cv_y, num_labels)
        fscores_cv = total_fscore(l_fscores_cv)

        # test the model on the test set
        X_test, mu_test, sigma_test = normalize(np.array(test_set), mu, sigma)
        p_test = predict1(theta1, theta2, X_test)
        accuracy_test = accuracy(p_test, test_y)
        l_fscores_test = list_of_fscores(p_test, test_y, num_labels)
        fscore_test = total_fscore(l_fscores_test)

        print('fscore on cross-validation set:', fscores_cv, 'accuracy:', accuracy_cv)
        print('fscore on test set:', fscore_test)
        print('accuracy on test set:', accuracy_test)

        return self.nn_cfx(X, y, self.nn_params), self.nn_params, fscores_cv, fscore_test
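Examples #3 and #8 rely on a three-way normalize that z-scores the data and returns the statistics so the same transform can be replayed on new inputs (normalize(X) fits mu and sigma; normalize(X, mu, sigma) reuses them). A minimal sketch matching those call sites, assuming standard z-score normalization:

import numpy as np

def normalize(X, mu=None, sigma=None):
    # fit mu/sigma on X when not supplied, otherwise reuse the given statistics
    if mu is None:
        mu = X.mean(axis=0)
    if sigma is None:
        sigma = X.std(axis=0)
        sigma[sigma == 0] = 1.0  # guard against constant features
    return (X - mu) / sigma, mu, sigma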
Example #9
# simpler format for building your arrays
x = concat(a_train, c_train)
print("Concatenated a_train and c_train")

samples, r, c = x.shape
x = np.reshape(x, (samples, r * c))

y = create_labels(len(a_train), len(c_train)).ravel()

print("Created training data and labels")
print("x shape: ", x.shape)
print("y shape: ", y.shape)

# normalize x
normalize(x)

print("Normalized x")

test_data = concat(a_test, c_test)

samples, r, c = test_data.shape
test_data = np.reshape(test_data, (samples, r * c))

test_data_labels = create_labels(len(a_test), len(c_test)).ravel()

print("Created testing data and labels")

normalize(test_data)

print("Normalized test data")
Example #10
	@property
	def kps(self):
		if not hasattr(self, '_kps'):
			self._kps = normalize(self.Kinv, self.kpus)
		return self._kps
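Example #10 looks like a visual-SLAM snippet: normalize(self.Kinv, self.kpus) maps detected keypoints from pixel coordinates into normalized camera coordinates via the inverse intrinsics matrix. A plausible sketch of that helper, assuming kpus is an (N, 2) array of pixel coordinates:

import numpy as np

def normalize(Kinv, pts):
    # lift (N, 2) pixel coordinates to homogeneous (N, 3), apply K^-1,
    # then drop the homogeneous coordinate
    pts_h = np.concatenate([pts, np.ones((pts.shape[0], 1))], axis=1)
    return (Kinv @ pts_h.T).T[:, :2]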
Example #11
    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        return x


model = Classifier().to(device)

xy = np.loadtxt('faults.csv', delimiter=',')  # 1941 rows
x_data = xy[:, :-7]  # all columns except the 7 one-hot fault labels
y_data = xy[:, -7:]  # the 7 one-hot fault labels

if use_normalize:
    normalize(x_data, exclude_cols)

x_data = torch.FloatTensor(x_data).to(device)
y_data = torch.FloatTensor(y_data).to(device)

x_data, y_data = SuffleData(x_data, y_data, len(xy))

x_train = x_data[:training_len]
y_train = y_data[:training_len]

x_test = x_data[training_len:]
y_test = y_data[training_len:]
test_len = len(x_test)

optimizer = optim.Adam(model.parameters(), lr=0.0005)
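SuffleData in Example #11 is not defined in the snippet; from the call site it evidently shuffles features and labels with a single shared permutation before the train/test split. A minimal sketch under that assumption (keeping the snippet's spelling of the name):

import torch

def SuffleData(x_data, y_data, n):
    # one random permutation applied to both tensors keeps rows paired
    perm = torch.randperm(n, device=x_data.device)
    return x_data[perm], y_data[perm]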
Example #12
import os

from scipy import misc
import cv2, numpy as np
from Utils import reverse, normalize
import Utils

Utils.SET_GPU_MEM()  #set GPU memory limit, Default: 0.5

#   =============  read raw image  ================
rowImgname = "yourImageName.jpg".split(".")[0]
rowImg = misc.imread(rowImgname + ".jpg")
h, w, c = rowImg.shape
os.makedirs(rowImgname, exist_ok=True)

#   =============  generate mask_nobg  ================
model_nobg = load_model("models/yourModelName.h5")
fake_nobg = cv2.resize(reverse(model_nobg.predict(normalize(rowImg))), (w, h))
Utils.CLEAR_SESSION()
del model_nobg

#   =============  clean mask_nobg  ================
mask_nobg = cv2.medianBlur(
    Utils.noisetool().deNoise1_er(Utils.masktool().binary(fake_nobg, 32)), 7)

#   =============  bitwise and  ================
nobg_rgb = cv2.bitwise_and(rowImg, rowImg, mask=mask_nobg)
nobg_final = np.concatenate([nobg_rgb, mask_nobg.reshape(h, w, 1)], axis=-1)

misc.imsave(rowImgname + "/" + rowImgname + "-gan.jpg", fake_nobg)
misc.imsave(rowImgname + "/" + rowImgname + "-mask.jpg", mask_nobg)
misc.imsave(rowImgname + "/" + rowImgname + "-nobg.png", nobg_final)
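The reverse helper imported from Utils is also undefined here; given the pipeline, it plausibly inverts normalize, turning the model's float output back into displayable uint8 pixels. A sketch assuming the model emits a batch of one image with values in [-1, 1] (the real Utils.reverse may differ):

import numpy as np

def reverse(batch):
    # drop the batch dimension and map [-1, 1] floats to uint8 [0, 255]
    img = np.squeeze(batch, axis=0)
    return ((np.clip(img, -1.0, 1.0) + 1.0) * 127.5).astype(np.uint8)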