Example 1
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height
    if FLAGS.crop_width is None:
        FLAGS.crop_width = FLAGS.crop_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    if not os.path.exists(FLAGS.test_dir):
        os.makedirs(FLAGS.test_dir)

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:

        dsgan = DSGAN(sess,
                      depth_lambda=FLAGS.depth_lambda,
                      semantic_lambda=FLAGS.semantic_lambda,
                      batch_size=FLAGS.batch_size,
                      input_width=FLAGS.input_width,
                      input_height=FLAGS.input_height,
                      crop_width=FLAGS.crop_width,
                      crop_height=FLAGS.crop_height,
                      input_c_dim=FLAGS.input_c_dim,
                      output_width=FLAGS.output_width,
                      output_height=FLAGS.output_height,
                      output_c_dim=FLAGS.output_c_dim,
                      gf_dim=FLAGS.gf_dim,
                      df_dim=FLAGS.df_dim,
                      dataset_name=FLAGS.dataset,
                      checkpoint_dir=FLAGS.checkpoint_dir,
                      sample_dir=FLAGS.sample_dir,
                      is_crop=FLAGS.is_crop)

        if FLAGS.is_train:
            dsgan.train(FLAGS)
        else:
            if not dsgan.load(FLAGS.checkpoint_dir):
                raise Exception("[!] Train a model first, then run test mode")
            data = load_mat('../nyu_depth_v2_labeled.mat')
            train_test = load_mat('../splits.mat')
            test_idxs = [int(x - 1) for x in train_test["testNdxs"]]
            FLAGS.batch_size = 4
            for idx in test_idxs:
                dsgan.sample_model(data, 1, idx, FLAGS.test_dir, FLAGS)
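Every snippet in this listing goes through a project-specific load_mat helper rather than calling scipy.io.loadmat directly, and the signature differs from project to project (some variants take a variable name, a mode string, or parser options). A minimal sketch of the simplest variant, assuming it is just a thin wrapper:

import scipy.io

def load_mat(path):
    # Hypothetical stand-in: a thin wrapper over scipy.io.loadmat.
    # The real helpers in these projects add their own signatures and
    # post-processing (variable selection, parsing options, etc.).
    return scipy.io.loadmat(path)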
Example 2
def test_utils():

    print("Test Utils")
    num = 1
    mat_file = utils.load_mat(DEPTH_DIR)
    RGB_list = utils.get_RGB_batch(mat_file, 0, 9, 480, 640, 240, 320, False)
    vis(RGB_list[0])
    depth_list = utils.get_depth_batch(mat_file, 0, 9, 480, 640, 240, 320,
                                       False)
    vis(utils.color_depth(np.array([depth_list[num]], dtype="float")))
    semantic_list = utils.get_semantic_batch(mat_file, 0, 9, 480, 640, 240,
                                             320, False)

    vis(utils.color_semantic(np.array([semantic_list[num]], dtype="float")))
Example 3
            break
        x = R[idx:, idx]
        if np.linalg.norm(x) == 0.:
            continue
        e = np.zeros_like(x)
        e[0] = np.linalg.norm(x)
        u = x - e
        v = u / np.linalg.norm(u)
        Q_cnt = np.identity(m)
        Q_cnt[idx:, idx:] -= 2.0 * np.outer(v, v)  # Householder reflector for this column
        R = np.dot(Q_cnt, R)  # R = Pn...P2P1·A
        Q = np.dot(Q_cnt, Q)  # Q = Pn...P2P1
    return np.round(Q.T, 3), np.round(R, 3)  # keep three decimal places
    # return Q.T,R


if __name__ == "__main__":
    path = r'data.txt'
    matrix = load_mat(path, "HR")
    if matrix.size == 0:
        print("input Error!")
        sys.exit()
    Q, R = Householder_Reduction(matrix)
    m, n = R.shape
    print(np.round(np.dot(Q, R), 2))
    print("Q=")
    print_mat(Q, m, m)
    print("R=")
    print_mat(R, m, n)
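A quick sanity check of the result (a sketch; tolerances are loose because Householder_Reduction rounds Q and R to three decimals):

# Q should be orthogonal and Q·R should reproduce the input matrix
print(np.allclose(np.dot(Q, Q.T), np.identity(Q.shape[0]), atol=1e-2))
print(np.allclose(np.dot(Q, R), matrix, atol=1e-2))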
Example 4
import numpy as np
import linear_svm
import matplotlib.pyplot as plt
import utils
from sklearn import preprocessing, metrics
from linear_classifier import LinearSVM_twoclass

############################################################################
#  Part  0: Loading and Visualizing Data                                   #
#  We start the exercise by first loading and visualizing the dataset.     #
#  The following code will load the dataset into your environment and plot #
#  the data.                                                               #
############################################################################

# load ex4data1.mat

X, y = utils.load_mat('data/ex4data1.mat')

utils.plot_twoclass_data(X, y, 'x1', 'x2', ['neg', 'pos'])
plt.savefig('fig1.pdf')


############################################################################
#  Part 1: Hinge loss function and gradient                                #
############################################################################

C = 1
theta = np.zeros((X.shape[1],))
J,grad = linear_svm.svm_loss_twoclass(theta,X,y,C)

print "J = ", J, " grad = ", grad
Example 5
import graderUtil
import numpy as np
import utils
from sklearn.preprocessing import PolynomialFeatures

############################################################
# Problem 3.2: Implementing Regularized Linear Regression 
############################################################

if __name__ == "__main__":

    grader = graderUtil.Grader()
    reg_submission = grader.load('reg_linear_regressor_multi')
    util_submission = grader.load('utils')
    test_regressor = reg_submission.RegularizedLinearReg_SquaredLoss()
    
    # Load the dataset with its train, validation, and test splits.
    X, y, Xtest, ytest, Xval, yval = utils.load_mat('ex2data1.mat')
    XX = np.vstack([np.ones((X.shape[0],)),X]).T

    poly = PolynomialFeatures(degree=6,include_bias=False)
    X_poly = poly.fit_transform(np.reshape(X,(len(X),1)))
    X_poly, mu, sigma = utils.feature_normalize(X_poly)

    # add a column of ones to X_poly

    XX_poly = np.vstack([np.ones((X_poly.shape[0],)),X_poly.T]).T
    print(X, XX_poly)

    # map Xtest and Xval into the same polynomial features

    X_poly_test = poly.transform(np.reshape(Xtest, (len(Xtest), 1)))
    X_poly_val = poly.transform(np.reshape(Xval, (len(Xval), 1)))
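The excerpt stops before the test and validation features are normalized. A sketch of the usual next step, reusing the training-set mu and sigma from feature_normalize rather than re-estimating them:

    # normalize with the *training* statistics, then prepend the bias column
    X_poly_test = (X_poly_test - mu) / sigma
    X_poly_val = (X_poly_val - mu) / sigma
    XX_poly_test = np.vstack([np.ones((X_poly_test.shape[0],)), X_poly_test.T]).T
    XX_poly_val = np.vstack([np.ones((X_poly_val.shape[0],)), X_poly_val.T]).T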
Example 6
#!/usr/bin/env python

from utils import load_mat
import numpy as np
import matplotlib.pylab as pl
from matplotlib.patches import Ellipse

data = load_mat('heightWeight')
data = data['heightWeightData']
sex = data[:, 0]
x = data[:, 1]
y = data[:, 2]
male_arg = (sex == 1)
female_arg = (sex == 2)
x_male = x[male_arg]
y_male = y[male_arg]
x_female = x[female_arg]
y_female = y[female_arg]

fig = pl.figure()
ax = fig.add_subplot(111)
ax.plot(x_male, y_male, 'bx')
ax.plot(x_female, y_female, 'ro')
pl.savefig('gaussHeightWeight_1.png')


def draw_ell(cov, xy, color):
    u, v = np.linalg.eigh(cov)
    angle = np.arctan2(v[0][1], v[0][0])
    angle = (180 * angle / np.pi)
    # scale the axes by 5 so that ~95% of the points fall inside the ellipse
Example 7
#!/usr/bin/env python

from sklearn.naive_bayes import MultinomialNB
from utils import load_mat
import numpy as np
import matplotlib.pylab as pl

data = load_mat('XwindowsDocData')
xtrain = data['xtrain']
ytrain = data['ytrain']

clf = MultinomialNB()
clf.fit(xtrain, ytrain.ravel())
counts = clf.feature_count_
y_counts = clf.class_count_
for i in range(len(counts)):
    pl.figure()
    pl.bar(np.arange(len(counts[i])), counts[i] / y_counts[i])
    pl.title('p(xj=1|y=%d)' % (i + 1))
    pl.savefig('naiveBayesBowDemo_%d.png' % i)
pl.show()
Example 8
#!/usr/bin/env python

from utils import load_mat
from matplotlib import pylab as pl
import numpy as np

data = load_mat('mnistAll')
mnist = data['mnist']
train_images = mnist['train_images'][0][0]  # 28*28*60000
train_labels = mnist['train_labels'][0][0]  # 60000*1
test_images = mnist['test_images'][0][0]  # 28*28*10000
test_labels = mnist['test_labels'][0][0]  # 10000*1

fig1 = pl.figure(1)
fig2 = pl.figure(2)
np.random.seed(seed=10)
for i in range(9):  # 3x3 grid; add_subplot indices are 1-based
    img = test_images[:, :, i]
    ax1 = fig1.add_subplot(3, 3, i + 1)
    ax1.imshow(img)
    ax1.set_xticks(())
    ax1.set_yticks(())
    ax1.set_title('true class = %s' % test_labels[i])

    img_shuffled = img.copy()
    # np.shuffle only along the first index, ravel it first
    np.random.shuffle(img_shuffled.ravel())
    img_shuffled = img_shuffled.reshape(img.shape)
    ax2 = fig2.add_subplot(3, 3, i + 1)
    ax2.imshow(img_shuffled)
    ax2.set_xticks(())
Example 9
import utils
import scipy.io
import numpy as np
from linear_classifier import LinearSVM_twoclass
import sys
from sklearn.model_selection import train_test_split
from sklearn.metrics.pairwise import rbf_kernel


#############################################################################
# load the SPAM email training and test dataset                             #
#############################################################################

print "-------GAUSSIAN/RBF KERNEL-------"
print "Reading the data..."
sys.stdout.flush()
X, y = utils.load_mat("data/spamTrain.mat")
yy = np.ones(y.shape)
yy[y == 0] = -1

X, Xval, yy, yyval = train_test_split(X, yy, test_size=0.1)
test_data = scipy.io.loadmat("data/spamTest.mat")
X_test = test_data["Xtest"]
y_test = test_data["ytest"].flatten()
yy_test = np.ones(y_test.shape)
yy_test[y_test == 0] = -1
print "Done!"
sys.stdout.flush()

#############################################################################
# your code for setting up the best SVM classifier for this dataset         #
# Design the training parameters for the SVM.                               #
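The excerpt ends inside the comment block, before the actual experiments. One way to put the rbf_kernel import to work is to precompute Gram matrices against the training points (a sketch; the gamma value is an assumption):

K = rbf_kernel(X, X, gamma=0.1)         # training Gram matrix, one row per sample
K_val = rbf_kernel(Xval, X, gamma=0.1)  # validation rows against training points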
Example 10
import numpy as np
import matplotlib.pyplot as plt
import utils
import plot_utils
from reg_linear_regressor_multi import RegularizedLinearReg_SquaredLoss

########################################################################
## =========== Part 1: Loading and Visualizing Data ===================#
########################################################################
#  We start the exercise by first loading and visualizing the dataset. #
#  The following code will load the dataset into your environment and  #
#  plot the data.                                                      #
########################################################################


# Load Training Data

print('Loading and Visualizing Data ...')

X, y, Xtest, ytest, Xval, yval = utils.load_mat('ex2data1.mat')

# Plot training data

plot_utils.plot_data(X, y, 'Change in water level (x)',
                     'Water flowing out of the dam (y)')
plt.savefig('fig6.pdf')

########################################################################
## =========== Part 2: Regularized Linear Regression ==================#
########################################################################
#  You should now implement the loss function and its gradient for
#  regularized linear regression in reg_linear_regressor_multi.py.

# append a column of ones to matrix X

XX = np.vstack([np.ones((X.shape[0],)),X]).T
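For reference, the quantity to implement in reg_linear_regressor_multi.py is the standard regularized squared loss. A sketch under the usual conventions for this exercise (the bias term theta[0] is not regularized):

def reg_loss_sketch(theta, XX, y, reg):
    # J = (1/2m) * sum((XX.theta - y)^2) + (reg/2m) * sum(theta[1:]^2)
    m = XX.shape[0]
    err = XX.dot(theta) - y
    return err.dot(err) / (2.0 * m) + (reg / (2.0 * m)) * theta[1:].dot(theta[1:])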
Example 11
from sklearn import preprocessing, metrics
import utils
import scipy.io
import numpy as np
from linear_classifier import LinearSVM_twoclass
from sklearn.model_selection import train_test_split

#############################################################################
# load the SPAM email training and test dataset                             #
#############################################################################

X, y = utils.load_mat('data/spamTrain.mat')
yy = np.ones(y.shape)
yy[y == 0] = -1

test_data = scipy.io.loadmat('data/spamTest.mat')
X_test = test_data['Xtest']
y_test = test_data['ytest'].flatten()

#############################################################################
# your code for setting up the best SVM classifier for this dataset         #
# Design the training parameters for the SVM.                               #
# What should the learning_rate be? What should C be?                       #
# What should num_iters be? Should X be scaled? Should X be kernelized?     #
#############################################################################
# your experiments below

# split off a validation set, keeping the ±1 labels computed above
X_train, X_val, y_train, y_val = train_test_split(X,
                                                  yy,
                                                  test_size=0.8,
                                                  random_state=42)
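One of the questions in the comment block is whether X should be scaled. A sketch using the preprocessing module imported above (fit the scaler on the training split only, then apply it everywhere):

scaler = preprocessing.StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_val_scaled = scaler.transform(X_val)
X_test_scaled = scaler.transform(X_test)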
Example 12
if __name__ == "__main__":
    arg = parse_args()
    print("========Call with Arguments========")
    print(arg)

    if not os.path.exists(RESULTS_PATH):
        os.mkdir(RESULTS_PATH)
        print(">>> Directory {} created.".format(RESULTS_PATH))

    if not os.path.exists(BCM_PATH):
        os.mkdir(BCM_PATH)
        print(">>> Directory {} created.".format(BCM_PATH))

    print("\n========Reading Data========")
    data, _ = load_mat(arg.data_path, False, 1, 1, ',', True, False, None,
                       None)
    label = load_label(arg.label_path, ',', '0')
    data = data["data"]
    k_means_logger = Logger(LOG_PATH,
                            "Benchmark_K_MEANS.log",
                            benchmark_logger=True)
    dbscan_logger = Logger(LOG_PATH,
                           "Benchmark_DBSCAN.log",
                           benchmark_logger=True)

    k_means_results = {}
    dbscan_results = {}

    print("\n========Benchmarking========")

    for dim in DR_DIM:
Example 13
#!/usr/bin/env python

from utils import load_mat
import numpy as np
import matplotlib.pylab as pl
from scipy import ndimage

data = load_mat('20news_w100')
documents = data['documents']
documents = documents.toarray().T
newsgroups = data['newsgroups'][0]

#sort documents by number of words and choose the first 1000
chosen_docs_arg = np.argsort(np.sum(documents, axis=1))
chosen_docs_arg = chosen_docs_arg[-1000:][::-1]  # descend
documents = documents[chosen_docs_arg]
newsgroups = newsgroups[chosen_docs_arg]

#sort by newsgroups label
sorted_arg = np.argsort(newsgroups)
documents = documents[sorted_arg]
newsgroups = newsgroups[sorted_arg]

#zoom the image to show it
image = ndimage.zoom(documents, (1, 10))
pl.imshow(image, cmap=pl.cm.gray, interpolation='none')
# draw a red line between different newsgroups
groups_label = np.unique(newsgroups)
for i in range(len(groups_label) - 1):
    y, = np.where(newsgroups == groups_label[i + 1])
    y = y[0]
Example 14
save_point = torch.load(model_path)
model_param = save_point['state_dict']
model = resblock(conv_bn_relu_res_block, 10, 3, 1)
model.load_state_dict(model_param)

model = model.cuda()
model.eval()

for img_name in sorted(os.listdir(img_path)):
    img_path_name = os.path.join(img_path, img_name)
    rgb = imread(img_path_name)
    rgb = rgb / 255
    rgb = np.expand_dims(np.transpose(rgb, [2, 1, 0]), axis=0).copy()

    img_res1 = reconstruction(rgb, model)
    img_res2 = np.flip(reconstruction(np.flip(rgb, 2).copy(), model), 1)
    img_res3 = (img_res1 + img_res2) / 2

    mat_name = img_name[:-4] + '.mat'
    mat_dir = os.path.join(result_path, mat_name)

    save_matv73(mat_dir, var_name, img_res3)

    gt_name = img_name[12:-4] + '.mat'
    gt_dir = os.path.join(gt_path, gt_name)
    gt = load_mat(gt_dir, var_name)
    mrae_error = mrae(img_res3, gt['rad'][:, :, 1])
    rrmse_error = rmse(img_res3, gt['rad'][:, :, 1])
    print("[%s] MRAE=%0.9f RRMSE=%0.9f" % (img_name, mrae_error, rrmse_error))
Example 15
            mat[i, row] = factor
            plus = -1 * factor * mat[row, row + 1:-1]
            mat[i, row + 1:-1] += plus
        temp = mat.copy()
        # print_mat(mat,m,n)
    U = np.triu(mat[:, :-1], 0)
    for i in range(m):
        temp[i, i] = 1
    L = np.tril(temp[:, :-1], 0)
    P = np.zeros(shape=(m, n - 1))
    for j in range(m):
        row_idx = int(mat[j, -1])
        P[j, row_idx - 1] = 1
    return P, L, U


if __name__ == "__main__":
    path = r"data.txt"
    matrix = load_mat(path, "LU")
    if matrix.size == 0:
        print("input Error!")
        sys.exit()
    P, L, U = LU_factorization(matrix)
    m, n = P.shape
    print("L=")
    print_mat(L, m, m)
    print("U=")
    print_mat(U, m, m)
    print("P=")
    print_mat(P, m, m)
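A quick check of the pivoted factorization (a sketch; with partial pivoting, PA = LU should hold, or A = P.T L U if the permutation is stored the other way round):

print(np.allclose(np.dot(P, matrix), np.dot(L, U)))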
Example 16
    for col in range(n):
        cur_col = mat[:, col]
        if col == 0:  #a1=||a1||*q1
            R[0, 0] = math.sqrt(np.sum(np.square(cur_col)))
            q = cur_col / R[0, 0]
            Q[:, col] = q
        else:
            q = cur_col.copy()
            for i in range(col):
                R[i, col] = np.matmul(Q[:, i], mat[:, col])
            for j in range(col):  # qk = ak - <q1,ak>q1 - <q2,ak>q2 - ... - <q(k-1),ak>q(k-1)
                q -= R[j, col] * Q[:, j]
            R[col, col] = math.sqrt(np.sum(np.square(q)))
            q = q / R[col, col]
            Q[:, col] = q
    return Q, R


if __name__ == "__main__":
    path = r'data.txt'
    matrix = load_mat(path, "QR")
    if matrix.size == 0:
        print("input Error!")
        sys.exit()
    Q, R = QR(matrix)
    m, n = Q.shape
    print("Q=")
    print_mat(Q, m, m)
    print("R=")
    print_mat(R, m, n)
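This is classical Gram-Schmidt, which can lose orthogonality on ill-conditioned input; Householder reflections (Example 3) are the numerically stable alternative. A quick check worth running on the result (a sketch):

print(np.linalg.norm(np.dot(Q.T, Q) - np.identity(Q.shape[1])))  # ~0 if Q is orthonormal
print(np.allclose(np.dot(Q, R), matrix))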
Example 17
def merge_mat():
    mat_dir = "/media/2T/data/data_40/benchmarkData/metadata/classMapping40.mat"
    f = utils.load_mat(mat_dir)
    lut = f["mapClass"][0]
    lut = np.concatenate((np.array([40]), lut))
    print(lut)
Example 18
from sklearn import preprocessing, metrics
from sklearn.model_selection import train_test_split
import utils
import scipy.io
import numpy as np
from linear_classifier import LinearSVM_twoclass
import sys


#############################################################################
# load the SPAM email training and test dataset                             #
#############################################################################

print "-------3RD DEGREE POLYNOMIAL KERNEL-------"
print "Reading the data..."
sys.stdout.flush()
X, y = utils.load_mat('data/spamTrain.mat')
yy = np.ones(y.shape)
yy[y == 0] = -1

X, Xval, yy, yyval = train_test_split(X, yy, test_size=0.1)
test_data = scipy.io.loadmat('data/spamTest.mat')
X_test = test_data['Xtest']
y_test = test_data['ytest'].flatten()
yy_test = np.ones(y_test.shape)
yy_test[y_test == 0] = -1
print("Done!")
sys.stdout.flush()

#############################################################################
# your code for setting up the best SVM classifier for this dataset         #
# Design the training parameters for the SVM.                               #
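The excerpt ends before the kernel announced by the banner is built. As in the RBF example (Example 9), a sketch of a 3rd-degree polynomial Gram matrix with sklearn (the degree matches the banner above; other kernel parameters are left at their defaults):

from sklearn.metrics.pairwise import polynomial_kernel
K = polynomial_kernel(X, X, degree=3)
K_val = polynomial_kernel(Xval, X, degree=3)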
Example 19
def main(args):
    path = args.files
    matrix = load_mat(path, args.mode)
    if matrix.size == 0:
        print("input Matrix Error!")
        sys.exit()
    m, n = matrix.shape
    if args.mode == "LU":
        print("LU Factorization, the input should be a square matrix.\n")
    elif args.mode == "QR":
        r = np.linalg.matrix_rank(matrix)
        if r < n:
            print("Error!\nQR factorization: a matrix with linearly dependent"
                  " columns cannot be uniquely factored as A = QR!\n")
    print("=" * 50, "\norigin matrix type: {m} * {n}".format(m=m, n=n),
          "\nOrigin Matrix A = ")
    print_mat(matrix, m, n)
    print("\nThe factorization is processing!\n ")
    if args.mode == "LU":
        P, L, U = LU_factorization(matrix)
        m, n = P.shape
        print("L=")
        print_mat(L, m, m)
        print("U=")
        print_mat(U, m, m)
        print("P=")
        print_mat(P, m, m)
    elif args.mode == "QR":
        Q, R = QR(matrix)
        m, n = Q.shape
        m1, n1 = R.shape
        print("Q=")
        print_mat(Q, m, n)
        print("R=")
        print_mat(R, m1, n1)
    elif args.mode == "Householder":
        Q, R = Householder_Reduction(matrix)
        m, n = Q.shape
        m1, n1 = R.shape
        print("Q=")
        print_mat(Q, m, n)
        print("R=")
        print_mat(R, m1, n1)
    elif args.mode == "Givens":
        Q, R = Givens_Reduction(matrix)
        m, n = Q.shape
        m1, n1 = R.shape
        print("Q=")
        print_mat(Q, m, n)
        print("R=")
        print_mat(R, m1, n1)
    elif args.mode == "URV":
        U, R, V = URV(matrix)
        m, n = U.shape
        m1, n1 = R.shape
        m2, n2 = V.shape
        print("U=")
        print_mat(U, m, n)
        print("R=")
        print_mat(R, m1, n1)
        print("V=")
        print_mat(V, m2, n2)
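parse_args is not shown in this listing; judging from the attributes it exposes (args.files, args.mode), an invocation presumably looks something like this (the script name and flag spellings are assumptions):

python factorization.py --files data.txt --mode Householder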