예제 #1
0
def op2():
    """
    Generate a rotation-augmented training set: for every training image,
    store the original plus copies rotated by 45 and 315 degrees, each
    paired with the image's original (non-one-hot) label.
    :return:
    """
    data = common.Data("mnist/mnist_train/train_data.npy",
                       "mnist/mnist_train/mnist_train_label",
                       "mnist/mnist_test/test_data.npy",
                       "mnist/mnist_test/mnist_test_label", 1, 28)
    fig = 28
    images = []
    labels = []
    for i in range(len(data.train_x)):
        # Cast the stored row to uint8 so PIL interprets it as 8-bit pixels.
        data.train_x[i] = data.train_x[i].astype(np.uint8)
        img = Image.fromarray(
            data.train_x[i].reshape(fig, fig).astype(np.uint8))
        # Original first, then the two rotations (order matters for labels).
        images.append(data.train_x[i])
        for angle in (45, 315):
            rotated = np.asarray(img.rotate(angle)).reshape(fig * fig, )
            images.append(rotated)
        # One label per stored image: original + two rotated copies.
        labels.extend([data.train_y_no_one_hot[i]] * 3)

    np.save('mnist/mnist_train/rotate_train.npy', images)
    np.save('mnist/mnist_train/rotate_label.npy', labels)
예제 #2
0
def main():
    """Load the MNIST splits and run the SVM with an RBF kernel (gamma-like arg 1.3)."""
    data = common.Data(common_path+"/mnist_train/train_data.npy", common_path+"/mnist_train/mnist_train_label",
                    common_path+"/mnist_test/test_data.npy", common_path+"/mnist_test/mnist_test_label", fig)
    SVM(data.train_x, data.test_x, data.train_y, data.test_y, ('rbf', 1.3))
예제 #3
0
def main():
    """Entry point: set up logging, then run the native-messaging loop."""
    common.configure_logging('korred')
    handler = Handler(data=common.Data())
    interface = NativeMessageInterface(
        callback=handler.handle,
        # INTERACTIVE env var toggles interactive mode; defaults to False.
        interactive=os.getenv('INTERACTIVE', False),
    )
    interface.run()
예제 #4
0
def main():
    """Load the MNIST splits and train the Autoencoder on them."""
    data = common.Data(common_path + "/mnist_train/train_data.npy",
                       common_path + "/mnist_train/mnist_train_label",
                       common_path + "/mnist_test/test_data.npy",
                       common_path + "/mnist_test/mnist_test_label", fig)
    Autoencoder(data.train_x, data.test_x, data.train_y, data.test_y)
예제 #5
0
def op0():
    """
    Generate the 15000 lowest-confidence training images.
    :return: none
    """
    data = common.Data("mnist/mnist_train/train_data.npy",
                       "mnist/mnist_train/mnist_train_label",
                       "mnist/mnist_test/test_data.npy",
                       "mnist/mnist_test/mnist_test_label", 1, 28)

    # Score all 60000 training images with the saved model, then keep the
    # 15000 the model is least confident about.
    predictions = common.predict('model/1.4.0', 60000, data.train_x, 28)
    common.gen_data(predictions, data.train_x, data.train_y_no_one_hot, 15000)
예제 #6
0
파일: korredd.py 프로젝트: dair-targ/Korred
def main():
    """Run the Korred menu-bar app inside the daemon context."""
    data = common.Data()
    with Daemon(data):
        # TODO: Check for configuration validity and ask user if the old configuration should be kept
        NativeMessagingConfiguration().write()
        manager = LaunchctlManager()
        app = App(manager)
        # Checkbox item reflecting whether the launchctl job is loaded.
        launch_item = rumps.MenuItem(title='Launch at Login')
        launch_item.state = manager.is_loaded()
        app.menu = ['Install Firefox Extension...', launch_item, 'View Logs']
        app.run()
예제 #7
0
def op1():
    """
    Generate fc2 outputs (features for the downstream SVM).
    :return: none
    """
    data = common.Data("mnist/mnist_train/train_data.npy",
                       "mnist/mnist_train/mnist_train_label",
                       "mnist/mnist_test/test_data.npy",
                       "mnist/mnist_test/mnist_test_label", 1, 28)

    predictions = common.predict('CNN/model/SVM1', 60000, data.test_x, 28, "out1")

    # Unwrap the triple nesting around each prediction's feature vector.
    features = np.array([p[0][0][0] for p in predictions])
    np.save('mnist/mnist_test/fc1_5.npy', features)
    Reparameterization trick by sampling fr an isotropic unit Gaussian.
    :param z_mean: mean of Gaussian variable z
    :param z_log_var: covariance matrix of Gaussian variable z
    :return z sampled from z_mean and z_log_var
    """

    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon


# MNIST dataset
# NOTE(review): common.Data appears to expose train/test images plus
# one-hot labels in train_y/test_y -- confirm against common.py.
data = common.Data(common_path + "/mnist_train/train_data.npy",
                   common_path + "/mnist_train/mnist_train_label",
                   common_path + "/mnist_test/test_data.npy",
                   common_path + "/mnist_test/mnist_test_label", fig)
x_train = data.train_x
x_test = data.test_x
y_train = data.train_y
y_test = data.test_y

# Flatten each square image into a single vector for the dense VAE input.
image_size = x_train.shape[1]
original_dim = image_size * image_size
x_train = np.reshape(x_train, [-1, original_dim])
x_test = np.reshape(x_test, [-1, original_dim])

# network parameters
input_shape = (original_dim, )
intermediate_dim = 512
batch_size = 128
예제 #9
0
        # print(x.shape)
        # print(self.line.weight.shape)
        nn.init.normal_(self.out.weight)

    def forward(self, x):
        """Pass x through three linear+activation stages, then the output layer."""
        hidden = self.active(self.line(x))
        hidden = self.active2(self.line2(hidden))
        hidden = self.active3(self.line3(hidden))
        return self.out(hidden)


# Wrap the (x, y) arrays in a Dataset and serve shuffled mini-batches of 20.
dataset = c.Data(x, y)
dataLoader = tud.DataLoader(dataset, batch_size=20, shuffle=True)

model = NetWork3()
loss_fn = nn.MSELoss()  # mean-squared-error regression loss
optimer = torch.optim.Adam(model.parameters(), lr=1e-4)
# schuler = torch.optim.lr_scheduler.ExponentialLR(optimer, 0.5)
loss_arr = []  # loss history; presumably appended to inside train() -- verify


def train(epochs):
    time = 0
    last_loss = -1
    for epoch in range(1, epochs + 1):
        model.train()
        losses = []
예제 #10
0
파일: SVM.py 프로젝트: Hsveh/CS420_final_hw
"""
Read data from CNN_SVM
"""
import common
import numpy as np
from sklearn.preprocessing import scale
from sklearn.svm import SVC
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score

# Raw MNIST splits -- only the label vectors are used below; the SVM itself
# trains on pre-extracted fc-layer features loaded from .npy files.
data = common.Data("../mnist/mnist_train/train_data.npy",
                   "../mnist/mnist_train/mnist_train_label",
                   "../mnist/mnist_test/test_data.npy",
                   "../mnist/mnist_test/mnist_test_label", 1, 28)
# fc-layer feature vectors, presumably exported by the CNN -- verify producer.
train = np.load('../mnist/mnist_train/fc1_5.npy')
test = np.load('../mnist/mnist_test/fc1_5.npy')

# Standardize features to zero mean / unit variance before the RBF SVM.
train = scale(train)
test = scale(test)

clf = SVC(kernel='rbf')
clf.fit(train, data.train_y_no_one_hot)
y_pred = clf.predict(test)

# Per-class precision/recall/F1 plus overall accuracy on the test split.
print(classification_report(data.test_y_no_one_hot, y_pred))
print(accuracy_score(data.test_y_no_one_hot, y_pred))
예제 #11
0
import common
"""
Variable Definition
batch_size: batch size
fig: image size
max_epoch: max iteration
common_path = common path of input data
"""

n = 5
batch_size = 500
fig = 45
max_epoch = 200
common_path = "../mnist"
data = common.Data(common_path + "/mnist_train/mnist_train_data",
                   common_path + "/mnist_train/mnist_train_label",
                   common_path + "/mnist_test/mnist_test_data",
                   common_path + "/mnist_test/mnist_test_label", 1, fig)

X = data.train_x.reshape(data.size, fig, fig, 1)
Y = data.train_y
testX = data.test_x.reshape(data.size_test, fig, fig, 1)
testY = data.test_y

img_prep = tflearn.ImagePreprocessing()
img_prep.add_featurewise_zero_center(per_channel=True)

img_aug = tflearn.ImageAugmentation()

net = tflearn.input_data(shape=[None, 45, 45, 1],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
예제 #12
0
파일: CNN.py 프로젝트: Hsveh/CS420_final_hw
import shutil

common_path = "../mnist"

# Start each run with fresh log/model directories.  These paths are
# directories (os.makedirs recreates them just below), and os.remove()
# only deletes files -- it raises IsADirectoryError on a directory --
# so shutil.rmtree is the correct removal call here.
if os.path.exists(log_dir):
    shutil.rmtree(log_dir)
if os.path.exists(model_dir):
    shutil.rmtree(model_dir)
os.makedirs(log_dir, exist_ok=True)
os.makedirs(model_dir, exist_ok=True)
starttime = datetime.datetime.now()

logger = common.create_logger(
    'CNN', log_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

# Rotation-augmented training set (rotate_train/rotate_label) paired with
# the standard test split.
data = common.Data(common_path + "/mnist_train/rotate_train.npy",
                   common_path + "/mnist_train/rotate_label.npy",
                   common_path + "/mnist_test/test_data.npy",
                   common_path + "/mnist_test/mnist_test_label", batch_size,
                   fig)

def weight_variable(shape, name):
    """Create a named trainable weight tensor from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)


def bias_variable(shape, name):
    """Create a named trainable bias tensor initialised to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape), name=name)


def conv2d(x, W):