Example no. 1
	def __init__(self, input_shape=(1, 1016), dense_interior_units=(10,), n_classes=1016,
				 wt_scale=1e-3, reg=0, verbose=True):
		'''
		Parameters:
		-----------
		input_shape: tuple. Shape of a SINGLE input sample. shape=(1, vocab_size)
			The 1 is hard-coded. We aren't adding mini-batch support, so we are always processing
			one target word at a time — hence, our batch dimension is 1.
			Adding mini-batch support is an extension.
		dense_interior_units: tuple. Number of hidden units in each dense layer (not counting output layer).
			Same as embedding dimension / embedding_sz.
		n_classes: int. Number of classes in the input.
			This is also the number of units in the Output Dense layer. Same as `vocab_sz`.
		wt_scale: float. Global weight scaling to use for all layers with weights
		reg: float. Regularization strength
		verbose: bool. Do we want to turn network-related debug printouts on?
			NOTE: This is different from the per-layer verbose settings, which you can turn on manually below.

		TODO:
		1. Assemble the layers of the network and add them (in order) to `self.layers`.
		2. You will soon handle the "stacked" output layer via a modified softmax activation function.
		   Make the output layer activation function be this function: 'softmax_embedding'.
		3. Remember to define self.wt_layer_inds as the list indices in self.layers that have weights.
		'''
		super().__init__(reg, verbose)

		_, vocab_sz = input_shape

		# Hidden (embedding) layer: vocab_sz -> embedding_sz, linear activation
		D1 = layer.Dense(number=0, name="Hidden", units=dense_interior_units[0],
		                 n_units_prev_layer=vocab_sz, wt_scale=wt_scale,
		                 activation="linear", reg=reg, verbose=False)
		self.layers.append(D1)
		# Output layer: embedding_sz -> vocab_sz with the modified softmax
		D2 = layer.Dense(number=1, name="Output", units=vocab_sz,
		                 n_units_prev_layer=dense_interior_units[0], wt_scale=wt_scale,
		                 activation="softmax_embedding", reg=reg, verbose=False)
		self.layers.append(D2)
		# Both Dense layers carry weights
		self.wt_layer_inds = [0, 1]
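
Because each input is a one-hot row over the vocabulary, the Hidden layer's linear forward pass amounts to selecting one row of its weight matrix; that row is the word's embedding. A standalone numpy illustration (it does not use the layer module):

import numpy as np

vocab_sz, embed_sz = 1016, 10
W = np.random.randn(vocab_sz, embed_sz) * 1e-3   # Hidden-layer weights

x = np.zeros((1, vocab_sz))
x[0, 42] = 1.0                                   # one-hot code for word 42

# Multiplying by a one-hot vector just selects row 42 of W (the embedding)
assert np.allclose(x @ W, W[42])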
Example no. 2
    def __init__(self):
        super(CNN_modified, self).__init__()
        self.relu = layer.relu(2)
        self.conv1 = layer.Conv(1, 32, kernel_size=5, padding=2, stride=1)
        self.pool1 = torch.nn.AvgPool2d(kernel_size=2, stride=2)
        self.conv2 = layer.Conv(32, 64, kernel_size=5, padding=2, stride=1)
        self.pool2 = torch.nn.AvgPool2d(kernel_size=2, stride=2)
        self.dense1 = layer.Dense(7 * 7 * 64, 1024)
        self.dense2 = layer.Dense(1024, 10)
        self.precision = 0.
        self.epoch = 0
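
The 7 * 7 * 64 input size of dense1 follows from shape arithmetic, assuming 28x28 (MNIST-sized) inputs: each 5x5 convolution with padding 2 and stride 1 preserves the spatial size, and each 2x2, stride-2 pooling halves it.

# Shape arithmetic behind dense1's input size (assuming 28x28 inputs)
h = 28
h = (h + 2 * 2 - 5) // 1 + 1   # conv1: 5x5 kernel, padding 2, stride 1 -> 28
h = (h - 2) // 2 + 1           # pool1: 2x2, stride 2 -> 14
h = (h + 2 * 2 - 5) // 1 + 1   # conv2 -> 14
h = (h - 2) // 2 + 1           # pool2 -> 7
print(h * h * 64)              # 7 * 7 * 64 = 3136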
Example no. 3
    def _build(self):

        self.layers.append(
            layer.Dense(input_dim=self.input_dim,
                        output_dim=FLAGS.hidden1,
                        placeholders=self.placeholders,
                        act=tf.nn.relu,
                        dropout=True))

        for _ in range(self.num_layers - 2):
            self.layers.append(
                layer.Dense(input_dim=FLAGS.hidden1,
                            output_dim=FLAGS.hidden1,
                            placeholders=self.placeholders,
                            act=tf.nn.relu,
                            dropout=True))

        self.layers.append(
            layer.Dense(input_dim=FLAGS.hidden1,
                        output_dim=self.output_dim,
                        placeholders=self.placeholders,
                        act=tf.nn.relu,
                        dropout=True))
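
Depth is governed by self.num_layers: the loop contributes num_layers - 2 interior blocks, so num_layers = 2 yields just the input and output layers. A quick sketch of the resulting (input_dim, output_dim) pairs, using hypothetical values for input_dim, FLAGS.hidden1, and output_dim:

# Layer widths produced by _build for num_layers = 4
# (input_dim=1433, hidden1=16, output_dim=7 are hypothetical values)
num_layers, input_dim, hidden1, output_dim = 4, 1433, 16, 7
dims = [(input_dim, hidden1)]
dims += [(hidden1, hidden1)] * (num_layers - 2)
dims += [(hidden1, output_dim)]
print(dims)   # [(1433, 16), (16, 16), (16, 16), (16, 7)]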
Example no. 4
import numpy as np
import torch

import layer
import vhe


def Dense_test():
    print('running Dense layer test.')
    HE = vhe.HE()
    x = (np.random.randn(1, 28 * 28) * 1000).astype(int).astype(object)

    T = HE.TGen(28 * 28)
    S = HE.getSecretkey(T)

    linear1 = layer.Dense(28 * 28, 100)
    S_new = linear1.build(HE, S)
    x_encrypted = HE.encrypt(T=T, x=x, batching=True)
    y_encrypted = linear1.crypted_forward(x_encrypted, HE)
    yy = HE.decrypt(S=S_new, c=y_encrypted, batching=True)
    y = linear1(torch.from_numpy(x.astype(np.float32)))
    delta_max = (yy - y).abs().max()
    print('max value difference is {}'.format(delta_max))
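
To exercise the test as a standalone script:

if __name__ == '__main__':
    Dense_test()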
Example no. 5
    def __init__(self,
                 input_shape=(3, 32, 32),
                 n_kers=(32, ),
                 ker_sz=(7, ),
                 dense_interior_units=(100, ),
                 pooling_sizes=(2, ),
                 pooling_strides=(2, ),
                 n_classes=10,
                 wt_scale=1e-3,
                 reg=0,
                 verbose=True):
        '''
        Parameters:
        -----------
        input_shape: tuple. Shape of a SINGLE input sample (no mini-batch). By default: (n_chans, img_y, img_x)
        n_kers: tuple. Number of kernels/units in the 1st convolution layer. Format is (32,), which is a tuple
            rather than just an int. The reasoning is that if you wanted to create another Conv2D layer, say with 16
            units, n_kers would then be (32, 16). Thus, this format easily allows us to make the net deeper.
        ker_sz: tuple. x/y size of each convolution filter. Format is (7,), which means make 7x7 filters in the FIRST
            Conv2D layer. If we had another Conv2D layer with filters size 5x5, it would be ker_sz=(7,5)
        dense_interior_units: tuple. Number of hidden units in each dense layer. Same format as above.
            NOTE: Does NOT include the output layer, which has # units = # classes.
        pooling_sizes: tuple. Pooling extent in the i-th MaxPooling2D layer.  Same format as above.
        pooling_strides: tuple. Pooling stride in the i-th MaxPooling2D layer.  Same format as above.
        n_classes: int. Number of classes in the input. This will become the number of units in the Output Dense layer.
        wt_scale: float. Global weight scaling to use for all layers with weights
        reg: float. Regularization strength
        verbose: bool. Do we want to turn network-related debug printouts on?
            NOTE: This is different from the per-layer verbose settings, which are turned on manually below.

        TODO:
        1. Assemble the layers of the network and add them (in order) to `self.layers`.
        2. Remember to define self.wt_layer_inds as the list indices in self.layers that have weights.
        '''

        super().__init__(reg, verbose)

        n_chans, h, w = input_shape
        # 1) Input convolutional layer
        self.layers.append(
            accelerated_layer.Conv2DAccel(len(self.layers),
                                          'Conv2',
                                          n_kers[0],
                                          ker_sz[0],
                                          n_chans,
                                          wt_scale,
                                          activation='relu',
                                          reg=reg,
                                          verbose=verbose))
        # 2) 2x2 max pooling layer
        self.layers.append(
            accelerated_layer.MaxPooling2DAccel(len(self.layers), 'MaxPool',
                                                pooling_sizes[0],
                                                pooling_strides[0], 'linear',
                                                reg, verbose))
        # 3) Dense layer
        self.layers.append(
            layer.Dense(len(self.layers), 'DenseRelu', dense_interior_units[0],
                        # flattened pooled maps: pooled_h * pooled_w * n_kers
                        filter_ops.get_pooling_out_shape(
                            h, pooling_sizes[0], pooling_strides[0]) *
                        filter_ops.get_pooling_out_shape(
                            w, pooling_sizes[0], pooling_strides[0]) *
                        n_kers[0], wt_scale, 'relu', reg, verbose))

        # 3.5) Another dense layer, why not?
        self.layers.append(
            layer.Dense(len(self.layers), 'DenseRelu2',
                        dense_interior_units[0], self.layers[-1].get_units(),
                        wt_scale, 'relu', reg, verbose))

        # 4) Dense softmax output layer
        self.layers.append(
            layer.Dense(len(self.layers), 'DenseSoftMax', n_classes,
                        self.layers[-1].get_units(), wt_scale, 'softmax', reg,
                        verbose))

        # Only the indices of layers that have weights (the pooling layer has none)
        self.wt_layer_inds = [0, 2, 3, 4]
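
filter_ops.get_pooling_out_shape is not shown in this excerpt; assuming it implements the standard no-padding pooling formula floor((D - P) / S) + 1, a minimal version would be:

def get_pooling_out_shape(img_dim, pool_size, stride):
    # Output side length for no-padding pooling (assumed implementation;
    # the real filter_ops version is not shown here)
    return (img_dim - pool_size) // stride + 1

print(get_pooling_out_shape(32, 2, 2))   # 32x32 input, 2x2/stride-2 -> 16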
Example no. 6
import model
import layer
import optimizers
import pickle
import util
import numpy
import matplotlib.pyplot as plt

train_set, val_set, test_set = pickle.load(open("mnist.pkl", "rb"),
                                           encoding='latin1')

model = model.Sequence()
model.add(layer.Dense(300, input_dim=28 * 28, activation="Relu"))
#model.add(layer.Dense(300, activation="Relu"))
model.add(layer.Dense(10))

train_y = util.to_categorical(train_set[1])
# Sample a 50k subset without replacement (numpy.random.choice samples
# with replacement by default, which would duplicate rows)
idx = numpy.random.choice(train_set[0].shape[0], 50000, replace=False)
train_set = train_set[0][idx]
train_y = train_y[idx]

model.init()
model.fit(input_data=train_set, output_data=train_y, epoch=500, batch_num=10)
model.compile(optimizer=optimizers.SGD(model, 0.1), loss="Mean_squared_error")
model.train()

id = 0
rightnum = 0
for now in val_set[0]:
    # plt.imshow(numpy.reshape(now, (28, 28))); plt.show()
    # Truncated in the original; a plausible completion (assuming the
    # Sequence model exposes predict()) counts correct argmax predictions:
    if numpy.argmax(model.predict(now)) == val_set[1][id]:
        rightnum += 1
    id += 1
print("validation accuracy:", rightnum / len(val_set[0]))
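
util.to_categorical is not shown in this excerpt; a minimal numpy sketch of its assumed one-hot behavior:

import numpy

def to_categorical(labels, n_classes=10):
    # One-hot encode integer class labels (assumed behavior of util.to_categorical)
    out = numpy.zeros((len(labels), n_classes))
    out[numpy.arange(len(labels)), labels] = 1.0
    return out

print(to_categorical([0, 2, 1], n_classes=3))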
Example no. 7
import model
import layer
import optimizers
import numpy
import pickle

model = model.Sequence()
model.add(layer.Dense(1, input_dim=2))
model.add(layer.Dense(2))
model.add(layer.Dense(5))

w = numpy.array([[1], [9]])
w2 = numpy.array([[5, 4]])
w3 = numpy.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])

test_x = numpy.random.randn(1000, 2)
test_y = numpy.dot(test_x, w)
test_y = numpy.dot(test_y, w2)
test_y = numpy.dot(test_y, w3)

model.init()
model.fit(test_x, test_y, epoch=10000, batch_num=100)
model.compile(loss="Mean_squared_error",
              optimizer=optimizers.SGD(model, speed=0.000001))
model.train()

t = ""
isfirst = True
for now in model.now_model:
    print(now.w)
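
Since the target is built from three chained matrix products, it collapses to a single 2x5 linear map, which is why a stack of Dense layers can fit it exactly (assuming the Dense layers default to a linear activation). A quick check:

import numpy

w = numpy.array([[1], [9]])
w2 = numpy.array([[5, 4]])
w3 = numpy.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])

# (2x1) @ (1x2) @ (2x5) collapses to one 2x5 matrix
print(numpy.dot(numpy.dot(w, w2), w3))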
Example no. 8
    def __init__(self,
                 input_shape=(3, 32, 32),
                 n_kers=(32, ),
                 ker_sz=(9, ),
                 dense_interior_units=(100, ),
                 pooling_sizes=(2, ),
                 pooling_strides=(2, ),
                 n_classes=10,
                 wt_scale=1e-3,
                 reg=0,
                 verbose=True):
        '''
        Parameters:
        -----------
        input_shape: tuple. Shape of a SINGLE input sample (no mini-batch). By default: (n_chans, img_y, img_x)
        n_kers: tuple. Number of kernels/units in the 1st convolution layer. Format is (32,), which is a tuple
            rather than just an int. The reasoning is that if you wanted to create another Conv2D layer, say with 16
            units, n_kers would then be (32, 16). Thus, this format easily allows us to make the net deeper.
        ker_sz: tuple. x/y size of each convolution filter. Format is (7,), which means make 7x7 filters in the FIRST
            Conv2D layer. If we had another Conv2D layer with filters size 5x5, it would be ker_sz=(7,5)
        dense_interior_units: tuple. Number of hidden units in each dense layer. Same format as above.
            NOTE: Does NOT include the output layer, which has # units = # classes.
        pooling_sizes: tuple. Pooling extent in the i-th MaxPooling2D layer.  Same format as above.
        pooling_strides: tuple. Pooling stride in the i-th MaxPooling2D layer.  Same format as above.
        n_classes: int. Number of classes in the input. This will become the number of units in the Output Dense layer.
        wt_scale: float. Global weight scaling to use for all layers with weights
        reg: float. Regularization strength
        verbose: bool. Do we want to turn network-related debug printouts on?
            NOTE: This is different from the per-layer verbose settings, which are turned on manually below.

        TODO:
        1. Assemble the layers of the network and add them (in order) to `self.layers`.
        2. Remember to define self.wt_layer_inds as the list indices in self.layers that have weights.
        '''
        super().__init__(reg, verbose)

        n_chans, h, w = input_shape

        # 1) Input convolutional layer

        C = layer.Conv2DAccel(number=0,
                              name="Conv",
                              n_kers=n_kers[0],
                              ker_sz=ker_sz[0],
                              n_chans=n_chans,
                              wt_scale=wt_scale,
                              activation="relu",
                              reg=reg,
                              verbose=False)

        self.layers.append(C)

        # 2) 2x2 max pooling layer

        P = layer.MaxPooling2D(number=1,
                               name="Pool",
                               pool_size=pooling_sizes[0],
                               strides=pooling_strides[0],
                               activation="linear",
                               reg=reg,
                               verbose=False)

        self.layers.append(P)

        # 3) Dense layer

        pool_net_act_size_x = filter_ops.get_pooling_out_shape(
            w, pooling_sizes[0], pooling_strides[0])
        pool_net_act_size_y = filter_ops.get_pooling_out_shape(
            h, pooling_sizes[0], pooling_strides[0])
        # Flattened size of the pooled feature maps feeding the Dense layer
        pool_net_act_size = (pool_net_act_size_x * pool_net_act_size_y *
                             n_kers[0])

        D = layer.Dense(number=2,
                        name="Dense",
                        units=dense_interior_units[0],
                        n_units_prev_layer=pool_net_act_size,
                        wt_scale=wt_scale,
                        activation="relu",
                        reg=reg,
                        verbose=False)

        self.layers.append(D)

        # 4) Dense softmax output layer

        O = layer.Dense(number=3,
                        name="Output",
                        units=n_classes,
                        n_units_prev_layer=dense_interior_units[0],
                        wt_scale=wt_scale,
                        activation="softmax",
                        reg=reg,
                        verbose=False)

        self.layers.append(O)

        self.wt_layer_inds = [0, 2, 3]
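
wt_layer_inds skips index 1 because the pooling layer carries no weights. Instead of hard-coding the list, it could be derived; a sketch, assuming weightless layers either lack a wts attribute or leave it set to None:

        # Equivalent to the hard-coded [0, 2, 3]: keep only the indices of
        # layers that actually carry a weight matrix (pooling has none)
        self.wt_layer_inds = [i for i, L in enumerate(self.layers)
                              if getattr(L, 'wts', None) is not None]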
Example no. 9
    def load_model(self, filename):
        # TODO: read the text written by layer.save_model, rebuild each layer,
        # and append it to self.layers. Work in progress.
        assert filename[-4:] == ".txt", "file must be of .txt extension"

        with open(filename, 'r') as f:
            print("loading ", f.readline())

            self.layers = []

            layer_data = f.readline().split('\t')
            for i, layer_info in enumerate(layer_data[:-1]):
                layer_info = layer_info.split('@')
                if layer_info[0] == "Dense":
                    L = layer.Dense(number=i,
                                    name="Dense" + str(i),
                                    units=1,
                                    n_units_prev_layer=1,
                                    wt_scale=1,
                                    activation=layer_info[1],
                                    reg=int(layer_info[2]),
                                    verbose=eval(layer_info[3]))
                    L.wts = np.asarray(eval(layer_info[4]))
                    L.b = np.asarray(eval(layer_info[5]))
                elif layer_info[0] == "Conv2D":
                    L = layer.Conv2D(number=i,
                                     name="Conv" + str(i),
                                     n_kers=1,
                                     ker_sz=1,
                                     n_chans=1,
                                     wt_scale=1,
                                     activation=layer_info[1],
                                     reg=int(layer_info[2]),
                                     verbose=eval(layer_info[3]))
                    L.wts = np.asarray(eval(layer_info[4]))
                    L.b = np.asarray(eval(layer_info[5]))
                elif layer_info[0] == "MaxPooling2D":
                    L = layer.MaxPooling2D(number=i,
                                           name="Pool" + str(i),
                                           pool_size=int(layer_info[1]),
                                           strides=int(layer_info[2]),
                                           activation=layer_info[3],
                                           reg=int(layer_info[4]),
                                           verbose=eval(layer_info[5]))
                self.layers.append(L)

            # NOTE: eval() on file contents is brittle and unsafe; only load
            # checkpoints you trust.
            self.wt_layer_inds = eval(f.readline())
            self.reg = int(f.readline())
            self.verbose = eval(f.readline())
            self.loss_history = eval(f.readline())
            self.train_acc_history = eval(f.readline())
            self.validation_acc_history = eval(f.readline())
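
The parser above implies a companion on-disk format (assumed here, inferred only from what load_model reads): a title line; one line of tab-separated layer records, each an '@'-joined field list with a trailing tab; then wt_layer_inds, reg, verbose, and the three history lists as Python literals, one per line. Schematically (<TAB> stands for a tab character):

my_convnet
Conv2D@relu@0@False@[[0.01, -0.02]]@[0.0]<TAB>MaxPooling2D@2@2@linear@0@False<TAB>Dense@softmax@0@False@[[0.1], [0.2]]@[0.0]<TAB>
[0, 2]
0
True
[2.31, 1.92, 1.6]
[0.41, 0.55, 0.62]
[0.4, 0.52, 0.6]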
Example no. 10
                layer_dims = vals[1:-1]
                continue
            else:
                vals = line.split(' ')
                vals = [num(x) for x in vals]
                indat.append(vals[:indim])
                targets.append(vals[indim:])

    indat = np.array(indat)
    targets = np.array(targets)

    # print "Input Data : %s \nOutput Data : %s\n" % (indat, targets)

    # build the model
    mm = model.Model(indim)
    last_dim = indim
    for ldim in layer_dims:
        mm.push_layer(layer.Dense(ldim, last_dim))
        last_dim = ldim
    mm.push_layer(layer.Dense(outdim, last_dim, activation="none"))

    print(str(mm))

    print("Beginning training session\n")

    mm.train(indat, targets, 1, epochs)

    for line in sys.stdin:
        inp = [float(x) for x in line.strip().split(' ')]
        print(mm.predict(np.array([inp])))
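
The excerpt begins mid-function, so the num helper used in the parsing loop is not shown; a plausible definition (an assumption, since the original sits above the cut) parses a token as an int when possible, else a float:

def num(x):
    # Parse a numeric token: int if possible, otherwise float
    # (assumed helper; the original definition is above the excerpt)
    try:
        return int(x)
    except ValueError:
        return float(x)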