def assertSuccess(self, input):
    """Assert that both the Flatten and the Inline pass accept *input*
    without reporting any errors or warnings.

    Fixes: `assertEquals` is a deprecated alias (removed in Python 3.12);
    use `assertEqual`. The pass constructors are called for their side
    effect of populating *errors*, so their results are not bound.
    """
    errors = Errors()
    Flatten(input, errors)
    self.assertEqual(errors.num_errors, 0)
    self.assertEqual(errors.num_warnings, 0)
    Inline(input, errors)
    self.assertEqual(errors.num_errors, 0)
    self.assertEqual(errors.num_warnings, 0)
def compile(self, data):
    """Compile *data* (source text) to rendered output lines.

    Pipeline: parse -> VarCheck -> Flatten -> Reduce -> Inline ->
    register allocation on main's CFG -> Linearise -> Render.
    Each pass mutates ``self.ast`` / reports into ``self.errors``;
    statement order is significant.  Timing is printed to stdout
    (Python 2 ``print`` statement syntax).
    """
    # Time the parse phase separately from the transform phases.
    start_time = time.time()
    # parseAll=True: reject trailing unparsed input; [0] takes the root node.
    self.ast = program_grammar.parseString(data, parseAll=True)[0]
    print time.time() - start_time
    start_time = time.time()
    # Analysis/transform passes; each records problems via self.errors.
    self.varcheck = VarCheck(self.ast, self.errors)
    self.flatten = Flatten(self.ast, self.errors)
    self.reduce = Reduce(self.ast, self.errors)
    self.inline = Inline(self.ast, self.errors)
    # Register allocation operates on the CFG of 'main' only.
    main_cfg = self.ast.symbol_table.lookup('main').cfg
    print main_cfg
    self.regalloc = RegisterAllocation(main_cfg)
    # Back-end: linearise the AST, then render to output lines.
    self.lin = Linearise(self.ast, self.errors)
    self.render = Render(self.lin.lines, self.errors)
    print time.time() - start_time
    return self.render.lines
def cnn_adam(X, Y, val_X, val_Y):
    """Build, train and record a small CNN using the Adam optimizer.

    Trains on (X, Y), validates on (val_X, val_Y), and writes the
    training history to experiments/cnn-adam.csv.
    """
    model = Model(learning_rate=0.001, batch_size=32, epochs=200,
                  optimizer=Adam())
    # Architecture: two conv/pool/dropout stages, then a dense head.
    stack = [
        Conv2D(2, (3, 3), activation='tanh'),
        Maxpool((2, 2), stride=2),   # -> 16x16
        Dropout(0.5),
        Conv2D(4, (3, 3), activation='tanh'),
        Maxpool((2, 2), stride=2),   # -> 8x8
        Dropout(0.5),
        Flatten(),
        Dense(256, 32, activation='tanh'),
        Dense(32, 1),
    ]
    for layer in stack:
        model.add(layer)
    print("Begin Training")
    model.train(X, Y, val_X, val_Y)
    model.save_history("experiments/cnn-adam.csv")
def test_flatten_01(self):
    """One level of nesting collapses into a single flat list."""
    nested = [1, 2, [4, 5]]
    flattener = Flatten()
    self.assertEqual(flattener.flatten(nested), [1, 2, 4, 5])
def test_flatten_02(self):
    """Arbitrarily deep nesting (including [[9]]) is fully flattened."""
    nested = [1, 2, [4, 5], 6, [7, 8, [[9]]]]
    flattener = Flatten()
    self.assertEqual(flattener.flatten(nested),
                     [1, 2, 4, 5, 6, 7, 8, 9])
def __init__(self):
    """Assemble a LeNet-style layer stack.

    input: 28x28
      conv1:    (5x5x6)@s1p2   -> 28x28x6   {(28-5+2*2)/1+1}
      maxpool2: (2x2)@s2       -> 14x14x6   {(28-2)/2+1}
      conv3:    (5x5x16)@s1p0  -> 10x10x16  {(14-5)/1+1}
      maxpool4: (2x2)@s2       -> 5x5x16    {(10-2)/2+1}
      conv5:    (5x5x120)@s1p0 -> 1x1x120   {(5-5)/1+1}
      fc6: 120 -> 84
      fc7: 84 -> 10
      softmax: 10 -> 10
    """
    lr = 0.01
    # Build the whole network as one list literal instead of
    # repeated append calls; order defines the forward pass.
    self.layers = [
        ConvolutionLayer(inputs_channel=1, num_filters=6,
                         width=5, height=5, padding=2, stride=1,
                         learning_rate=lr, name='conv1'),
        ReLu(),
        MaxPoolingLayer(width=2, height=2, stride=2, name='maxpool2'),
        ConvolutionLayer(inputs_channel=6, num_filters=16,
                         width=5, height=5, padding=0, stride=1,
                         learning_rate=lr, name='conv3'),
        ReLu(),
        MaxPoolingLayer(width=2, height=2, stride=2, name='maxpool4'),
        ConvolutionLayer(inputs_channel=16, num_filters=120,
                         width=5, height=5, padding=0, stride=1,
                         learning_rate=lr, name='conv5'),
        ReLu(),
        Flatten(),
        FullyConnectedLayer(num_inputs=120, num_outputs=84,
                            learning_rate=lr, name='fc6'),
        ReLu(),
        FullyConnectedLayer(num_inputs=84, num_outputs=10,
                            learning_rate=lr, name='fc7'),
        Softmax(),
    ]
    self.lay_num = len(self.layers)
height_shift_range=0.16, width_shift_range=0.16, img_row_axis=1, img_col_axis=2, img_channel_axis=0, horizontal_flip=True, vertical_flip=False) model.add_layer(ConvolutionalLayer(num_filters=32)) model.add_layer(Relu()) model.add_layer( ConvolutionalLayer(input_shape=[32, 32, 32], num_filters=32, filter_dims=[32, 3, 3])) model.add_layer(Relu()) model.add_layer(MaxPool()) model.add_layer(Flatten()) model.add_layer(Dense(input_shape=8192, neurons=650)) model.add_layer(Relu()) #model.add_layer(Dense(input_shape=1000,neurons=650)) #model.add_layer(Relu()) model.add_layer(Dense(input_shape=650, neurons=10)) model.add_layer(Softmax()) model.train()