def __init__(self):
    self.model = Sequential()
    self.model.add(
        convolutional.Convolution2D(input_shape=(49, 19, 19),
                                    nb_filter=K,
                                    nb_row=5,
                                    nb_col=5,
                                    init='uniform',
                                    activation='relu',
                                    border_mode='same'))
    for i in range(2, 13):
        self.model.add(
            convolutional.Convolution2D(nb_filter=K,
                                        nb_row=3,
                                        nb_col=3,
                                        init='uniform',
                                        activation='relu',
                                        border_mode='same'))
    self.model.add(
        convolutional.Convolution2D(nb_filter=1,
                                    nb_row=1,
                                    nb_col=1,
                                    init='uniform',
                                    activation='linear',
                                    border_mode='same'))
    self.model.add(Flatten())
    self.model.add(Dense(256, init='uniform'))
    self.model.add(Dense(1, init='uniform', activation="tanh"))
def make_model():
    network = Sequential()
    network.add(
        Lambda(lambda pixel: pixel / 255 - 0.5, input_shape=(160, 320, 3)))
    network.add(convolutional.Cropping2D(cropping=((70, 25), (0, 0))))
    network.add(
        convolutional.Convolution2D(24, 5, 5, activation='relu',
                                    subsample=(2, 2)))
    network.add(
        convolutional.Convolution2D(36, 5, 5, activation='relu',
                                    subsample=(2, 2)))
    network.add(
        convolutional.Convolution2D(48, 5, 5, activation='relu',
                                    subsample=(2, 2)))
    network.add(convolutional.Convolution2D(64, 3, 3, activation='relu'))
    network.add(convolutional.Convolution2D(64, 3, 3, activation='relu'))
    network.add(Flatten())
    network.add(Dense(100))
    network.add(Dense(50))
    network.add(Dense(10))
    network.add(Dense(1))
    return network
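
# Usage sketch (not from the original source): compiling and fitting the
# NVIDIA-style model above on dummy data. The 'adam'/'mse' choice is an
# assumption; the real pipeline would feed recorded camera frames and
# steering angles.
import numpy as np

network = make_model()
network.compile(optimizer='adam', loss='mse')
frames = np.zeros((4, 160, 320, 3))   # stand-in camera images
angles = np.zeros((4,))               # stand-in steering angles
network.fit(frames, angles, nb_epoch=1, batch_size=4)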
def create_simple_model(num_classes, layer1_filters=32, layer2_filters=64):
    epochs = 5
    n_conv = 2
    model = models.Sequential()

    # First layer
    model.add(conv.ZeroPadding2D((1, 1), input_shape=(1, IMG_COLS, IMG_ROWS)))
    model.add(
        conv.Convolution2D(layer1_filters, n_conv, n_conv, activation="relu"))
    model.add(conv.MaxPooling2D(strides=(2, 2)))

    # Second layer
    model.add(conv.ZeroPadding2D((1, 1)))
    model.add(
        conv.Convolution2D(layer2_filters, n_conv, n_conv, activation="relu"))
    model.add(conv.MaxPooling2D(strides=(2, 2)))

    model.add(core.Flatten())
    model.add(core.Dropout(0.2))
    model.add(core.Dense(128, activation="relu"))
    model.add(core.Dense(num_classes, activation="softmax"))

    model.summary()
    model.compile(loss="categorical_crossentropy",
                  optimizer="adadelta",
                  metrics=["accuracy"])
    return model, epochs
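
# Usage sketch (not from the original source): the function returns the model
# together with its suggested epoch count. IMG_COLS/IMG_ROWS are module-level
# constants in the original; 28x28 values and the dummy arrays below are
# assumptions for illustration only.
import numpy as np

IMG_COLS, IMG_ROWS = 28, 28
model, epochs = create_simple_model(num_classes=10)
dummy_X = np.zeros((8, 1, IMG_COLS, IMG_ROWS))
dummy_Y = np.zeros((8, 10))
dummy_Y[:, 0] = 1  # pretend every sample belongs to class 0
model.fit(dummy_X, dummy_Y, nb_epoch=epochs, batch_size=8)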
def Dave_v3(input_tensor=None, load_weights=False):
    model = models.Sequential()
    model.add(
        convolutional.Convolution2D(16, 3, 3,
                                    input_shape=(32, 128, 3),
                                    activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(convolutional.Convolution2D(32, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(convolutional.Convolution2D(64, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(core.Flatten())
    model.add(core.Dense(500, activation='relu'))
    # model.add(core.Dropout(.5))
    model.add(core.Dense(100, activation='relu'))
    # model.add(core.Dropout(.25))
    model.add(core.Dense(20, activation='relu'))
    model.add(core.Dense(1))
    model.add(
        Lambda(One_to_radius, output_shape=atan_layer_shape,
               name="prediction"))
    if load_weights:
        model.load_weights('./models/dave3/dave3.h5')
    model.compile(optimizer=optimizers.Adam(lr=1e-04),
                  loss='mean_squared_error')
    return model
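
# Usage sketch (not from the original source). One_to_radius and
# atan_layer_shape are defined elsewhere in the original module, so this
# only runs alongside those definitions.
import numpy as np

model = Dave_v3()
steering = model.predict(np.zeros((1, 32, 128, 3)))  # one dummy 32x128 frame
print(steering)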
def test_convolution_2d_dim_ordering():
    nb_filter = 4
    nb_row = 3
    nb_col = 2
    stack_size = 3

    np.random.seed(1337)
    weights = [
        np.random.random((nb_filter, stack_size, nb_row, nb_col)),
        np.random.random(nb_filter)
    ]
    input = np.random.random((1, stack_size, 10, 10))

    layer = convolutional.Convolution2D(nb_filter, nb_row, nb_col,
                                        weights=weights,
                                        input_shape=input.shape[1:],
                                        dim_ordering='th')
    layer.input = K.variable(input)
    out_th = K.eval(layer.get_output(False))

    input = np.transpose(input, (0, 2, 3, 1))
    weights[0] = np.transpose(weights[0], (2, 3, 1, 0))
    layer = convolutional.Convolution2D(nb_filter, nb_row, nb_col,
                                        weights=weights,
                                        input_shape=input.shape[1:],
                                        dim_ordering='tf')
    layer.input = K.variable(input)
    out_tf = K.eval(layer.get_output(False))

    assert_allclose(out_tf, np.transpose(out_th, (0, 2, 3, 1)), atol=1e-05)
def create_network(**kwargs):
    model = Sequential()
    model.add(
        convolutional.Convolution2D(input_shape=(49, 19, 19),
                                    nb_filter=K,
                                    nb_row=5,
                                    nb_col=5,
                                    init='uniform',
                                    activation='relu',
                                    border_mode='same'))
    for i in range(2, 13):
        model.add(
            convolutional.Convolution2D(nb_filter=K,
                                        nb_row=3,
                                        nb_col=3,
                                        init='uniform',
                                        activation='relu',
                                        border_mode='same'))
    model.add(
        convolutional.Convolution2D(nb_filter=1,
                                    nb_row=1,
                                    nb_col=1,
                                    init='uniform',
                                    activation='linear',
                                    border_mode='same'))
    model.add(Flatten())
    model.add(Dense(256, init='uniform'))
    model.add(Dense(1, init='uniform', activation="tanh"))
    return model
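
# Usage sketch (not from the original source): K, the per-layer filter count,
# is a module-level constant in the original. The value net maps a batch of
# 49-plane 19x19 positions to a single tanh score each.
import numpy as np

value_net = create_network()
value_net.compile(optimizer='sgd', loss='mean_squared_error')
positions = np.zeros((2, 49, 19, 19))  # two dummy board encodings
print(value_net.predict(positions))    # shape (2, 1), values in [-1, 1]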
def __init__(self, img_size, nb_classes):
    batch_size = 128
    img_rows, img_cols = img_size
    nb_filters_1 = 32  # 64
    nb_filters_2 = 64  # 128
    nb_filters_3 = 128  # 256
    nb_conv = 3

    cnn = models.Sequential()
    cnn.add(conv.Convolution2D(nb_filters_1, nb_conv, nb_conv,
                               activation="relu",
                               input_shape=(img_rows, img_cols, 1),
                               border_mode='same'))
    cnn.add(conv.Convolution2D(nb_filters_1, nb_conv, nb_conv,
                               activation="relu", border_mode='same'))
    cnn.add(conv.MaxPooling2D(strides=(2, 2)))

    cnn.add(conv.Convolution2D(nb_filters_2, nb_conv, nb_conv,
                               activation="relu", border_mode='same'))
    cnn.add(conv.Convolution2D(nb_filters_2, nb_conv, nb_conv,
                               activation="relu", border_mode='same'))
    cnn.add(conv.MaxPooling2D(strides=(2, 2)))

    # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
    # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
    # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
    # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
    # cnn.add(conv.MaxPooling2D(strides=(2,2)))

    cnn.add(core.Flatten())
    cnn.add(core.Dropout(0.2))
    cnn.add(core.Dense(128, activation="relu"))  # 4096
    cnn.add(core.Dense(nb_classes, activation="softmax"))

    cnn.summary()
    cnn.compile(loss="categorical_crossentropy",
                optimizer="adam",
                metrics=["accuracy"])
    self.cnn = cnn
def main(n_filters, conv_size, pool_size, dropout, patch_size,
         n_astro=7, out_path=None):
    # Imports must be in the function, or whenever we import this module, Keras
    # will dump to stdout.
    import keras.layers.core as core
    from keras.layers import Input, Dense, Concatenate
    import keras.layers.convolutional as conv
    import keras.layers.merge
    from keras.models import Model

    im_in = Input(shape=(1, patch_size, patch_size))
    astro_in = Input(shape=(n_astro,))

    # 1 x 32 x 32
    conv1 = conv.Convolution2D(filters=n_filters,
                               kernel_size=(conv_size, conv_size),
                               padding='valid',
                               activation='relu',
                               data_format='channels_first')(im_in)
    # 32 x 28 x 28
    pool1 = conv.MaxPooling2D(pool_size=(pool_size, pool_size),
                              data_format='channels_first')(conv1)
    # 32 x 14 x 14
    conv2 = conv.Convolution2D(filters=n_filters,
                               kernel_size=(conv_size, conv_size),
                               padding='valid',
                               activation='relu',
                               data_format='channels_first')(pool1)
    # 32 x 10 x 10
    pool2 = conv.MaxPooling2D(pool_size=(pool_size, pool_size),
                              data_format='channels_first')(conv2)
    # 32 x 5 x 5
    conv3 = conv.Convolution2D(filters=n_filters,
                               kernel_size=(conv_size, conv_size),
                               padding='valid',
                               activation='relu',
                               data_format='channels_first')(pool2)
    # 32 x 1 x 1
    dropout = core.Dropout(dropout)(conv3)
    flatten = core.Flatten()(dropout)
    conc = Concatenate()([astro_in, flatten])
    lr = Dense(1, activation='sigmoid')(conc)

    model = Model(inputs=[astro_in, im_in], outputs=[lr])
    model.compile(loss='binary_crossentropy', optimizer='adadelta')
    model_json = model.to_json()
    if out_path is not None:
        with open(out_path, 'w') as f:
            f.write(model_json)
    return model_json
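
# Usage sketch (not from the original source): patch_size=32 with 5x5
# convolutions and 2x2 pooling reproduces the shape comments inside main()
# (32 -> 28 -> 14 -> 10 -> 5 -> 1).
model_json = main(n_filters=32, conv_size=5, pool_size=2,
                  dropout=0.5, patch_size=32)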
def __init__(self):
    self.model = Sequential()
    self.model.add(
        convolutional.Convolution2D(input_shape=(48, 19, 19),
                                    nb_filter=K,
                                    nb_row=5,
                                    nb_col=5,
                                    init='uniform',
                                    activation='relu',
                                    border_mode='same'))
    for i in range(2, 13):
        self.model.add(
            convolutional.Convolution2D(nb_filter=K,
                                        nb_row=3,
                                        nb_col=3,
                                        init='uniform',
                                        activation='relu',
                                        border_mode='same'))
    self.model.add(
        convolutional.Convolution2D(nb_filter=1,
                                    nb_row=1,
                                    nb_col=1,
                                    init='uniform',
                                    border_mode='same'))
    self.model.add(Reshape((19, 19)))
    self.model.add(Activation('softmax'))
    sgd = SGD(lr=LEARNING_RATE, decay=DECAY)
    self.model.compile(loss='binary_crossentropy', optimizer=sgd)
def add_resnet_unit(path, K, **params):
    """Add a resnet unit to path starting at layer 'K',
    adding as many (ReLU + Conv2D) modules as specified by n_skip_K

    Returns new path and next layer index, i.e. K + n_skip_K, in a tuple
    """
    # loosely based on https://github.com/keunwoochoi/residual_block_keras
    # (see also keras docs here: http://keras.io/getting-started/functional-api-guide/#all-models-are-callable-just-like-layers)
    block_input = path
    # use n_skip_K if it is there, default to 1
    skip_key = "n_skip_%d" % K
    n_skip = params.get(skip_key, 1)
    for i in range(n_skip):
        layer = K + i
        # add BatchNorm
        path = BatchNormalization()(path)
        # add ReLU
        path = Activation('relu')(path)
        # use filter_width_K if it is there, otherwise use 3
        filter_key = "filter_width_%d" % layer
        filter_width = params.get(filter_key, 3)
        # add Conv2D
        path = convolutional.Convolution2D(
            nb_filter=params["filters_per_layer"],
            nb_row=filter_width,
            nb_col=filter_width,
            init='uniform',
            activation='linear',
            border_mode='same')(path)
    # Merge 'input layer' with the path
    path = merge([block_input, path], mode='sum')
    return path, K + n_skip
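
# Usage sketch (not from the original source): wiring one residual block onto
# a stem convolution, with the same Keras 1.x imports the function above uses
# (Input, convolutional, BatchNormalization, Activation, merge).
stem_input = Input(shape=(48, 19, 19))
stem = convolutional.Convolution2D(nb_filter=128, nb_row=5, nb_col=5,
                                   init='uniform', activation='linear',
                                   border_mode='same')(stem_input)
# one skip spanning two (BN + ReLU + Conv2D) modules, starting at layer 1
path, next_layer = add_resnet_unit(stem, 1,
                                   filters_per_layer=128, n_skip_1=2)
print(next_layer)  # 3: the next valid layer index after the merge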
def __init__(self):
    self.model = Sequential()
    self.model.add(
        convolutional.Convolution2D(input_shape=(49, 19, 19),
                                    nb_filter=K,
                                    nb_row=5,
                                    nb_col=5,
                                    init='uniform',
                                    activation='relu',
                                    border_mode='same'))
    for i in range(2, 13):
        self.model.add(
            convolutional.Convolution2D(nb_filter=K,
                                        nb_row=3,
                                        nb_col=3,
                                        init='uniform',
                                        activation='relu',
                                        border_mode='same'))
    self.model.add(
        convolutional.Convolution2D(nb_filter=1,
                                    nb_row=1,
                                    nb_col=1,
                                    init='uniform',
                                    activation='linear',
                                    border_mode='same'))
    self.model.add(Flatten())
    self.model.add(Dense(256, init='uniform'))
    self.model.add(Dense(1, init='uniform', activation="tanh"))
    sgd = SGD(lr=LEARNING_RATE, decay=DECAY)
    self.model.compile(loss='mean_squared_error', optimizer=sgd)
def Simple_Convo(train, nb_classes):
    batch_size = 128
    img_rows, img_cols = 56, 56
    nb_filters_1 = 32  # 64
    nb_filters_2 = 64  # 128
    nb_filters_3 = 128  # 256
    nb_conv = 3

    # train = np.concatenate([train, train], axis=1)
    trainX = train[:, 1:].reshape(train.shape[0], 28, 28, 1)
    trainX = trainX.astype(float)
    trainX /= 255.0
    trainX = np.concatenate([trainX, np.roll(trainX, 14, axis=1)], axis=1)
    trainX = np.concatenate([trainX, np.fliplr(np.roll(trainX, 7, axis=2))],
                            axis=2)
    print(trainX.shape)

    cnn = models.Sequential()
    cnn.add(conv.Convolution2D(nb_filters_1, nb_conv, nb_conv,
                               activation="relu",
                               input_shape=(img_rows, img_cols, 1),
                               border_mode='same'))
    cnn.add(conv.Convolution2D(nb_filters_1, nb_conv, nb_conv,
                               activation="relu", border_mode='same'))
    cnn.add(conv.MaxPooling2D(strides=(2, 2)))

    cnn.add(conv.Convolution2D(nb_filters_2, nb_conv, nb_conv,
                               activation="relu", border_mode='same'))
    cnn.add(conv.Convolution2D(nb_filters_2, nb_conv, nb_conv,
                               activation="relu", border_mode='same'))
    cnn.add(conv.MaxPooling2D(strides=(2, 2)))

    # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
    # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
    # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
    # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
    # cnn.add(conv.MaxPooling2D(strides=(2,2)))

    cnn.add(core.Flatten())
    cnn.add(core.Dropout(0.2))
    cnn.add(core.Dense(128, activation="relu"))  # 4096
    cnn.add(core.Dense(nb_classes, activation="softmax"))

    cnn.summary()
    return cnn
def test_convolution_2d(self):
    nb_samples = 8
    nb_filter = 9
    stack_size = 7
    nb_row = 10
    nb_col = 6
    input_nb_row = 11
    input_nb_col = 12

    weights_in = [
        np.ones((nb_filter, stack_size, nb_row, nb_col)),
        np.ones(nb_filter)
    ]

    self.assertRaises(Exception, convolutional.Convolution2D,
                      nb_filter, stack_size, nb_row, nb_col,
                      border_mode='foo')

    input = np.ones((nb_samples, stack_size, input_nb_row, input_nb_col))
    for weight in [None, weights_in]:
        for border_mode in ['valid', 'full', 'same']:
            for subsample in [(1, 1), (2, 3)]:
                if border_mode == 'same' and subsample != (1, 1):
                    continue
                for W_regularizer in [None, 'l2']:
                    for b_regularizer in [None, 'l2']:
                        for act_regularizer in [None, 'l2']:
                            layer = convolutional.Convolution2D(
                                nb_filter, stack_size, nb_row, nb_col,
                                weights=weight,
                                border_mode=border_mode,
                                W_regularizer=W_regularizer,
                                b_regularizer=b_regularizer,
                                activity_regularizer=act_regularizer,
                                subsample=subsample)
                            layer.input = theano.shared(value=input)
                            for train in [True, False]:
                                out = layer.get_output(train).eval()
                                if border_mode == 'same' and subsample == (1, 1):
                                    assert out.shape[2:] == input.shape[2:]
                            config = layer.get_config()
def test_convolution_2d():
    nb_samples = 8
    nb_filter = 9
    stack_size = 7
    nb_row = 10
    nb_col = 6
    input_nb_row = 11
    input_nb_col = 12

    weights_in = [
        np.ones((nb_filter, stack_size, nb_row, nb_col)),
        np.ones(nb_filter)
    ]

    input = np.ones((nb_samples, stack_size, input_nb_row, input_nb_col))
    for weight in [None, weights_in]:
        for border_mode in ['valid', 'same']:
            for subsample in [(1, 1), (2, 2)]:
                if border_mode == 'same' and subsample != (1, 1):
                    continue
                for W_regularizer in [None, 'l2']:
                    for b_regularizer in [None, 'l2']:
                        for act_regularizer in [None, 'l2']:
                            layer = convolutional.Convolution2D(
                                nb_filter, nb_row, nb_col,
                                weights=weight,
                                border_mode=border_mode,
                                W_regularizer=W_regularizer,
                                b_regularizer=b_regularizer,
                                activity_regularizer=act_regularizer,
                                subsample=subsample,
                                input_shape=(stack_size, None, None))
                            layer.input = K.variable(input)
                            for train in [True, False]:
                                out = K.eval(layer.get_output(train))
                                if border_mode == 'same' and subsample == (1, 1):
                                    assert out.shape[2:] == input.shape[2:]
                            layer.get_config()
def cpg_layers(params):
    layers = []
    if params.drop_in:
        layer = kcore.Dropout(params.drop_in)
        layers.append(('xd', layer))
    nb_layer = len(params.nb_filter)
    w_reg = kr.WeightRegularizer(l1=params.l1, l2=params.l2)
    for l in range(nb_layer):
        layer = kconv.Convolution2D(nb_filter=params.nb_filter[l],
                                    nb_row=1,
                                    nb_col=params.filter_len[l],
                                    activation=params.activation,
                                    init='glorot_uniform',
                                    W_regularizer=w_reg,
                                    border_mode='same')
        layers.append(('c%d' % (l + 1), layer))
        layer = kconv.MaxPooling2D(pool_size=(1, params.pool_len[l]))
        layers.append(('p%d' % (l + 1), layer))
    layer = kcore.Flatten()
    layers.append(('f1', layer))
    if params.drop_out:
        layer = kcore.Dropout(params.drop_out)
        layers.append(('f1d', layer))
    if params.nb_hidden:
        layer = kcore.Dense(params.nb_hidden,
                            activation='linear',
                            init='glorot_uniform')
        layers.append(('h1', layer))
        if params.batch_norm:
            layer = knorm.BatchNormalization()
            layers.append(('h1b', layer))
        layer = kcore.Activation(params.activation)
        layers.append(('h1a', layer))
        if params.drop_out:
            layer = kcore.Dropout(params.drop_out)
            layers.append(('h1d', layer))
    return layers
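
# Usage sketch (not from the original source) with a hypothetical params
# object; the real one comes from the surrounding project's configuration.
# The attribute names simply mirror what cpg_layers reads.
from types import SimpleNamespace

params = SimpleNamespace(drop_in=0.0, nb_filter=[4], filter_len=[8],
                         pool_len=[4], activation='relu', l1=0.0, l2=1e-4,
                         drop_out=0.25, nb_hidden=32, batch_norm=False)
for name, layer in cpg_layers(params):
    print(name, type(layer).__name__)  # e.g. c1 Convolution2D, p1 MaxPooling2D, ...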
def create_network(**kwargs):
    """construct a convolutional neural network with Resnet-style skip connections.
    Arguments are the same as with the default CNNPolicy network, except the default
    number of layers is 20 plus a new n_skip parameter

    Keyword Arguments:
    - input_dim:         depth of features to be processed by first layer (no default)
    - board:             width of the go board to be processed (default 19)
    - filters_per_layer: number of filters used on every layer (default 128)
    - layers:            number of convolutional steps (default 20)
    - filter_width_K:    (where K is between 1 and <layers>) width of filter on
                         layer K (default 3 except 1st layer which defaults to 5).
                         Must be odd.
    - n_skip_K:          (where K is as in filter_width_K) number of convolutional
                         layers to skip with the linear path starting at K. Only
                         valid at K >= 1. (Each layer defaults to 1)

    Note that n_skip_1=2 means that the next valid value of n_skip_* is 3

    A diagram may help explain (numbers indicate layer):

           1        2        3           4        5        6
        I--C--B--R--C--B--R--C--M--B--R--C--B--R--C--B--R--C--M ... M--R--F--O
            \__________________/ \___________________________/ \ ... /
                [n_skip_1 = 2]          [n_skip_3 = 3]

    I - input
    B - BatchNormalization
    R - ReLU
    C - Conv2D
    F - Flatten
    O - output
    M - merge

    The input is always passed through a Conv2D layer, the output of which
    layer is counted as '1'. Each subsequent [R -- C] block is counted as
    one 'layer'. The 'merge' layer isn't counted; hence if n_skip_1 is 2,
    the next valid skip parameter is n_skip_3, which will start at the
    output of the merge
    """
    defaults = {
        "board": 9,
        "filters_per_layer": 128,
        "layers": 20,
        "filter_width_1": 5
    }
    # copy defaults, but override with anything in kwargs
    params = defaults
    params.update(kwargs)

    # create the network using Keras' functional API,
    # since this isn't 'Sequential'
    model_input = Input(shape=(params['input_dim'], params['board'],
                               params['board']))

    # create first layer
    convolution_path = convolutional.Convolution2D(
        input_shape=(),
        nb_filter=params['filters_per_layer'],
        nb_row=params['filter_width_1'],
        nb_col=params['filter_width_1'],
        init='uniform',
        activation='linear',
        border_mode='same')(model_input)
def create_network(**kwargs):
    """construct a convolutional neural network with Resnet-style skip connections.
    Arguments are the same as with the default CNNPolicy network, except the default
    number of layers is 20 plus a new n_skip parameter

    Keyword Arguments:
    - input_dim:         depth of features to be processed by first layer (no default)
    - board:             width of the go board to be processed (default 19)
    - filters_per_layer: number of filters used on every layer (default 128)
    - layers:            number of convolutional steps (default 20)
    - filter_width_K:    (where K is between 1 and <layers>) width of filter on
                         layer K (default 3 except 1st layer which defaults to 5).
                         Must be odd.
    - n_skip_K:          (where K is as in filter_width_K) number of convolutional
                         layers to skip with the linear path starting at K. Only
                         valid at K >= 1. (Each layer defaults to 1)

    Note that n_skip_1=2 means that the next valid value of n_skip_* is 3

    A diagram may help explain (numbers indicate layer):

           1              2              3                   4              5              6
    I--C -- B -- R -- C -- B -- R -- C -- M -- B -- R -- C -- B -- R -- C -- B -- R -- C -- M ... M -- R -- F -- O
         \___________________________/    \____________________________________________________/ \ ... /
                [n_skip_1 = 2]                            [n_skip_3 = 3]

    I - input
    B - BatchNormalization
    R - ReLU
    C - Conv2D
    F - Flatten
    O - output
    M - merge

    The input is always passed through a Conv2D layer, the output of which
    layer is counted as '1'. Each subsequent [R -- C] block is counted as
    one 'layer'. The 'merge' layer isn't counted; hence if n_skip_1 is 2,
    the next valid skip parameter is n_skip_3, which will start at the
    output of the merge
    """
    defaults = {
        "board": 19,
        "filters_per_layer": 128,
        "layers": 20,
        "filter_width_1": 5
    }
    # copy defaults, but override with anything in kwargs
    params = defaults
    params.update(kwargs)

    # create the network using Keras' functional API,
    # since this isn't 'Sequential'
    model_input = Input(shape=(params["input_dim"], params["board"],
                               params["board"]))

    # create first layer
    convolution_path = convolutional.Convolution2D(
        input_shape=(),
        nb_filter=params["filters_per_layer"],
        nb_row=params["filter_width_1"],
        nb_col=params["filter_width_1"],
        init='uniform',
        activation='linear',  # relu activations done inside resnet modules
        border_mode='same')(model_input)

    def add_resnet_unit(path, K, **params):
        """Add a resnet unit to path starting at layer 'K',
        adding as many (ReLU + Conv2D) modules as specified by n_skip_K

        Returns new path and next layer index, i.e. K + n_skip_K, in a tuple
        """
        # loosely based on https://github.com/keunwoochoi/residual_block_keras
        # (see also keras docs here: http://keras.io/getting-started/functional-api-guide/#all-models-are-callable-just-like-layers)
        block_input = path
        # use n_skip_K if it is there, default to 1
        skip_key = "n_skip_%d" % K
        n_skip = params.get(skip_key, 1)
        for i in range(n_skip):
            layer = K + i
            # add BatchNorm
            path = BatchNormalization()(path)
            # add ReLU
            path = Activation('relu')(path)
            # use filter_width_K if it is there, otherwise use 3
            filter_key = "filter_width_%d" % layer
            filter_width = params.get(filter_key, 3)
            # add Conv2D
            path = convolutional.Convolution2D(
                nb_filter=params["filters_per_layer"],
                nb_row=filter_width,
                nb_col=filter_width,
                init='uniform',
                activation='linear',
                border_mode='same')(path)
        # Merge 'input layer' with the path
        path = merge([block_input, path], mode='sum')
        return path, K + n_skip

    # create all other layers
    layer = 1
    while layer < params['layers']:
        convolution_path, layer = add_resnet_unit(convolution_path, layer,
                                                  **params)
    if layer > params['layers']:
        print("Due to skipping, ended with {} layers instead of {}".format(
            layer, params['layers']))

    # since each layer's activation was linear, need one more ReLU
    convolution_path = Activation('relu')(convolution_path)

    # the last layer maps each <filters_per_layer> feature to a number
    convolution_path = convolutional.Convolution2D(
        nb_filter=1,
        nb_row=1,
        nb_col=1,
        init='uniform',
        border_mode='same')(convolution_path)
    # flatten output
    network_output = Flatten()(convolution_path)
    # add a bias to each board location
    network_output = Bias()(network_output)
    # softmax makes it into a probability distribution
    network_output = Activation('softmax')(network_output)

    return Model(input=[model_input], output=[network_output])
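
# Usage sketch (not from the original source): input_dim has no default and
# must be supplied; the custom Bias layer comes from the surrounding project.
policy = create_network(input_dim=48, layers=20, n_skip_1=2)
policy.compile(optimizer='sgd', loss='categorical_crossentropy')
policy.summary()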
import pandas as pd
from keras import models, optimizers, backend
from keras.layers import core, convolutional, pooling
from sklearn import model_selection, utils
from dataPreprocessing import generate_samples, preprocess

if __name__ == '__main__':
    # Read the split data
    df_train = pd.read_csv('train.csv')
    df_valid = pd.read_csv('test.csv')

    # CNN Model Architecture
    model = models.Sequential()
    model.add(convolutional.Convolution2D(16, 3, 3,
                                          input_shape=(32, 128, 3),
                                          activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(convolutional.Convolution2D(32, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(convolutional.Convolution2D(64, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(core.Flatten())
    model.add(core.Dense(500, activation='relu'))
    model.add(core.Dropout(.5))
    model.add(core.Dense(100, activation='relu'))
    model.add(core.Dropout(.25))
    model.add(core.Dense(20, activation='relu'))
    model.add(core.Dense(1))
    model.compile(optimizer=optimizers.Adam(lr=1e-04),
                  loss='mean_squared_error')

    # load the existing model
def create_network(**kwargs):
    """construct a convolutional neural network.

    Keyword Arguments:
    - input_dim:           depth of features to be processed by first layer (no default)
    - board:               width of the go board to be processed (default 19)
    - filters_per_layer:   number of filters used on every layer (default 128)
    - filters_per_layer_K: (where K is between 1 and <layers>) number of filters
                           used on layer K (default #filters_per_layer)
    - layers:              number of convolutional steps (default 12)
    - filter_width_K:      (where K is between 1 and <layers>) width of filter on
                           layer K (default 3 except 1st layer which defaults to 5).
                           Must be odd.
    """
    defaults = {
        "board": 9,
        "filters_per_layer": 128,
        "layers": 12,
        "filter_width_1": 5
    }
    # copy defaults, but override with anything in kwargs
    params = defaults
    params.update(kwargs)

    # create the network:
    # a series of zero-paddings followed by convolutions
    # such that the output dimensions are also board x board
    network = Sequential()

    # create first layer
    network.add(
        convolutional.Convolution2D(
            input_shape=(params["input_dim"], params['board'],
                         params['board']),
            nb_filter=params.get('filters_per_layer_1',
                                 params['filters_per_layer']),
            nb_row=params['filter_width_1'],
            nb_col=params['filter_width_1'],
            init='uniform',
            activation='relu',
            border_mode='same'))

    # create all other layers
    for i in range(2, params['layers'] + 1):
        # use filter_width_K if it is there, otherwise use 3
        filter_key = 'filter_width_%d' % i
        filter_width = params.get(filter_key, 3)
        # use filters_per_layer_K if it is there, otherwise use default value
        filter_count_key = "filters_per_layer_%d" % i
        filter_nb = params.get(filter_count_key,
                               params['filters_per_layer'])
        network.add(
            convolutional.Convolution2D(nb_filter=filter_nb,
                                        nb_row=filter_width,
                                        nb_col=filter_width,
                                        init='uniform',
                                        activation='relu',
                                        border_mode='same'))

    # the last layer maps each <filters_per_layer> feature to a number
    network.add(
        convolutional.Convolution2D(nb_filter=1,
                                    nb_row=1,
                                    nb_col=1,
                                    init='uniform',
                                    border_mode='same'))
    # reshape output to be board x board
    network.add(Flatten())
    # add a bias to each board location
    network.add(Bias())
    network.add(Activation('softmax'))
    return network
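
# Usage sketch (not from the original source): a 12-layer policy net over a
# 19x19 board; input_dim has no default, and Bias is the project's custom layer.
policy = create_network(input_dim=48, board=19, layers=12)
policy.compile(optimizer='sgd', loss='categorical_crossentropy')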
def test_TimeDistributed():
    # first, test with Dense layer
    model = Sequential()
    model.add(wrappers.TimeDistributed(core.Dense(2), input_shape=(3, 4)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.random((10, 3, 4)),
              np.random.random((10, 3, 2)),
              nb_epoch=1,
              batch_size=10)

    # test config
    model.get_config()

    # compare to TimeDistributedDense
    test_input = np.random.random((1, 3, 4))
    test_output = model.predict(test_input)
    weights = model.layers[0].get_weights()

    reference = Sequential()
    reference.add(
        core.TimeDistributedDense(2, input_shape=(3, 4), weights=weights))
    reference.add(core.Activation('relu'))
    reference.compile(optimizer='rmsprop', loss='mse')
    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)

    # test when specifying a batch_input_shape
    reference = Sequential()
    reference.add(
        core.TimeDistributedDense(2,
                                  batch_input_shape=(1, 3, 4),
                                  weights=weights))
    reference.add(core.Activation('relu'))
    reference.compile(optimizer='rmsprop', loss='mse')
    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)

    # test with Convolution2D
    model = Sequential()
    model.add(
        wrappers.TimeDistributed(convolutional.Convolution2D(
            5, 2, 2, border_mode='same'),
                                 input_shape=(2, 4, 4, 3)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.train_on_batch(np.random.random((1, 2, 4, 4, 3)),
                         np.random.random((1, 2, 4, 4, 5)))

    model = model_from_json(model.to_json())
    model.summary()

    # test stacked layers
    model = Sequential()
    model.add(wrappers.TimeDistributed(core.Dense(2), input_shape=(3, 4)))
    model.add(wrappers.TimeDistributed(core.Dense(3)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.random((10, 3, 4)),
              np.random.random((10, 3, 3)),
              nb_epoch=1,
              batch_size=10)

    # test wrapping Sequential model
    model = Sequential()
    model.add(core.Dense(3, input_dim=2))
    outer_model = Sequential()
    outer_model.add(wrappers.TimeDistributed(model, input_shape=(3, 2)))
    outer_model.compile(optimizer='rmsprop', loss='mse')
    outer_model.fit(np.random.random((10, 3, 2)),
                    np.random.random((10, 3, 3)),
                    nb_epoch=1,
                    batch_size=10)

    # test with functional API
    x = Input(shape=(3, 2))
    y = wrappers.TimeDistributed(model)(x)
    outer_model = Model(x, y)
    outer_model.compile(optimizer='rmsprop', loss='mse')
    outer_model.fit(np.random.random((10, 3, 2)),
                    np.random.random((10, 3, 3)),
                    nb_epoch=1,
                    batch_size=10)
# read data from hard drive
train_data_raw = pd.read_csv("./input/train.csv").values
test_data_raw = pd.read_csv("./input/test.csv").values

img_cols = 28
img_rows = 28

train_X = train_data_raw[:, 1:].reshape(train_data_raw.shape[0], 1,
                                        img_rows, img_cols)
train_Y = kutils.to_categorical(train_data_raw[:, 0])
num_class = train_Y.shape[1]

num_filters_1 = 64
conv_dim = 3

cnn = kmodels.Sequential()
cnn.add(kconv.ZeroPadding2D((1, 1), input_shape=(1, 28, 28)))
cnn.add(kconv.Convolution2D(num_filters_1, conv_dim, conv_dim,
                            activation="relu"))
cnn.add(kpool.MaxPooling2D(strides=(2, 2)))

num_filters_2 = 128
cnn.add(kconv.ZeroPadding2D((1, 1)))
cnn.add(kconv.Convolution2D(num_filters_2, conv_dim, conv_dim,
                            activation="relu"))
cnn.add(kpool.MaxPooling2D(strides=(2, 2)))

conv_dim_2 = 3
cnn.add(kconv.ZeroPadding2D((1, 1)))
cnn.add(kconv.Convolution2D(num_filters_2, conv_dim_2, conv_dim_2,
                            activation="relu"))
cnn.add(kpool.MaxPooling2D(strides=(2, 2)))

cnn.add(kconv.ZeroPadding2D((1, 1)))
cnn.add(kconv.Convolution2D(num_filters_2, conv_dim_2, conv_dim_2,
                            activation="relu"))
cnn.add(kpool.MaxPooling2D(strides=(2, 2)))
nb_conv = 3

trainX = train[:, 1:].reshape(train.shape[0], 1, img_rows, img_cols)
trainX = trainX.astype(float)
trainX /= 255.0  # preprocess the data

trainY = kutils.to_categorical(train[:, 0])
nb_classes = trainY.shape[1]

cnn = models.Sequential()
cnn.add(conv.ZeroPadding2D((1, 1), input_shape=(1, 48, 48)))
cnn.add(conv.Convolution2D(32, 3, 3, activation="relu"))
cnn.add(conv.ZeroPadding2D((1, 1)))
cnn.add(conv.Convolution2D(32, 3, 3, activation="relu"))
cnn.add(conv.MaxPooling2D(strides=(2, 2)))

cnn.add(conv.ZeroPadding2D((1, 1)))
cnn.add(conv.Convolution2D(64, 3, 3, activation="relu"))
cnn.add(conv.ZeroPadding2D((1, 1)))
cnn.add(conv.Convolution2D(64, 3, 3, activation="relu"))
cnn.add(conv.MaxPooling2D(strides=(2, 2)))

# cnn.add(conv.ZeroPadding2D((1, 1)))
# cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu"))
# cnn.add(conv.ZeroPadding2D((1, 1)))
# cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu"))
# cnn.add(conv.ZeroPadding2D((1, 1)))
nb_actions = env.action_space.n

# Next, we build a very simple model.
model = Sequential()
# model.add(convolutional.Convolution2D(32, 3, 3, activation='tanh', dim_ordering='th',
#                                       input_shape=(1,) + env.observation_space.shape))
# model.add(pooling.MaxPooling2D(pool_size=(2, 2), dim_ordering='th'))
# model.add(convolutional.Convolution2D(32, 3, 3, activation='tanh', dim_ordering='th'))
# model.add(pooling.MaxPooling2D(pool_size=(2, 2), dim_ordering='th'))
# model.add(convolutional.Convolution2D(32, 3, 3, activation='tanh', dim_ordering='th'))
# model.add(pooling.MaxPooling2D(pool_size=(2, 2), dim_ordering='th'))
# model.add(convolutional.Convolution2D(16, 3, 3, activation='tanh', dim_ordering='th'))
# model.add(Flatten())
# model.add(Dense(128, activation='tanh'))
model.add(Reshape(env.observation_space.shape,
                  input_shape=(1,) + env.observation_space.shape))
model.add(convolutional.Convolution2D(32, 9, 9, subsample=(4, 4),
                                      activation='relu', dim_ordering='tf'))
model.add(convolutional.Convolution2D(32, 5, 5, subsample=(2, 2),
                                      activation='relu', dim_ordering='tf'))
model.add(convolutional.Convolution2D(32, 3, 3,
                                      activation='relu', dim_ordering='tf'))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())

# Finally, we configure and compile our agent. You can use every built-in
# Keras optimizer and even the metrics!
memory = SequentialMemory(limit=5000, window_length=1)
policy = BoltzmannQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory,
               nb_steps_warmup=128,
local_project_path = './'
local_data_path = os.path.join(local_project_path, 'data')

# ### Model
# <i>Inspired by [Navoshta](https://github.com/navoshta/behavioral-cloning) and
# [Jeremy Shannon](https://github.com/jeremy-shannon/CarND-Behavioral-Cloning-Project)</i>

if not False:
    model = Sequential()
    # Normalize
    model.add(
        convolutional.Convolution2D(16, 3, 3,
                                    input_shape=(32, 128, 3),
                                    activation='relu',
                                    W_regularizer=l2(0.001)))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(
        convolutional.Convolution2D(32, 3, 3,
                                    activation='relu',
                                    W_regularizer=l2(0.001)))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(
        convolutional.Convolution2D(64, 3, 3,
                                    activation='relu',
nb_filters_3 = 128  # 256
nb_conv = 3

X_train = train[:, 1:].reshape(train.shape[0], 28, 28, 1)
# X_train = train[:,1:].reshape(-1, 28, 28, 1)
X_train = X_train.astype(float)
X_train = X_train / 255.0
Y_train = kutils.to_categorical(train[:, 0])

cnn = models.Sequential()
cnn.add(
    conv.Convolution2D(nb_filters_1, nb_conv, nb_conv,
                       activation="relu",
                       input_shape=(28, 28, 1),
                       border_mode="same"))
cnn.add(
    conv.Convolution2D(nb_filters_1, nb_conv, nb_conv,
                       activation="relu",
                       border_mode="same"))
cnn.add(conv.MaxPooling2D(strides=(2, 2)))
cnn.add(
    conv.Convolution2D(nb_filters_2, nb_conv, nb_conv,
                       activation="relu",
import keras.models as kmodel
import keras.layers.convolutional as kconv
import keras.layers.core as klcore

ds = dataset.DataSet.load_from_path('usps', '../gmllib/datasets/usps')

# convert to 2D images
x_train = np.reshape(ds.train.x, (ds.train.N, 1, 16, 16))
x_test = np.reshape(ds.test.x, (ds.test.N, 1, 16, 16))

model = kmodel.Sequential()
model.add(
    kconv.Convolution2D(nb_filter=4, nb_row=5, nb_col=5,
                        input_shape=(1, 16, 16),
                        border_mode='valid'))
model.add(klcore.Activation('tanh'))
# instead of average pooling, we use max pooling
model.add(kconv.MaxPooling2D(pool_size=(2, 2)))
# the 12 feature maps in this layer are connected in a specific pattern to the
# layer below, but it is not possible to do this in keras easily. in fact, I
# don't know how keras connects the feature maps in one layer to the next.
model.add(kconv.Convolution2D(nb_filter=12, nb_row=5, nb_col=5))
model.add(klcore.Activation('tanh'))
model.add(kconv.MaxPooling2D(pool_size=(2, 2)))
model.add(klcore.Flatten())
model.add(klcore.Dense(output_dim=10))
model.add(klcore.Activation('softmax'))
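
# Training sketch (not from the original source) for the LeNet-style model
# above; it assumes ds.train.y / ds.test.y are one-hot encoded 10-class labels.
model.compile(optimizer='sgd', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, ds.train.y, nb_epoch=5, batch_size=32,
          validation_data=(x_test, ds.test.y))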
trainX = train[:, 1:].reshape(train.shape[0], 1, img_rows, img_cols)
trainX = trainX.astype(float)
trainX /= 255.0

trainY = kutils.to_categorical(train[:, 0])
nb_classes = trainY.shape[1]

cnn = models.Sequential()
cnn.add(conv.ZeroPadding2D((1, 1), input_shape=(1, img_rows, img_cols)))
cnn.add(conv.Convolution2D(filters[0], kernel, kernel))
cnn.add(core.Activation('relu'))
cnn.add(conv.MaxPooling2D(strides=(pool, pool)))

cnn.add(conv.ZeroPadding2D((1, 1)))
cnn.add(conv.Convolution2D(filters[1], kernel, kernel))
cnn.add(core.Activation('relu'))
cnn.add(conv.MaxPooling2D(strides=(pool, pool)))

cnn.add(conv.ZeroPadding2D((1, 1)))
cnn.add(core.Flatten())
cnn.add(core.Dropout(0.5))
cnn.add(core.Dense(128))
cnn.add(core.Activation('relu'))
nb_filters_2 = 64  # 128
nb_filters_3 = 128  # 256
kernel_size = 5

trainX = train[:, 1:].reshape(train.shape[0], 1, img_rows, img_cols)
trainX = trainX.astype(float)
trainX /= 255.0  # preprocess the data

trainY = kutils.to_categorical(train[:, 0])
nb_classes = trainY.shape[1]

cnn = models.Sequential()
cnn.add(conv.ZeroPadding2D((1, 1), input_shape=(1, img_rows, img_cols)))
cnn.add(
    conv.Convolution2D(nb_filters_2, kernel_size, kernel_size,
                       activation="relu"))
cnn.add(
    conv.Convolution2D(nb_filters_2, kernel_size, kernel_size,
                       activation="relu"))
cnn.add(conv.MaxPooling2D(strides=(2, 2)))

# cnn.add(conv.ZeroPadding2D((1, 1)))
cnn.add(
    conv.Convolution2D(nb_filters_2, kernel_size, kernel_size,
                       activation="relu"))
cnn.add(
    conv.Convolution2D(nb_filters_2,
def convnet_alexnet_lion_keras(image_dims):
    # model = Sequential()
    # model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=image_dims))
    NR_CLASSES = 6

    input = layers.Input(shape=image_dims, name="Input")
    conv_1 = convolutional.Convolution2D(96, 11, 11,
                                         border_mode='valid',
                                         name="conv_1",
                                         activation='relu',
                                         init='glorot_uniform')(input)
    pool_1 = convolutional.MaxPooling2D(pool_size=(3, 3),
                                        name="pool_1")(conv_1)
    zero_padding_1 = convolutional.ZeroPadding2D(
        padding=(1, 1), name="zero_padding_1")(pool_1)
    conv_2 = convolutional.Convolution2D(256, 3, 3,
                                         border_mode='valid',
                                         name="conv_2",
                                         activation='relu',
                                         init='glorot_uniform')(zero_padding_1)
    pool_2 = convolutional.MaxPooling2D(pool_size=(3, 3),
                                        name="pool_2")(conv_2)
    zero_padding_2 = keras.layers.convolutional.ZeroPadding2D(
        padding=(1, 1), name="zero_padding_2")(pool_2)
    conv_3 = convolutional.Convolution2D(384, 3, 3,
                                         border_mode='valid',
                                         name="conv_3",
                                         activation='relu',
                                         init='glorot_uniform')(zero_padding_2)
    conv_4 = convolutional.Convolution2D(384, 3, 3,
                                         border_mode='valid',
                                         name="conv_4",
                                         activation='relu',
                                         init='glorot_uniform')(conv_3)
    conv_5 = convolutional.Convolution2D(256, 3, 3,
                                         border_mode='valid',
                                         name="conv_5",
                                         activation='relu',
                                         init='glorot_uniform')(conv_4)
    pool_3 = convolutional.MaxPooling2D(pool_size=(3, 3),
                                        name="pool_3")(conv_5)

    flatten = core.Flatten(name="flatten")(pool_3)
    fc_1 = core.Dense(4096, name="fc_1",
                      activation='relu', init='glorot_uniform')(flatten)
    fc_1 = core.Dropout(0.5, name="fc_1_dropout")(fc_1)
    output = core.Dense(4096, name="Output",
                        activation='relu', init='glorot_uniform')(fc_1)
    output = core.Dropout(0.5, name="Output_dropout")(output)
    fc_2 = core.Dense(NR_CLASSES, name="fc_2",
                      activation='softmax', init='glorot_uniform')(output)

    return models.Model([input], [fc_2])
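
# Usage sketch (not from the original source): 96x96x3 is an arbitrary
# assumption (any input size that survives the three 3x3 poolings works),
# and it presumes TensorFlow dim ordering for the channels-last shape.
model = convnet_alexnet_lion_keras((96, 96, 3))
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.summary()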
from sklearn import model_selection
from keras import models, optimizers
from keras.layers import convolutional, Lambda, ELU, pooling, core

### Load and split data
df = pd.io.parsers.read_csv('driving_log.csv')
train_data, valid_data = model_selection.train_test_split(df, test_size=.2)

### Cameras setting
cameras = ['left', 'center', 'right']
cameras_steering_correction = [.25, 0., -.25]

### Train Model
model = models.Sequential()
model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=(32, 128, 3)))
model.add(convolutional.Convolution2D(16, 3, 3, input_shape=(32, 128, 3)))
model.add(ELU())
model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
model.add(convolutional.Convolution2D(32, 3, 3))
model.add(ELU())
model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
model.add(convolutional.Convolution2D(64, 3, 3))
model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
model.add(core.Flatten())
model.add(core.Dense(500))
model.add(ELU())
model.add(core.Dropout(.5))
model.add(core.Dense(100))
model.add(ELU())
model.add(core.Dropout(.25))
model.add(core.Dense(20))