def build_deep_autoencoder(img_shape, code_size): """ Deep autoencoding """ H,W,C = img_shape # encoder encoder = keras.models.Sequential() encoder.add(L.InputLayer(img_shape)) encoder.add(L.Conv2D(32, (3, 3), strides = (1, 1), padding="same", activation="elu")) encoder.add(L.MaxPooling2D((2, 2))) encoder.add(L.Conv2D(64, (3, 3), strides = (1, 1), padding="same", activation="elu")) encoder.add(L.MaxPooling2D((2, 2))) encoder.add(L.Conv2D(128, (3, 3), strides = (1, 1), padding="same", activation="elu")) encoder.add(L.MaxPooling2D((2, 2))) encoder.add(L.Conv2D(256, (3, 3), strides = (1, 1), padding="same", activation="elu")) encoder.add(L.MaxPooling2D((2, 2))) encoder.add(L.Flatten()) # flatten image to vector encoder.add(L.Dense(code_size)) # actual encoder # decoder decoder = keras.models.Sequential() decoder.add(L.InputLayer((code_size,))) decoder.add(L.Dense(2*2*256)) #actual encoder decoder.add(L.Reshape((2,2,256))) #un-flatten decoder.add(L.Conv2DTranspose(filters=128, kernel_size=(3, 3), strides=2, activation="elu", padding="same")) decoder.add(L.Conv2DTranspose(filters=64, kernel_size=(3, 3), strides=2, activation="elu", padding="same")) decoder.add(L.Conv2DTranspose(filters=32, kernel_size=(3, 3), strides=2, activation="elu", padding="same")) decoder.add(L.Conv2DTranspose(filters=3, kernel_size=(3, 3), strides=2, activation=None, padding="same")) return encoder, decoder
def create_pca_autoencoder(img_shape, lr, emb_size):
    """Creates a linear PCA autoencoder.

    Args:
        img_shape (tuple): Shape of input image.
        lr (float): Learning rate of the optimizer for training the autoencoder model.
        emb_size (int): No. of embedding dims for encoder output.

    Returns:
        keras.models.Model: The autoencoder model.
    """
    # Encoder (Image -> Embedding)
    encoder = keras.models.Sequential()
    encoder.add(L.InputLayer(img_shape))
    encoder.add(L.Flatten())
    encoder.add(L.Dense(emb_size))

    # Decoder (Embedding -> Image)
    decoder = keras.models.Sequential()
    decoder.add(L.InputLayer((emb_size,)))
    decoder.add(L.Dense(np.prod(img_shape)))
    decoder.add(L.Reshape(img_shape))

    return build_autoencoder(img_shape, encoder, decoder, lr=lr)
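# A minimal usage sketch for create_pca_autoencoder. It assumes that
# `build_autoencoder` (called above but not defined in this snippet) returns a
# compiled keras Model; the 32x32x3 image shape and dummy batch are
# illustrative assumptions only.
X_dummy = np.random.rand(16, 32, 32, 3).astype("float32")
pca_autoencoder = create_pca_autoencoder(img_shape=(32, 32, 3), lr=1e-3, emb_size=32)
pca_autoencoder.fit(X_dummy, X_dummy, epochs=1)  # reconstruct inputs from the embedding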
def build_deep_autoencoder(img_shape, code_size): """PCA's deeper brother. See instructions above. Use `code_size` in layer definitions.""" H,W,C = img_shape # encoder encoder = keras.models.Sequential() encoder.add(L.InputLayer(img_shape)) ### YOUR CODE HERE: define encoder as per instructions above ### encoder.add(L.Conv2D(32, (3, 3), strides = (1, 1), padding="same", activation='elu')) encoder.add(L.MaxPooling2D((2, 2))) encoder.add(L.Conv2D(64, (3, 3), strides = (1, 1), padding="same", activation='elu')) encoder.add(L.MaxPooling2D((2, 2))) encoder.add(L.Conv2D(128, (3, 3), strides = (1, 1), padding="same", activation='elu')) encoder.add(L.MaxPooling2D((2, 2))) encoder.add(L.Conv2D(256, (3, 3), strides = (1, 1), padding="same", activation='elu')) encoder.add(L.MaxPooling2D((2, 2))) encoder.add(L.Flatten()) #flatten image to vector encoder.add(L.Dense(code_size)) #actual encoder # decoder decoder = keras.models.Sequential() decoder.add(L.InputLayer((code_size,))) ### YOUR CODE HERE: define decoder as per instructions above ### decoder.add(L.Dense(2*2*256)) #actual encoder decoder.add(L.Reshape((2,2,256))) #un-flatten decoder.add(L.Conv2DTranspose(filters=128, kernel_size=(3, 3), strides=2, activation='elu', padding='same')) decoder.add(L.Conv2DTranspose(filters=64, kernel_size=(3, 3), strides=2, activation='elu', padding='same')) decoder.add(L.Conv2DTranspose(filters=32, kernel_size=(3, 3), strides=2, activation='elu', padding='same')) decoder.add(L.Conv2DTranspose(filters=3, kernel_size=(3, 3), strides=2, activation=None, padding='same')) return encoder, decoder
def test_merge_mul(self):
    z1 = ZLayer.InputLayer(input_shape=(3, 5))
    z2 = ZLayer.InputLayer(input_shape=(3, 5))
    zlayer = ZLayer.Merge(layers=[z1, z2], mode="mul")
    k1 = KLayer.InputLayer(input_shape=(3, 5))
    k2 = KLayer.InputLayer(input_shape=(3, 5))
    klayer = KLayer.Merge(layers=[k1, k2], mode="mul")
    input_data = [np.random.random([2, 3, 5]), np.random.random([2, 3, 5])]
    self.compare_layer(klayer, zlayer, input_data)
def test_merge_mul(self):
    b1 = BLayer.InputLayer(input_shape=(3, 5))
    b2 = BLayer.InputLayer(input_shape=(3, 5))
    blayer = BLayer.Merge(layers=[b1, b2], mode="mul")
    k1 = KLayer.InputLayer(input_shape=(3, 5))
    k2 = KLayer.InputLayer(input_shape=(3, 5))
    klayer = KLayer.Merge(layers=[k1, k2], mode="mul")
    input_data = [np.random.random([2, 3, 5]), np.random.random([2, 3, 5])]
    self.compare_newapi(klayer, blayer, input_data)
def test_merge_concat(self):
    z1 = ZLayer.InputLayer(input_shape=(2, 5, 11))
    z2 = ZLayer.InputLayer(input_shape=(2, 5, 8))
    zlayer = ZLayer.Merge(layers=[z1, z2], mode="concat")
    k1 = KLayer.InputLayer(input_shape=(2, 5, 11))
    k2 = KLayer.InputLayer(input_shape=(2, 5, 8))
    klayer = KLayer.Merge(layers=[k1, k2], mode="concat")
    input_data = [np.random.random([3, 2, 5, 11]), np.random.random([3, 2, 5, 8])]
    self.compare_layer(klayer, zlayer, input_data)
def build_deep_autoencoder(img_shape, code_size): """PCA's deeper brother. See instructions above. Use `code_size` in layer definitions.""" # encoder encoder = keras.models.Sequential() encoder.add(L.InputLayer(img_shape)) encoder.add( L.Conv2D(filters=32, kernel_size=3, padding='same', activation='elu')) encoder.add(L.MaxPooling2D(pool_size=2)) encoder.add( L.Conv2D(filters=64, kernel_size=3, padding='same', activation='elu')) encoder.add(L.MaxPooling2D(pool_size=2)) encoder.add( L.Conv2D(filters=128, kernel_size=3, padding='same', activation='elu')) encoder.add(L.MaxPooling2D(pool_size=2)) encoder.add( L.Conv2D(filters=256, kernel_size=3, padding='same', activation='elu')) encoder.add(L.MaxPooling2D(pool_size=2)) encoder.add(L.Flatten()) encoder.add(L.Dense(code_size)) # decoder decoder = keras.models.Sequential() decoder.add(L.InputLayer((code_size, ))) decoder.add(L.Dense(2 * 2 * 256)) decoder.add(L.Reshape((2, 2, 256))) decoder.add( L.Conv2DTranspose(filters=128, kernel_size=(3, 3), strides=2, activation='elu', padding='same')) decoder.add( L.Conv2DTranspose(filters=64, kernel_size=(3, 3), strides=2, activation='elu', padding='same')) decoder.add( L.Conv2DTranspose(filters=32, kernel_size=(3, 3), strides=2, activation='elu', padding='same')) decoder.add( L.Conv2DTranspose(filters=3, kernel_size=(3, 3), strides=2, activation=None, padding='same')) return encoder, decoder
def test_merge_max(self):
    b1 = BLayer.InputLayer(input_shape=(2, 5, 8))
    b2 = BLayer.InputLayer(input_shape=(2, 5, 8))
    blayer = BLayer.Merge(layers=[b1, b2], mode="max")
    k1 = KLayer.InputLayer(input_shape=(2, 5, 8))
    k2 = KLayer.InputLayer(input_shape=(2, 5, 8))
    klayer = KLayer.Merge(layers=[k1, k2], mode="max")
    input_data = [np.random.random([3, 2, 5, 8]), np.random.random([3, 2, 5, 8])]
    self.compare_newapi(klayer, blayer, input_data)
def create_discriminator(self):
    """Model to distinguish real images from generated ones."""
    discriminator = Sequential()
    discriminator.add(L.InputLayer(self.IMG_SHAPE))
    discriminator.add(L.Conv2D(16, kernel_size=(7, 7), padding='same', activation='elu'))
    discriminator.add(L.Conv2D(16, kernel_size=(7, 7), padding='same', activation='elu'))
    discriminator.add(L.AveragePooling2D(strides=2))
    discriminator.add(L.Conv2D(32, kernel_size=(5, 5), padding='same', activation='elu'))
    discriminator.add(L.Conv2D(32, kernel_size=(5, 5), padding='same', activation='elu'))
    discriminator.add(L.AveragePooling2D(strides=2))
    discriminator.add(L.Conv2D(64, kernel_size=(3, 3), padding='same', activation='elu'))
    discriminator.add(L.Conv2D(64, kernel_size=(3, 3), padding='same', activation='elu'))
    discriminator.add(L.AveragePooling2D(strides=2))
    discriminator.add(L.Flatten())
    discriminator.add(L.Dense(256, activation='tanh'))
    discriminator.add(L.Dense(2, activation=tf.nn.log_softmax))
    self.discriminator = discriminator
    print('Discriminator created successfully.')
def FC_embedding(input_shape, embedding=False, hiddens=[500, 200], dropout=True, activations=['relu', 'relu']):
    """For prototyping purposes: a fully connected neural network with an embedding layer."""
    if len(hiddens) != len(activations):
        raise ValueError('Number of hidden layers must match the number of activations')
    if not embedding:
        raise ValueError('Can only use FC_embedding by supplying a keras.layers.Embedding() layer')
    model = Sequential()
    model.add(layers.InputLayer(input_shape=(input_shape,)))
    model.add(embedding)
    model.add(layers.Flatten())
    return basic_FNN(model, hiddens=hiddens, dropout=dropout, activations=activations)
def create_discriminator_model():
    model = Sequential()
    model.add(L.InputLayer(input_shape=data_sample.IMG_SHAPE))
    model.add(L.Conv2D(filters=32, kernel_size=[3, 3]))
    model.add(L.AveragePooling2D(pool_size=[2, 2]))
    model.add(L.Activation(activation='elu'))
    model.add(L.Conv2D(filters=64, kernel_size=[3, 3]))
    model.add(L.AveragePooling2D(pool_size=[2, 2]))
    model.add(L.Activation(activation='elu'))
    model.add(L.Flatten())
    model.add(L.Dense(units=256, activation='tanh'))
    model.add(L.Dense(units=2, activation=tf.nn.log_softmax))
    return model
def myModel(vocab_size, embedding_dim, maxlen, embedding_matrix, X_train, labels_train, X_test, labels_test):
    model2 = Sequential()
    model2.add(layers.InputLayer(input_shape=(maxlen,)))  # input length must match the padded sequences
    # Taking advantage of the Embedding layer of Keras.
    # To initialize an embedding layer, three parameters are required: input_dim, which is the
    # vocab size of the text data; output_dim, which is the size of the output vector for each
    # word; and input_length, which is the length of the input sequences (sentences).
    # Since we're using GloVe, we seed the layer with its pretrained weights and allow them
    # to be fine-tuned for higher accuracy.
    model2.add(layers.Embedding(input_dim=vocab_size,
                                output_dim=embedding_dim,
                                input_length=maxlen,
                                weights=[embedding_matrix],
                                trainable=True))
    model2.add(layers.GlobalMaxPool1D())
    model2.add(layers.Dense(10, activation='relu'))
    model2.add(layers.Dense(1, activation='sigmoid'))
    model2.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    history = model2.fit(X_train, labels_train,
                         epochs=100,
                         verbose=False,
                         validation_data=(X_test, labels_test),
                         batch_size=10)
    model2.save('model.h5')  # save after fitting so the trained weights are persisted
    model2.summary()
    loss, accuracy = model2.evaluate(X_train, labels_train, verbose=False)
    print("Training Accuracy: {:.4f}".format(accuracy))
    loss, accuracy = model2.evaluate(X_test, labels_test, verbose=False)
    print("Testing Accuracy: {:.4f}".format(accuracy))
def tcn2d_model(inp, tcn_params, tcn_name, interpretable):
    if interpretable:
        out = inp
    else:
        out_inp = layers.InputLayer(input_shape=inp.get_shape().as_list()[1:], name='out_inp')
        out = out_inp.output
    out = TCN2D(nb_filters=tcn_params['filters'],
                dilations=tcn_params['dilations'],
                kernel_size=tcn_params['kernel_size'],
                dropout_rate=tcn_params['dropout_rate'],
                bias_constraint=None,
                padding=tcn_params['padding'],
                nb_stacks=tcn_params['nb_stacks'],
                lambda_layer=False,
                return_sequences=True,
                use_skip_connections=tcn_params['use_skip_connections'],
                name=tcn_name)(out)
    if interpretable:
        return out
    else:
        return models.Model(inputs=out_inp.input, outputs=out, name=tcn_name)(inp)
def __init__(self, env, alpha, epsilon, gamma, state_dim, n_actions):
    self.alpha = alpha
    self.env = env
    self.epsilon = epsilon
    self.gamma = gamma
    self.state_dim = state_dim
    self.n_actions = n_actions  # parameter renamed from `n_action`, which was never defined

    with tf.device("/device:GPU:0"):
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.2
        self.__sess = tf.Session(config=config)
        keras.backend.set_session(self.__sess)

        self.__network = keras.models.Sequential()
        self.__network.add(L.InputLayer(state_dim))
        self.__network.add(L.Dense(50, activation="relu"))
        self.__network.add(L.Dense(50, activation="relu"))
        self.__network.add(L.Dense(self.n_actions))

        self.__states_ph = keras.backend.placeholder(dtype="float32", shape=(None,) + self.state_dim)
        # actions are a 1-D vector of int indices, one per transition
        # (the original `shape=(None) + self.n_actions` would raise a TypeError)
        self.__actions_ph = keras.backend.placeholder(dtype="int32", shape=[None])
        self.__rewards_ph = keras.backend.placeholder(dtype="float32", shape=[None])
        self.__next_states_ph = keras.backend.placeholder(dtype="float32", shape=(None,) + self.state_dim)
        self.__is_done_ph = keras.backend.placeholder(dtype="bool", shape=[None])

        # get q-values for all actions in current states
        self.__predicted_qvalues = self.__network(self.__states_ph)

        # select q-values for chosen actions
        self.__predicted_qvalues_for_actions = tf.reduce_sum(
            self.__predicted_qvalues * tf.one_hot(self.__actions_ph, self.n_actions),
            axis=1)

        self.__predicted_next_qvalues = self.__network(self.__next_states_ph)

        # compute V*(next_states) using predicted next q-values
        self.__next_state_values = tf.reduce_max(self.__predicted_next_qvalues, axis=1)

        # compute "target q-values" for the loss: r(s,a) + gamma * V*(s')
        self.__target_qvalues_for_actions = (
            self.__rewards_ph + self.gamma * self.__next_state_values)

        # at terminal states use the simplified formula Q(s,a) = r(s,a), since s' doesn't exist
        self.__target_qvalues_for_actions = tf.where(
            self.__is_done_ph, self.__rewards_ph, self.__target_qvalues_for_actions)

        self.__square_loss = (
            self.__predicted_qvalues_for_actions
            - tf.stop_gradient(self.__target_qvalues_for_actions)) ** 2
        self.__loss = tf.reduce_mean(self.__square_loss)
        self.__train_step = tf.train.AdamOptimizer(self.alpha).minimize(self.__loss)
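# A hypothetical epsilon-greedy helper, meant as a method of the same class;
# it is not part of the original snippet, just a sketch of how the network
# above would typically be queried when acting in the environment.
def get_action(self, state):
    """Pick an action, exploring at random with probability `epsilon`."""
    q_values = self.__network.predict(state[None])[0]  # Q(s, a) for every action
    if np.random.random() < self.epsilon:
        return np.random.choice(self.n_actions)  # explore
    return np.argmax(q_values)                   # exploit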
def build_model(input_shape, n_outputs, scale_depth=3, scale_width=8, **model_params):
    # clear already built graph
    K.clear_session()

    # prime dense layer with parameters
    activation = model_params.get("activation", layers.ReLU)
    regularizer = regularizers.l2(model_params.get("l2_lambda", 0))
    primed_dense = partial(layers.Dense, 64 * scale_width, kernel_regularizer=regularizer)

    # build model
    model = models.Sequential()
    # input layer
    model.add(layers.InputLayer(input_shape=input_shape))
    # hidden layers
    model.add(layers.Flatten())
    for i in range(scale_depth):
        model.add(primed_dense())
        # activation func
        model.add(activation())
        if "dropout_prob" in model_params and model_params["dropout_prob"]:
            model.add(layers.Dropout(model_params["dropout_prob"]))
    # output layers
    model.add(layers.Dense(n_outputs, activation="softmax"))
    return model
def output_model_askbid(inp, params, output_shape, interpretable, **kwargs):
    if interpretable:
        out = inp
    else:
        out_inp = layers.InputLayer(input_shape=inp.get_shape().as_list()[1:], name='out_inp')
        out = out_inp.output
    out = layers.Cropping2D(cropping=((out.shape[1].value - 1, 0), (0, 0)), name='out_cropping')(out)
    out = layers.Reshape(target_shape=[i.value for i in out.shape[2:]], name='out_reshape')(out)
    out_ask = output_model_b(out, params, output_shape[0],
                             interpretable=kwargs.get('interpretable_nested', True), name='ask')
    out_bid = output_model_b(out, params, output_shape[0],
                             interpretable=kwargs.get('interpretable_nested', True), name='bid')
    out = layers.concatenate([out_ask, out_bid], name='out_concatenate')
    if interpretable:
        return out
    else:
        return models.Model(inputs=out_inp.input, outputs=out, name='out')(inp)
def output_model_b(inp, params, output_shape, interpretable, name=''):
    # h = params.get('output').get('h', output_shape)
    if interpretable:
        out = inp
    else:
        out_inp = layers.InputLayer(input_shape=inp.get_shape().as_list()[1:], name=f'out_{name}_inp')
        out = out_inp.output
    filters = params['output'].get('filters', None)
    for i, f in enumerate(filters):
        out = layers.Dense(f, name=f'out_{name}_dense{i}')(out)
        out = PReLU2(name=f'out_{name}_dense{i}_relu')(out)
        out = layers.BatchNormalization(name=f'out_{name}_dense{i}_bn')(out)
    out = layers.Flatten(name=f'out_{name}_flatten')(out)
    out_p = layers.Dense(output_shape, name=f'out_{name}_out_pos')(out)
    out_p = PReLU2(name=f'out_{name}_out_pos_relu')(out_p)
    out_n = layers.Lambda(lambda x: x * -1, name=f'out_{name}_out_neg0')(out)
    out_n = layers.Dense(output_shape,
                         # activation='relu',
                         name=f'out_{name}_out_neg')(out_n)
    out_n = PReLU2(name=f'out_{name}_out_neg_relu')(out_n)
    out = layers.Subtract(name=f'out_{name}_out')([out_p, out_n])
    out = layers.Reshape(target_shape=out.get_shape().as_list()[1:] + [1], name=f'out_{name}_reshape')(out)
    if interpretable:
        return out
    else:
        return models.Model(inputs=out_inp.input, outputs=out, name=f'out_{name}')(inp)
def CNN(input_shape, filters=[64, 64], filter_sizes=[5, 5], dropout=True, batch_norm=True, maxpool_size=5, activation='relu'):
    """Basic convolutional neural network without an embedding layer."""
    model = Sequential()
    model.add(layers.InputLayer(input_shape=(input_shape,)))
    model.add(layers.Reshape((input_shape, 1)))
    model = basic_CNN(model,
                      filters=filters,
                      filter_sizes=filter_sizes,
                      dropout=dropout,
                      batch_norm=batch_norm,
                      activation=activation)
    model.add(layers.Flatten())
    model.add(layers.Dense(128))
    model.add(layers.Activation(activation))
    return model
def create_generator_model():
    model = Sequential()
    model.add(L.InputLayer(input_shape=[data_sample.CODE_SIZE], name="noise"))
    model.add(L.Dense(8 * 8 * 10, activation='elu'))
    model.add(L.Reshape([8, 8, 10]))
    # L.Deconv2D is the legacy Keras alias for L.Conv2DTranspose
    model.add(L.Deconv2D(filters=64, kernel_size=[5, 5], activation='elu'))
    model.add(L.Deconv2D(filters=64, kernel_size=[5, 5], activation='elu'))
    model.add(L.UpSampling2D(size=[2, 2]))
    model.add(L.Deconv2D(filters=32, kernel_size=[3, 3], activation='elu'))
    model.add(L.Deconv2D(filters=32, kernel_size=[3, 3], activation='elu'))
    model.add(L.Deconv2D(filters=32, kernel_size=[3, 3], activation='elu'))
    model.add(L.Conv2D(filters=3, kernel_size=[3, 3], activation=None))
    return model
def _get_base_model(image_size):
    model = keras.models.Sequential()
    model.add(L.InputLayer(input_shape=[image_size, image_size, 1]))
    model.add(L.Conv2D(filters=200, kernel_size=(3, 3), strides=3))
    model.add(L.BatchNormalization())
    model.add(L.Activation('relu'))
    model.add(L.Conv2D(filters=80, kernel_size=(3, 3)))
    model.add(L.MaxPooling2D(pool_size=(2, 2)))
    model.add(L.Activation('relu'))
    model.add(L.Dropout(0.15))
    model.add(L.Conv2D(filters=80, kernel_size=(4, 4)))
    model.add(L.MaxPooling2D(pool_size=(2, 2)))
    model.add(L.Activation('relu'))
    model.add(L.Dropout(0.15))
    model.add(L.Conv2D(filters=20, kernel_size=(5, 5)))
    model.add(L.BatchNormalization())
    model.add(L.Activation('relu'))
    model.add(L.Flatten())
    model.add(L.Dense(600, activation='relu'))
    model.add(L.Dropout(0.15))
    model.add(L.Dense(200, activation='relu'))
    model.add(L.Dropout(0.15))
    return model
def create_model(trial):
    # We optimize the number of conv/dense layers, the hidden units and dropout
    # in each layer, and the learning rate of the RMSprop optimizer.
    model = Sequential()
    model.add(layers.InputLayer(input_shape=input_shape))
    n_conv2d_layers = trial.suggest_int('n_conv2d_layers', 1, 4)
    for i in range(n_conv2d_layers):
        num_filters = trial.suggest_int('n_filters_l{}'.format(i), 8, 64)
        model.add(Conv2D(filters=num_filters,  # use the sampled filter count (was hard-coded to 32)
                         kernel_size=3,
                         strides=1,
                         padding='same',
                         activation='relu'))
    model.add(Flatten())
    n_layers = trial.suggest_int('n_layers', 1, 2)
    for i in range(n_layers):
        num_hidden = int(trial.suggest_loguniform('n_units_l{}'.format(i), 50, 1000))
        model.add(Dense(num_hidden, activation='relu'))
        # dropout = trial.suggest_uniform('dropout_l{}'.format(i), 0.2, 0.5)
        # model.add(Dropout(rate=dropout))
    model.add(Dense(n, activation='softmax'))  # `n` (the class count) comes from the enclosing scope
    # We compile our model with a sampled learning rate.
    lr = trial.suggest_loguniform('lr', 1e-5, 1e-1)
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.RMSprop(lr=lr),
                  metrics=['accuracy'])
    return model
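# A minimal sketch of driving create_model from an Optuna study. The objective
# body (the x_train/y_train/x_valid/y_valid names, epoch count, and trial
# budget) is assumed for illustration, not taken from the original snippet.
import optuna

def objective(trial):
    model = create_model(trial)
    model.fit(x_train, y_train, epochs=5, batch_size=128, verbose=0)
    _, accuracy = model.evaluate(x_valid, y_valid, verbose=0)
    return accuracy  # Optuna maximizes this value

study = optuna.create_study(direction='maximize')
study.optimize(objective, n_trials=50)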
def fe1o(input, params, interpretable):
    if interpretable:
        out = input
    else:
        inp = layers.InputLayer(input_shape=input.get_shape().as_list()[1:], name='inp0')
        out = inp.output
    dim = out.get_shape().as_list()
    out = layers.Reshape([dim[1], dim[2] // 2, 2])(out)
    out = layers.Lambda(lambda x: x[:, :, :2, :1], name='fe_lambda_ask_bid_price')(out)
    out = layers.BatchNormalization(axis=bn_axis, name='fe_bn0')(out)
    out1 = layers.Conv2D(filters=params['features']['filters'][1],
                         kernel_size=(1, 1),
                         padding='valid',
                         activation='relu',
                         bias_constraint=eval(params['features']['bias_constraint']),
                         name='fe_order_l1a')(out)
    out2 = layers.Conv2D(filters=params['features']['filters'][1],
                         kernel_size=(1, 2),
                         padding='same',
                         activation='relu',
                         bias_constraint=eval(params['features']['bias_constraint']),
                         name='fe_order_l1b')(out)
    out = layers.Concatenate(axis=-2, name='fe_order_concat')([out1, out2])
    out = layers.BatchNormalization(axis=bn_axis, name='fe_out_bn')(out)
    out = inception2D(out, 128 // 4, 'fe_out_inc', eval(params['output']['bias_constraint']))
    if interpretable:
        return out
    else:
        return models.Model(inputs=inp.input, outputs=out, name='fe0o')(input)
def feedForwardNetwork(nHiddenUnits, removeProb, xData, yData, nInput, goTraining,
                       filename, theSeed, batchSize, epochSize, activationFunction):
    if goTraining:
        tf.reset_default_graph()
        config1 = tf.ConfigProto()
        config1.intra_op_parallelism_threads = 4
        config1.inter_op_parallelism_threads = 4
        sess = tf.Session(config=config1)
        kB.set_session(sess)

    theModel = krs.models.Sequential()
    theModel.add(kLayers.InputLayer(input_shape=(nInput,)))
    theModel.add(kLayers.Dropout(rate=removeProb[0]))
    if nHiddenUnits:
        for i in range(0, len(nHiddenUnits)):
            theModel.add(kLayers.Dense(
                nHiddenUnits[i],
                activation=activationFunction,
                use_bias=True,
                kernel_initializer=krs.initializers.RandomNormal(mean=0.0, stddev=0.1, seed=theSeed),
                bias_initializer=krs.initializers.RandomNormal(mean=0.0, stddev=0.1, seed=theSeed)))
            theModel.add(kLayers.Dropout(rate=removeProb[i + 1], seed=theSeed))
    theModel.add(kLayers.Dense(
        1,
        use_bias=True,
        kernel_initializer=krs.initializers.RandomNormal(mean=0.0, stddev=0.1, seed=theSeed),
        bias_initializer=krs.initializers.RandomNormal(mean=0.0, stddev=0.1, seed=theSeed)))

    if goTraining:
        theOptimizer = krs.optimizers.Nadam(lr=0.001)
        theModel.compile(loss='mean_squared_error', optimizer=theOptimizer, metrics=['mean_squared_error'])
        theModel.fit(x=xData, y=yData, batch_size=batchSize, epochs=epochSize)
        theModel.save_weights(filename)
    else:
        theModel.load_weights(filename)
    return theModel
def make_model_full(inshape, num_classes, weights_file=None):
    model = Sequential()
    model.add(KL.InputLayer(input_shape=inshape[1:]))
    # model.add(KL.Conv2D(32, (3, 3), padding='same', input_shape=inshape[1:]))
    model.add(KL.Conv2D(32, (3, 3), padding='same'))
    model.add(KL.Activation('relu'))
    model.add(KL.Conv2D(32, (3, 3)))
    model.add(KL.Activation('relu'))
    model.add(KL.MaxPooling2D(pool_size=(2, 2)))
    model.add(KL.Dropout(0.25))
    model.add(KL.Conv2D(64, (3, 3), padding='same'))
    model.add(KL.Activation('relu'))
    model.add(KL.Conv2D(64, (3, 3)))
    model.add(KL.Activation('relu'))
    model.add(KL.MaxPooling2D(pool_size=(2, 2)))
    model.add(KL.Dropout(0.25))
    model.add(KL.Flatten())
    model.add(KL.Dense(512))
    model.add(KL.Activation('relu'))
    model.add(KL.Dropout(0.5))
    model.add(KL.Dense(num_classes))
    model.add(KL.Activation('softmax'))
    if weights_file is not None and os.path.exists(weights_file):
        model.load_weights(weights_file)
    return model
def build_deep_autoencoder(img_shape, code_size): """PCA's deeper brother. See instructions above. Use `code_size` in layer definitions.""" H,W,C = img_shape # encoder encoder = keras.models.Sequential() encoder.add(L.InputLayer(img_shape)) ### YOUR CODE HERE: define encoder as per instructions above ### # decoder decoder = keras.models.Sequential() decoder.add(L.InputLayer((code_size,))) ### YOUR CODE HERE: define decoder as per instructions above ### return encoder, decoder
def build_pca_autoencoder(img_shape, code_size):
    """
    Here we define a simple linear autoencoder as described above.
    We also flatten and un-flatten data to be compatible with image shapes.
    """
    encoder = keras.models.Sequential()
    encoder.add(L.InputLayer(img_shape))
    encoder.add(L.Flatten())         # flatten image to vector
    encoder.add(L.Dense(code_size))  # actual encoder

    decoder = keras.models.Sequential()
    decoder.add(L.InputLayer((code_size,)))
    decoder.add(L.Dense(np.prod(img_shape)))  # actual decoder, height*width*3 units
    decoder.add(L.Reshape(img_shape))         # un-flatten
    return encoder, decoder
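# A quick round-trip sketch for build_pca_autoencoder; the 32x32x3 shape and
# the random batch are illustrative assumptions only.
encoder, decoder = build_pca_autoencoder(img_shape=(32, 32, 3), code_size=32)
batch = np.random.rand(4, 32, 32, 3).astype("float32")
codes = encoder.predict(batch)            # -> (4, 32)
reconstructions = decoder.predict(codes)  # -> (4, 32, 32, 3)
assert reconstructions.shape == batch.shape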
def make_model(x_train_input, nclasses):
    '''Sequential (non-functional-API) model definition.'''
    model = Sequential()
    model.add(KL.InputLayer(input_tensor=x_train_input))
    ll = cnn_layers_list(nclasses)
    for il in ll:
        model.add(il)
    return model
def get_model(name: str):
    if name == 'dense':
        return keras.Sequential([
            layers.InputLayer(input_shape=(85,)),
            layers.Dense(70, name='hidden1', activation='relu'),
            layers.Dense(60, name='hidden2', activation='relu'),
            layers.Dense(30, name='hidden3', activation='relu'),
            layers.Dense(10, name='output', activation='softmax')
        ], 'poker_predictor')
    if name == 'conv':
        return keras.Sequential([
            layers.InputLayer(input_shape=(5, 17, 1)),
            layers.Conv2D(10, (5, 5), name='conv'),
            layers.GlobalMaxPooling2D(),
            layers.Dense(20, name='hidden1', activation='relu'),
            layers.Dense(20, name='hidden2', activation='relu'),
            layers.Dense(10, name='output', activation='softmax')
        ], 'poker_predictor')
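# Minimal usage sketch for get_model; the compile settings are assumptions,
# not taken from the source.
model = get_model('dense')
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()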
def make_model_full(train_input, num_classes, weights_file=None):
    '''Return Cifar10 DL model with many layers.

    :param train_input: Either a tf.Tensor input placeholder/pipeline, or a
        tuple input shape.
    '''
    model = Sequential()
    # model.add(KL.InputLayer(input_shape=inshape[1:]))
    if isinstance(train_input, tf.Tensor):
        model.add(KL.InputLayer(input_tensor=train_input))
    else:
        model.add(KL.InputLayer(input_shape=train_input))
    # if standardize:
    #     model.add(KL.Lambda(stand_img))
    model.add(KL.Conv2D(32, (3, 3), padding='same'))
    model.add(KL.Activation('relu'))
    model.add(KL.Conv2D(32, (3, 3)))
    model.add(KL.Activation('relu'))
    model.add(KL.MaxPooling2D(pool_size=(2, 2)))
    model.add(KL.Dropout(0.25))
    model.add(KL.Conv2D(64, (3, 3), padding='same'))
    model.add(KL.Activation('relu'))
    model.add(KL.Conv2D(64, (3, 3)))
    model.add(KL.Activation('relu'))
    model.add(KL.MaxPooling2D(pool_size=(2, 2)))
    model.add(KL.Dropout(0.25))
    model.add(KL.Flatten())
    model.add(KL.Dense(512))
    model.add(KL.Activation('relu'))
    model.add(KL.Dropout(0.5))
    model.add(KL.Dense(num_classes))
    model.add(KL.Activation('softmax'))
    if weights_file is not None and os.path.exists(weights_file):
        model.load_weights(weights_file)
    return model
def LSTM_embedding(input_shape, dropW=0.2, dropU=0.2, n_dims=128, embedding=None):
    assert embedding is not None, 'must supply embedding!'
    model = Sequential()
    model.add(layers.InputLayer(input_shape=(input_shape,)))
    model.add(embedding)
    # dropout_W/dropout_U are the Keras 1 argument names
    # (dropout/recurrent_dropout in Keras 2)
    model.add(layers.LSTM(n_dims, dropout_W=dropW, dropout_U=dropU))
    return model