def model_factory(n_classes, n_dims):
    print("Building model...")

    lmbd1 = 0
    lmbd2 = 0

    model = Sequential()
    model.add(Dense(n_dims, 1024, init='glorot_uniform',
                    W_regularizer=l1l2(lmbd1, lmbd2)))
    model.add(PReLU((1024,)))
    model.add(BatchNormalization((1024,)))
    model.add(Dropout(0.5))

    model.add(Dense(1024, 512, init='glorot_uniform',
                    W_regularizer=l1l2(lmbd1, lmbd2)))
    model.add(PReLU((512,)))
    model.add(BatchNormalization((512,)))
    model.add(Dropout(0.5))

    model.add(Dense(512, 256, init='glorot_uniform',
                    W_regularizer=l1l2(lmbd1, lmbd2)))
    model.add(PReLU((256,)))
    model.add(BatchNormalization((256,)))
    model.add(Dropout(0.5))

    model.add(Dense(256, n_classes, init='glorot_uniform'))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer="adam")

    return model
Example #2
def rnn_1(DIM = 0, HIDDEN_SIZE = 0 , DROPOUT = 0, LAYERS = 1, UNIT = 'lstm', MAX_Q = 0, MAX_A = 0, FINETUNE = False, embedding_weights = None):
	RNN = RNN_UNIT[UNIT]
	model = Graph()

	if FINETUNE:
		model.add_input(name = 'q', input_shape = (None,), dtype = 'int64')
		model.add_input(name = 'a', input_shape = (MAX_A,), dtype = 'int64')
		VOCAB = embedding_weights.shape[0]
		EMB_HIDDEN_SIZE = embedding_weights.shape[1]
		model.add_node(Embedding(VOCAB,EMB_HIDDEN_SIZE,mask_zero=True, weights=[embedding_weights], input_length = MAX_Q ), name = 'q_e', input = 'q')
		model.add_node(Embedding(VOCAB,EMB_HIDDEN_SIZE,mask_zero=True, weights=[embedding_weights], input_length = MAX_A ), name = 'a_e', input = 'a')
		prev_q = 'q_e'
		prev_a = 'a_e'
	else:
		model.add_input(name = 'q', input_shape = (None,DIM))
		model.add_input(name = 'a', input_shape = (MAX_A,DIM))
		prev_q = 'q'
		prev_a = 'a'

	for layer in xrange(LAYERS-1):
		model.add_node(RNN(HIDDEN_SIZE, return_sequences=True), name = 'q_rnn_' + str(layer), input = prev_q)
		prev_q = 'q_rnn_' + str(layer)

	model.add_node(RNN(HIDDEN_SIZE, return_sequences=False), name = 'q_rnn_' + str(LAYERS), input = prev_q)
	model.add_node(RepeatVector(MAX_A), input = 'q_rnn_' + str(LAYERS), name = 'q_rv')

	for layer in xrange(LAYERS-1):
		if layer == 0:
			model.add_node(RNN(HIDDEN_SIZE, return_sequences=True), name = 'a_rnn_0', inputs = ['q_rv' , prev_a], merge_mode = 'concat', concat_axis = -1)
			prev_a = 'a_rnn_0'
			continue
		model.add_node(RNN(HIDDEN_SIZE, return_sequences=True), name = 'a_rnn_' + str(layer), input = prev_a)
		prev_a = 'a_rnn_' + str(layer)

	if LAYERS == 1:
		model.add_node(RNN(HIDDEN_SIZE, return_sequences=False), name = 'a_rnn_0', inputs = ['q_rv' , prev_a], merge_mode = 'concat', concat_axis = -1)
	else:
		model.add_node(RNN(HIDDEN_SIZE, return_sequences=False),name = 'a_rnn_' + str(LAYERS-1), input = prev_a)

	model.add_node(Dense(HIDDEN_SIZE,  activation = 'relu', W_regularizer = l1l2(l1 = 0.00001, l2 = 0.00001)), name = 'dense_0', input = 'a_rnn_' + str(LAYERS-1))
	model.add_node(Dropout(DROPOUT), name = 'dropout_0', input = 'dense_0')

	prev_d = 'dropout_0'
	for layer in xrange(LAYERS-1):
		model.add_node(Dense(HIDDEN_SIZE, activation = 'relu', W_regularizer = l1l2(l1 = 0.00001, l2 = 0.00001)), name = 'dense_' + str(layer+1), input = prev_d)
		model.add_node(Dropout(DROPOUT), name = 'dropout_' + str(layer+1), input = 'dense_' + str(layer+1))
		prev_d = 'dropout_' + str(layer+1)

	model.add_node(Dense(1, activation = 'sigmoid'), name = 'sigmoid', input = prev_d)
	model.add_output(name = 'o', input = 'sigmoid')
	return model
def make_network(
        n_dims,
        output_activation="linear",
        hidden_activation="relu",
        hidden_layer_sizes=None,
        dropout_probability=0,
        optimizer="adam",
        init="glorot_normal",
        l1_penalty=0,
        l2_penalty=0):
    if not hidden_layer_sizes:
        # start with a layer larger than the input vector and its
        # mask of missing values and then transform down to a layer
        # which is smaller than the input -- a bottleneck to force
        # generalization
        hidden_layer_sizes = [
            min(2000, 8 * n_dims),
            min(500, 2 * n_dims),
            int(np.ceil(0.5 * n_dims)),
        ]
        print("Hidden layer sizes: %s" % (hidden_layer_sizes,))

    nn = Sequential()
    first_layer_size = hidden_layer_sizes[0]
    nn.add(Dense(
        first_layer_size,
        input_dim=2 * n_dims,
        activation=hidden_activation,
        W_regularizer=l1l2(l1_penalty, l2_penalty),
        init=init))
    nn.add(Dropout(dropout_probability))

    for layer_size in hidden_layer_sizes[1:]:
        nn.add(Dense(
            layer_size,
            activation=hidden_activation,
            W_regularizer=l1l2(l1_penalty, l2_penalty),
            init=init))
        nn.add(Dropout(dropout_probability))
    nn.add(
        Dense(
            n_dims,
            activation=output_activation,
            W_regularizer=l1l2(l1_penalty, l2_penalty),
            init=init))
    loss_function = make_reconstruction_loss(
        n_dims,
        mask_indicates_missing_values=True)
    nn.compile(optimizer=optimizer, loss=loss_function)
    return nn
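To make the defaults above concrete, here is a hedged sketch (n_dims=100 is hypothetical) of the sizes the helper picks: the first Dense layer sees the data vector concatenated with its missingness mask (2 * n_dims inputs), and the hidden widths taper into a bottleneck.

import numpy as np

n_dims = 100
default_sizes = [
    min(2000, 8 * n_dims),       # 800 -- wider than the 200-dim (data + mask) input
    min(500, 2 * n_dims),        # 200
    int(np.ceil(0.5 * n_dims)),  # 50  -- the bottleneck that forces generalization
]
# nn = make_network(n_dims)     # the first Dense layer then has input_dim = 2 * n_dims = 200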
def model_generator(latent_dim,
                    nch=512,
                    dropout=0.5,
                    reg=lambda: l1l2(l1=1e-7, l2=1e-7)):
    model = Sequential(name="decoder")
    h = 5
    model.add(
        Dense(input_dim=latent_dim,
              output_dim=nch * 4 * 4,
              W_regularizer=reg()))
    model.add(Reshape(dim_ordering_shape((nch, 4, 4))))
    model.add(SpatialDropout2D(dropout))
    model.add(LeakyReLU(0.2))
    model.add(
        Convolution2D(nch / 2, h, h, border_mode='same', W_regularizer=reg()))
    model.add(SpatialDropout2D(dropout))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(
        Convolution2D(nch / 2, h, h, border_mode='same', W_regularizer=reg()))
    model.add(SpatialDropout2D(dropout))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(
        Convolution2D(nch / 4, h, h, border_mode='same', W_regularizer=reg()))
    model.add(SpatialDropout2D(dropout))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(3, h, h, border_mode='same', W_regularizer=reg()))
    model.add(Activation('sigmoid'))
    return model
def model_discriminator():
	nch = 256
	h = 5
	reg = lambda: l1l2(l1=1e-7, l2=1e-7)
	c1 = Convolution2D(nch / 4, h, h, border_mode='same', W_regularizer=reg(),
	input_shape=dim_ordering_shape((3, 32, 32)))
	c2 = Convolution2D(nch / 2, h, h, border_mode='same', W_regularizer=reg())
	c3 = Convolution2D(nch, h, h, border_mode='same', W_regularizer=reg())
	c4 = Convolution2D(1, h, h, border_mode='same', W_regularizer=reg())
	def m(dropout):
		model = Sequential()
		model.add(c1)
		model.add(SpatialDropout2D(dropout))
		model.add(MaxPooling2D(pool_size=(2, 2)))
		model.add(LeakyReLU(0.2))
		model.add(c2)
		model.add(SpatialDropout2D(dropout))
		model.add(MaxPooling2D(pool_size=(2, 2)))
		model.add(LeakyReLU(0.2))
		model.add(c3)
		model.add(SpatialDropout2D(dropout))
		model.add(MaxPooling2D(pool_size=(2, 2)))
		model.add(LeakyReLU(0.2))
		model.add(c4)
		model.add(AveragePooling2D(pool_size=(4, 4), border_mode='valid'))
		model.add(Flatten())
		model.add(Activation('sigmoid'))
		return model
	return m
def model_discriminator(latent_dim, input_shape, output_dim=1, hidden_dim=2048,
                        reg=lambda: l1l2(1e-7, 1e-7), batch_norm_mode=1, dropout=0.5):
    z = Input((latent_dim,))
    x = Input(input_shape, name="x")
    h = merge([z, Flatten()(x)], mode='concat')

    h1 = Dense(hidden_dim, name="discriminator_h1", W_regularizer=reg())
    b1 = BatchNormalization(mode=batch_norm_mode)
    h2 = Dense(hidden_dim, name="discriminator_h2", W_regularizer=reg())
    b2 = BatchNormalization(mode=batch_norm_mode)
    h3 = Dense(hidden_dim, name="discriminator_h3", W_regularizer=reg())
    b3 = BatchNormalization(mode=batch_norm_mode)
    y = Dense(output_dim, name="discriminator_y", activation="sigmoid", W_regularizer=reg())

    # training model uses dropout
    _h = h
    _h = Dropout(dropout)(LeakyReLU(0.2)((b1(h1(_h)))))
    _h = Dropout(dropout)(LeakyReLU(0.2)((b2(h2(_h)))))
    _h = Dropout(dropout)(LeakyReLU(0.2)((b3(h3(_h)))))
    ytrain = y(_h)
    mtrain = Model([z, x], ytrain, name="discriminator_train")

    # testing model does not use dropout
    _h = h
    _h = LeakyReLU(0.2)((b1(h1(_h))))
    _h = LeakyReLU(0.2)((b2(h2(_h))))
    _h = LeakyReLU(0.2)((b3(h3(_h))))
    ytest = y(_h)
    mtest = Model([z, x], ytest, name="discriminator_test")

    return mtrain, mtest
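As the comments above note, the train and test variants share the same Dense and BatchNormalization layer objects; only the Dropout wiring differs. A minimal usage sketch (shapes hypothetical):

# mtrain is fit on data; mtest reuses the exact same weights for inference,
# since both models were built from the shared layer objects above.
mtrain, mtest = model_discriminator(latent_dim=100, input_shape=(1, 28, 28))
mtrain.compile(optimizer='adam', loss='binary_crossentropy')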
Example #7
def test_W_reg():
    for reg in [regularizers.identity(), regularizers.l1(), regularizers.l2(), regularizers.l1l2()]:
        model = create_model(weight_reg=reg)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.fit(X_train, Y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, verbose=0)
        model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0)
def make_network(n_dims,
                 output_activation="linear",
                 hidden_activation="relu",
                 hidden_layer_sizes=None,
                 dropout_probability=0,
                 optimizer="adam",
                 init="glorot_normal",
                 l1_penalty=0,
                 l2_penalty=0):
    if not hidden_layer_sizes:
        # start with a layer larger than the input vector and its
        # mask of missing values and then transform down to a layer
        # which is smaller than the input -- a bottleneck to force
        # generalization
        hidden_layer_sizes = [
            min(2000, 8 * n_dims),
            min(500, 2 * n_dims),
            int(np.ceil(0.5 * n_dims)),
        ]
        print("Hidden layer sizes: %s" % (hidden_layer_sizes, ))

    nn = Sequential()
    first_layer_size = hidden_layer_sizes[0]
    nn.add(
        Dense(first_layer_size,
              input_dim=2 * n_dims,
              activation=hidden_activation,
              W_regularizer=l1l2(l1_penalty, l2_penalty),
              init=init))
    nn.add(Dropout(dropout_probability))

    for layer_size in hidden_layer_sizes[1:]:
        nn.add(
            Dense(layer_size,
                  activation=hidden_activation,
                  W_regularizer=l1l2(l1_penalty, l2_penalty),
                  init=init))
        nn.add(Dropout(dropout_probability))
    nn.add(
        Dense(n_dims,
              activation=output_activation,
              W_regularizer=l1l2(l1_penalty, l2_penalty),
              init=init))
    loss_function = make_reconstruction_loss(
        n_dims, mask_indicates_missing_values=True)
    nn.compile(optimizer=optimizer, loss=loss_function)
    return nn
Example #9
def create_model(nr, nr2, dr1, dr2, dr3, lr, w1l1, w1l2):
    # create model
    #L=WinnerTakeAll1D_GaborMellis(spatial=1, OneOnX=WTAX)

    input_grid = Input(shape=(16, ))

    x1 = Dense(nr,
               init='normal',
               activation='relu',
               W_regularizer=l1l2(l1=w1l1, l2=w1l2))(input_grid)
    x2 = Dropout(dr1)(x1)

    x3 = Dense(nr, activation='relu')(x2)
    x3 = Dense(nr, activation='relu')(x3)
    x4 = merge([x3, x2], mode='sum')
    x4 = Dropout(dr2)(x4)

    x5 = Dense(nr, activation='relu')(x4)
    x5 = Dense(nr, activation='relu')(x5)
    x6 = merge([x5, x4], mode='sum')
    x6 = Dropout(dr2)(x6)

    x7 = Dense(nr, activation='relu')(x6)
    x7 = Dense(nr, activation='relu')(x7)
    x8 = merge([x7, x6], mode='sum')
    x8 = Dropout(dr2)(x8)

    x9 = Dense(nr, activation='relu')(x8)
    x9 = Dense(nr, activation='relu')(x9)
    x10 = merge([x9, x8], mode='sum')
    x10 = Dropout(dr2)(x10)

    x11 = Dense(nr, activation='relu')(x10)
    x11 = Dense(nr, activation='relu')(x11)
    x12 = merge([x11, x10], mode='sum')
    x12 = Dropout(dr2)(x12)

    x13 = Dense(nr, activation='relu')(x12)
    x13 = Dense(nr, activation='relu')(x13)
    x13 = merge([x13, x12], mode='sum')
    x13 = Dropout(dr2)(x13)

    x14 = Dense(nr, activation='relu')(x13)
    x14 = Dense(nr, activation='relu')(x14)
    x14 = merge([x14, x13], mode='sum')
    x14 = Dropout(dr2)(x14)

    x15 = Dense(nr2, activation='relu')(x14)
    x15 = Dropout(dr3)(x15)

    output = Dense(2, activation='softmax')(x15)

    model = Model(input_grid, output)

    admax = Adamax(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0)
    model.compile(optimizer=admax,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])  # Gradient descent
    return model
Example #10
def rnn_2(DIM = 0, HIDDEN_SIZE = 0 , DROPOUT = 0, LAYERS = 1, UNIT = 'lstm', MAX_Q = 0, MAX_A = 0, FINETUNE = False, embedding_weights = None):
	LAYERS = max(LAYERS, 2)  # use at least two recurrent layers
	RNN = RNN_UNIT[UNIT]
	model = Graph()

	if FINETUNE:
		model.add_input(name = 'q', input_shape = (None,), dtype = 'int64')
		model.add_input(name = 'a', input_shape = (None,), dtype = 'int64')
		VOCAB = embedding_weights.shape[0]
		EMB_HIDDEN_SIZE = embedding_weights.shape[1]
		model.add_node(Embedding(VOCAB,EMB_HIDDEN_SIZE,mask_zero=True, weights=[embedding_weights]), name = 'q_e', input = 'q')
		model.add_node(Embedding(VOCAB,EMB_HIDDEN_SIZE,mask_zero=True, weights=[embedding_weights]), name = 'a_e', input = 'a')
		prev_q = 'q_e'
		prev_a = 'a_e'

	else:
		model.add_input(name = 'q', input_shape = (None,DIM))
		model.add_input(name = 'a', input_shape = (None,DIM))
		prev_q = 'q'
		prev_a = 'a'

	model.add_node(RNN(HIDDEN_SIZE, return_sequences = True), name='recurrent_context', input = prev_a)

	for layer in xrange(LAYERS - 2):
		model.add_node(RNN(HIDDEN_SIZE, return_sequences=True), name = 'q_rnn_' + str(layer+1), input = prev_q)
		prev_q = 'q_rnn_' + str(layer+1)
	model.add_node(RNN(HIDDEN_SIZE, return_sequences = True), name = 'encoder_context', input = prev_q)
	model.add_node(TimeDistributedAttention(prev_dim = HIDDEN_SIZE, att_dim = HIDDEN_SIZE, return_sequences = True, prev_context = False), name='attention', inputs=['encoder_context','recurrent_context'], merge_mode = 'join_att')

	prev_a = 'attention'
	for layer in xrange(LAYERS - 2):
		model.add_node(RNN(HIDDEN_SIZE, return_sequences=True), name = 'a_rnn_' + str(layer+1), input = prev_a)
		prev_a = 'a_rnn_' + str(layer+1)
	model.add_node(RNN(HIDDEN_SIZE, return_sequences=False),name = 'a_rnn_' + str(LAYERS), input = prev_a)
	model.add_node(Dense(HIDDEN_SIZE,  activation = 'relu', W_regularizer = l1l2(l1 = 0.00001, l2 = 0.00001)), name = 'dense_0', input = 'a_rnn_' + str(LAYERS))
	model.add_node(Dropout(DROPOUT), name = 'dropout_0', input = 'dense_0')

	prev_d = 'dropout_0'
	for layer in xrange(LAYERS-1):
		model.add_node(Dense(HIDDEN_SIZE, activation = 'relu', W_regularizer = l1l2(l1 = 0.00001, l2 = 0.00001)), name = 'dense_' + str(layer+1), input = prev_d)
		model.add_node(Dropout(DROPOUT), name = 'dropout_' + str(layer+1), input = 'dense_' + str(layer+1))
		prev_d = 'dropout_' + str(layer+1)

	model.add_node(Dense(1, activation = 'sigmoid'), name = 'sigmoid', input = prev_d)
	model.add_output(name = 'o', input = 'sigmoid')
	return model
def regularizer(params):
    if 'l1' in params and 'l2' in params:
        return regularizers.l1l2(params['l1'], params['l2'])
    elif 'l1' in params:
        return regularizers.l1(params['l1'])
    elif 'l2' in params:
        return regularizers.l2(params['l2'])
    else:
        return None
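For instance (values hypothetical), the dispatcher only returns the elastic-net penalty when both keys are present:

regularizer({'l1': 1e-5, 'l2': 1e-4})  # -> regularizers.l1l2(1e-5, 1e-4)
regularizer({'l2': 1e-4})              # -> regularizers.l2(1e-4)
regularizer({})                        # -> None (no weight penalty)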
Example #12
def get_regularizer(lambda_l1=None, lambda_l2=None):
    regularizer = None
    if lambda_l1 is None and lambda_l2 is not None:
        regularizer = l2(l=lambda_l2)
    elif lambda_l1 is not None and lambda_l2 is None:
        regularizer = l1(l=lambda_l1)
    elif lambda_l1 is not None and lambda_l2 is not None:
        regularizer = l1l2(l1=lambda_l1, l2=lambda_l2)
    return regularizer
Example #13
def build_model(in_dim, out_dim=1,
                n_hidden=100, l1_norm=0.0,
                l2_norm=0,
                n_deep=5, drop=0.1,
                learning_rate=0.1):
    model = Sequential()
    # Input layer
    model.add(Dense(
        input_dim=in_dim,
        output_dim=n_hidden,
        init='glorot_normal',
        activation='tanh',
        W_regularizer=l1l2(l1=l1_norm, l2=l2_norm)))

    # do X layers
    for layer in range(n_deep-1):
        model.add(Dropout(drop))
        model.add(Dense(
            output_dim=np.round(n_hidden/2**(layer+1)),
            init='glorot_normal',
            activation='tanh',
            W_regularizer=l1l2(l1=l1_norm, l2=l2_norm)))

    # Output layer
    if out_dim == 1:
        activation = 'tanh'
    else:
        activation = 'softmax'

    model.add(Dense(out_dim,
                    init='glorot_normal',
                    activation=activation))

    # Optimization algorithms
    opt = Adadelta(lr=learning_rate)
    if out_dim == 1:
        model.compile(loss='binary_crossentropy',
                      optimizer=opt)
    else:
        model.compile(loss='categorical_crossentropy',
                      optimizer=opt)

    return model
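A hedged note on the halving schedule in the loop above: with the defaults n_hidden=100 and n_deep=5, the hidden widths come out as 100, 50, 25, 12, 6 before the output layer.

import numpy as np

n_hidden, n_deep = 100, 5  # defaults from the signature above
widths = [n_hidden] + [int(np.round(n_hidden / 2 ** (layer + 1)))
                       for layer in range(n_deep - 1)]
# widths == [100, 50, 25, 12, 6]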
def model_discriminator(latent_dim, output_dim=1, hidden_dim=512,
                        reg=lambda: l1l2(1e-7, 1e-7)):
    z = Input((latent_dim,))
    h = z
    h = Dense(hidden_dim, name="discriminator_h1", W_regularizer=reg())(h)
    h = LeakyReLU(0.2)(h)
    h = Dense(hidden_dim, name="discriminator_h2", W_regularizer=reg())(h)
    h = LeakyReLU(0.2)(h)
    y = Dense(output_dim, name="discriminator_y", activation="sigmoid", W_regularizer=reg())(h)
    return Model(z, y)
def test_W_reg():
    for reg in [regularizers.l1(), regularizers.l2(), regularizers.l1l2()]:
        model = create_model(weight_reg=reg)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.fit(X_train,
                  Y_train,
                  batch_size=batch_size,
                  nb_epoch=nb_epoch,
                  verbose=0)
        model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0)
def create_model_test():

	model = Sequential()
	
	
	model.add(Dense(280, input_dim=16, init='normal', activation='relu' ,W_regularizer=l1l2(l1=5e-06, l2=5e-06), activity_regularizer=l1l2(l1=0, l2=1e-5))) #W_regularizer=l1(0.000001), activity_regularizer=activity_l1(0.000001)))
	model.add(Dropout(0.25))
	model.add(Dense(370,  activation ='relu',activity_regularizer=l1l2(l1=0, l2=5e-5)))
	model.add(Dropout(0.5))
	model.add(Dense(120,  activation ='relu',W_regularizer=l1l2(l1=0, l2=5e-06)))
	model.add(Dropout(0.55))
	
	model.add(Dense(1))
		
	model.add(Activation('sigmoid'))

	admax = Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)#decay ? 0.002
	
	model.compile(optimizer=admax, loss='binary_crossentropy', metrics=['accuracy']) # Gradient descent
	return model
Example #18
def test_W_reg():
    (X_train, Y_train), (X_test, Y_test), test_ids = get_data()
    for reg in [regularizers.l1(),
                regularizers.l2(),
                regularizers.l1l2()]:
        model = create_model(weight_reg=reg)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        assert len(model.losses) == 1
        model.fit(X_train, Y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, verbose=0)
        model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0)
def network(regl1, regl2, weight_init, dropout, optimize):   
    
    #create network architecture
    model = Sequential()
    
    model.add(Convolution2D(16, 7, 7,input_shape=(1, 256, 192),W_regularizer=l1l2(l1=regl1, l2=regl2),init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))
    
    model.add(Convolution2D(32, 6, 6, W_regularizer=l1l2(l1=regl1, l2=regl2),init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  
    model.add(Dropout(dropout))
    
    model.add(Convolution2D(64, 3, 3, W_regularizer=l1l2(l1=regl1, l2=regl2),init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  
    model.add(Dropout(dropout))
    
    model.add(Convolution2D(64, 2, 2, W_regularizer=l1l2(l1=regl1, l2=regl2),init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  
    model.add(Dropout(dropout))
    
    model.add(Flatten())
    model.add(Dense(50,W_regularizer=l1l2(l1=regl1, l2=regl2),init=weight_init))
    #model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    
    model.add(Dense(output_dim=1))
    model.add(Activation('sigmoid'))    

    model.compile(optimizer=optimize, loss='binary_crossentropy', metrics=['accuracy'])
    
    return model
Example #20
 def __init__(self,  **settings):
     # max_features, embedding_dim, seqlen,
     # nb_filter, filter_size, activation, dropout_p,
     # l1reg, l2reg, batchnorm,
     self.settings = settings
     self.settings['verbosity'] = 2
     seqlen = self.settings['seqlen']
     l1reg = self.settings['l1reg']
     l2reg = self.settings['l2reg']
     self.nn = Sequential()
     self.nn.add(Embedding(input_dim=self.settings['max_features'],
                           output_dim=self.settings['embedding_dim'],
                           input_length=seqlen))
     self.nn.add(Dropout(self.settings['dropout_p']))
     self.nn.add(Convolution1D(self.settings['nb_filter'],
                               self.settings['filter_size'],
                               activation=self.settings['activation']))
     self.nn.add(Convolution1D(self.settings['nb_filter'],
                               self.settings['filter_size'],
                               activation=self.settings['activation']))
     self.nn.add(MaxPooling1D(pool_length=2))
     self.nn.add(Convolution1D(self.settings['nb_filter'],
                               self.settings['filter_size'],
                               activation=self.settings['activation']))
     self.nn.add(Convolution1D(self.settings['nb_filter'],
                               self.settings['filter_size'],
                               activation=self.settings['activation']))
     self.nn.add(MaxPooling1D(pool_length=2))
     self.nn.add(Convolution1D(self.settings['nb_filter'],
                               self.settings['filter_size'],
                               activation=self.settings['activation']))
     self.nn.add(Convolution1D(self.settings['nb_filter'],
                               self.settings['filter_size'],
                               activation=self.settings['activation']))
     self.nn.add(MaxPooling1D(pool_length=2))
     self.nn.add(Flatten())
     self.nn.add(Dropout(self.settings['dropout_p']))
     self.nn.add(Dense(10))
     self.nn.add(Activation(self.settings['activation']))
     self.nn.add(Dense(10))
     self.nn.add(Activation(self.settings['activation']))
     if (l1reg is not None and isinstance(l1reg, float) and
             l2reg is not None and isinstance(l2reg, float)):
         self.nn.add(Dense(1, W_regularizer=l1l2(l1reg, l2reg)))
     elif l2reg is not None and isinstance(l2reg, float):
         self.nn.add(Dense(1, W_regularizer=l2(l2reg)))
     elif l1reg is not None and isinstance(l1reg, float):
         self.nn.add(Dense(1, W_regularizer=l1(l1reg)))
     else:
         self.nn.add(Dense(1))
     if (self.settings['batchnorm'] is True):
         self.nn.add(BatchNormalization())
     self.nn.add(Activation('sigmoid'))
def create_model():

    model = Sequential()
    model.add(
        Dense(280,
              input_dim=16,
              init='normal',
              activation='relu',
              W_regularizer=l1l2(l1=5e-06, l2=5e-06),
              activity_regularizer=l1l2(l1=0, l2=1e-5))
    )  #W_regularizer=l1(0.000001), activity_regularizer=activity_l1(0.000001)))
    model.add(Dropout(0.25))
    model.add(
        Dense(370, activation='relu', activity_regularizer=l1l2(l1=0,
                                                                l2=5e-5)))
    model.add(Dropout(0.5))
    model.add(Dense(120, activation='relu', W_regularizer=l1l2(l1=0,
                                                               l2=5e-06)))
    model.add(Dropout(0.55))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    admax = Adamax(lr=0.002,
                   beta_1=0.9,
                   beta_2=0.999,
                   epsilon=1e-08,
                   decay=0.0)  #decay ? 0.002
    """
	reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.97, patience=1, min_lr=0.00001)
	callbacks = [
		EarlyStopping(monitor='val_loss', patience=25, verbose=0),
		ModelCheckpoint("/home/admin-7036/Documents/Projet python/bosongit/weigh.hdf", monitor='val_loss', save_best_only=True, verbose=0),
		reduce_lr
	
	]
	"""
    model.compile(optimizer=admax,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])  # Gradient descent
    return model
Example #22
 def __init__(self, filename, labels, model_type, feature_params=None, model=None,
              layers=1, layer_dim=100, activation="tanh", normalize=False,
              init="glorot_normal", max_num_labels=100, batch_size=10,
              minibatch_size=200, nb_epochs=5, dropout=0,
              optimizer="adam", loss="categorical_crossentropy",
              regularizer="l2", regularization=1e-8):
     """
     Create a new untrained NN or copy the weights from an existing one
     :param labels: a list of labels that can be updated later to add a new label
     :param feature_params: dict of feature type name -> FeatureInformation
     :param model: if given, copy the weights (from a trained model)
     :param layers: number of hidden layers
     :param layer_dim: size of hidden layer
     :param activation: activation function at hidden layers
     :param normalize: perform batch normalization after each layer?
     :param init: initialization type for hidden layers
     :param max_num_labels: since model size is fixed, set maximum output size
     :param batch_size: fit model every this many items
     :param minibatch_size: batch size for SGD
     :param nb_epochs: number of epochs for SGD
     :param dropout: dropout to apply to input layer
     :param optimizer: algorithm to use for optimization
     :param loss: objective function to use for optimization
     :param regularizer: regularization type (None, l1, l2 or l1l2)
     :param regularization: regularization parameter lambda
     """
     super(NeuralNetwork, self).__init__(model_type=model_type, filename=filename,
                                         labels=labels, model=model)
     assert feature_params is not None or model is not None
     if self.is_frozen:
         self.model = model
     else:
         self.max_num_labels = max_num_labels
         self._layers = layers
         self._layer_dim = layer_dim
         self._activation = (lambda x: x*x*x) if activation == "cube" else activation
         self._normalize = normalize
         self._init = init
         self._num_labels = self.num_labels
         self._minibatch_size = minibatch_size
         self._nb_epochs = nb_epochs
         self._dropout = dropout
         self._optimizer = optimizer
         self._loss = (lambda t, p: K.sum(K.maximum(0., 1.-p*t+p*(1.-t)))) if loss == "max_margin" else loss
         self._regularizer = (lambda: None) if regularizer is None else \
             (lambda: regularizers.l1l2(regularization, regularization)) if regularizer == "l1l2" else \
             (lambda: regularizers.get(regularizer, {"l": regularization}))
         self.feature_params = feature_params
         self.model = None
     self._batch_size = batch_size
     self._item_index = 0
     self._iteration = 0
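As a hedged illustration of the factory above: when the constructor is called with regularizer="l1l2" (and the default regularization=1e-8), the stored lambda is equivalent to the following, returning a fresh elastic-net penalty for every layer that asks for one.

# Equivalent to self._regularizer for regularizer="l1l2", regularization=1e-8;
# each call creates a new object, so every layer gets its own W_regularizer.
reg = lambda: regularizers.l1l2(1e-8, 1e-8)
# e.g. Dense(layer_dim, W_regularizer=reg())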
Example #23
 def test_dense(self):
     input_data = np.random.random_sample([1, 10])
     layer = Dense(2, init='one', activation="relu",
                   input_shape=(10, ), W_regularizer=l1l2(l1=0.01, l2=0.02))
     self.modelTestSingleLayer(input_data, layer, dump_weights=True)
     layer2 = Dense(2, init='one', activation="softplus",
                    input_shape=(10, ), b_regularizer=l2(0.02))
     self.modelTestSingleLayer(input_data, layer2, dump_weights=True)
     layer3 = Dense(2, init='one', input_shape=(10, ),
                    W_regularizer=keras.regularizers.WeightRegularizer(l1=0.1))
     self.modelTestSingleLayer(input_data, layer3, dump_weights=True)
     layer4 = Dense(2, init='glorot_uniform', activation="hard_sigmoid", input_shape=(10, ))
     self.modelTestSingleLayer(input_data, layer4, dump_weights=True)
Example #24
def build_model(params):

    n_hidden_layers = int(np.round(params['n_hidden_layers'][0]))
    n_neurons = int(np.round(params['n_neurons'][0]))
    log_l1_weight_reg = np.float32(params['log_l1_weight_reg'][0])
    log_l2_weight_reg = np.float32(params['log_l2_weight_reg'][0])
    #prob_drop_out = float(params['prob_drop_out'][ 0 ].astype('float32'))
    prob_drop_out = np.float32(params['prob_drop_out'][0])
    log_l_rate = np.float32(params['log_learning_rate'][0])

    print n_hidden_layers
    print n_neurons
    print log_l1_weight_reg
    print log_l2_weight_reg
    print prob_drop_out
    print log_l_rate

    model = Sequential()
    model.add(Dense(n_neurons, input_shape = (784,), W_regularizer=l1l2(l1 = np.exp(log_l1_weight_reg), \
        l2 = np.exp(log_l2_weight_reg))))
    model.add(Activation('relu'))
    prob_drop_out = float(prob_drop_out)
    model.add(Dropout(prob_drop_out))
    #model.add(Dropout(0.35))
    for i in range(n_hidden_layers - 1):
        model.add(Dense(n_neurons, W_regularizer=l1l2(l1 = np.exp(log_l1_weight_reg), \
            l2 = np.exp(log_l2_weight_reg))))
        model.add(Activation('relu'))
        model.add(Dropout(prob_drop_out))
        #model.add(Dropout(0.35))
    n_classes = 10
    model.add(Dense(n_classes))
    model.add(Activation('softmax'))
    adam = Adam(lr=np.exp(log_l_rate), beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])

    return model
Example #25
def model4(model, ENT_EMB, REL_EMB, MAXLEN, HIDDEN_SIZE):
	from keras.layers.averagelayer import Average
	from keras.regularizers import l1l2, l2
	model.add_shared_node(Embedding(ENT_EMB,HIDDEN_SIZE, input_length = MAXLEN),name = 'embedding', inputs = ['input1','input2'])
	model.add_shared_node(Average(), name='avg', inputs=['embedding'])
	prev = 'avg'
	for layer in xrange(3):
		model.add_shared_node(Dense(HIDDEN_SIZE, activation = 'relu', W_regularizer = l1l2(l1 = 0.00001, l2 = 0.00001)), name='dense'+str(layer+1), inputs=[prev])
		model.add_shared_node(Dropout(0.25),name='dense'+str(layer+1) + '_d', inputs = ['dense'+str(layer+1)] )
		prev = 'dense'+str(layer+1)+'_d'
	model.add_shared_node(Layer(), name='merge_siam', inputs=[prev], merge_mode = 'concat', concat_axis = -1)

	return model
def model3():
    model = Sequential()
    model.add(Convolution2D(nb_filter=10,nb_row=5,nb_col=7,input_shape=(600,600,1),W_regularizer=l1l2()))
    model.add(MaxPooling2D(pool_size=(3,3)))
    model.add(PReLU())
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(output_dim=10,W_regularizer=l1l2()))
    model.add(PReLU())
    model.add(Dropout(0.5))
    model.add(Dense(output_dim=8,activation='softmax'))

    return model
def create_model():

    model = Sequential()

    model.add(
        Dense(100,
              input_dim=147,
              init='normal',
              activation='relu',
              W_regularizer=l1l2(l1=1E-6, l2=1E-5),
              activity_regularizer=l1l2(l1=0, l2=1e-7))
    )  #W_regularizer=l1(0.000001), activity_regularizer=activity_l1(0.000001)))

    model.add(Dropout(0.1))
    model.add(
        Dense(540,
              activation='relu',
              W_regularizer=l1l2(l1=1e-07, l2=0),
              activity_regularizer=l1l2(l1=0, l2=5e-7)))
    model.add(Dropout(0.3))
    model.add(Dense(310, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(450, activation='relu'))
    model.add(Dropout(0.4))

    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    admax = Adamax(lr=0.005,
                   beta_1=0.9,
                   beta_2=0.999,
                   epsilon=1e-08,
                   decay=0.0)  #decay ? 0.002

    model.compile(optimizer=admax,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])  # Gradient descent

    return model
Example #28
def build_model(ncell, nmark, nfilter, coeff_l1, coeff_l2,
                k, dropout, dropout_p, regression, n_classes, lr=0.01):

    """ Builds the neural network architecture """

    # the input layer
    data_input = Input(shape=(ncell, nmark))

    # the filters
    conv = Convolution1D(nfilter, 1, activation='linear',
                         W_regularizer=l1l2(l1=coeff_l1, l2=coeff_l2),
                         name='conv1')(data_input)
    conv = Activation('relu')(conv) ### filter responses?
    # the cell grouping part
    pooled = Lambda(select_top, output_shape=(nfilter,), arguments={'k':k})(conv)

    # possibly add dropout
    if dropout or ((dropout == 'auto') and (nfilter > 5)):
        pooled = Dropout(p=dropout_p)(pooled)

    # network prediction output
    if not regression:
        output = Dense(n_classes, activation='softmax',
                       W_regularizer=l1l2(l1=coeff_l1, l2=coeff_l2),
                       name='output')(pooled)
    else:
        output = Dense(1, activation='linear', W_regularizer=l1l2(l1=coeff_l1, l2=coeff_l2),
                       name='output')(pooled)
    model = Model(input=data_input, output=output)


    if not regression:
        model.compile(optimizer=Adam(lr=lr),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
    else:
        model.compile(optimizer=Adam(lr=lr),
                      loss='mean_squared_error')
    return model
def build_model(train):

    input_dim = train.shape[1]
    model = Sequential()
    model.add(
        Dense(15,
              input_dim=input_dim,
              init='glorot_normal',
              activation='tanh',
              W_regularizer=l1l2(l1=1e-4, l2=1e-4)))
    model.add(Dropout(0.15))
    model.add(Dense(1, init='zero', activation='linear'))
    model.compile(loss="mse", optimizer="adagrad")
    return model
Example #30
def timed_sgd_Elastic(x1,y1,x2=None,y2=None,
    lambda1=0.01, lambda2=0.01, lr=0.1, decay=1e-2, nesterov=True, momentum=0.8, batch_size=100,nb_epoch=50):
    if (x2 is None)^(y2 is None): raise ValueError("if you specify x2 or y2 you need to specify the other as well")
    if x2 is None and y2 is None: x2=x1; y2=y1 #if no Cross-validation set, use original
    print("Running SGD Elastic with lambda1: ", lambda1, " lambda2: ", lambda2, " lr: ", lr, " decay: ", decay, " Momentum: ", momentum, " Batch size: ", batch_size)
    time0 = time() # Start timer
    earlystopper = EarlyStopping(monitor='loss', patience=1)
    sgd=SGD(lr=lr, decay=decay, nesterov=nesterov, momentum=momentum)
    model = Sequential()
    model.add(Dense(1, input_dim=K, activation='linear', W_regularizer=l1l2(lambda1,lambda2)))
    model.compile(loss='mse', optimizer=sgd)
    model.fit(x1, y1,nb_epoch=nb_epoch,batch_size=batch_size, show_accuracy=True, callbacks=[earlystopper], verbose=0)
    score = model.evaluate(x2, y2, batch_size=20)
    return time()-time0, score
Example #31
 def l1l2_penalty_reg(alpha=1.0, l1_ratio=0.5):
     '''Calculate L1 and L2 penalties for a Keras layer
     This follows the same formulation as in the R package glmnet and Sklearn
     Args:
         alpha ([float]): amount of regularization.
         l1_ratio ([float]): portion of L1 penalty. Setting to 1.0 equals 
                 Lasso.
     '''
     if l1_ratio == .0:
         return l2(alpha)
     elif l1_ratio == 1.:
         return l1(alpha)
     else:
         return l1l2(l1_ratio * alpha, 1. / 2 * (1 - l1_ratio) * alpha)
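Worked through the glmnet-style split above: an overall alpha of 1.0 with an even mix (l1_ratio=0.5) yields an L1 weight of 0.5 and an L2 weight of 0.25.

reg = l1l2_penalty_reg(alpha=1.0, l1_ratio=0.5)
# equivalent to l1l2(0.5 * 1.0, 0.5 * (1 - 0.5) * 1.0) == l1l2(0.5, 0.25);
# l1_ratio=1.0 gives a pure lasso penalty, l1_ratio=0.0 a pure ridge penalty.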
def vgg16(input_shape=(224, 224, 3),
          weights=None,
          vgg_transfer=None,
          activation_fn='relu',
          l1=0.00001,
          l2=0.00001,
          dropout=0.5):
    input_tensor = Input(shape=input_shape)

    if vgg_transfer is not None:
        vgg_transfer = 'imagenet'

    vgg = VGG16(input_tensor=input_tensor,
                include_top=False,
                weights=vgg_transfer)

    x = Flatten(name='flatten')(vgg.output)
    x = Dense(4096,
              activation=activation_fn,
              name='fc1',
              W_regularizer=l1l2(l1, l2),
              b_regularizer=l1l2(l1, l2))(x)
    x = Dropout(dropout)(x)
    x = Dense(4096,
              activation=activation_fn,
              name='fc2',
              W_regularizer=l1l2(l1, l2),
              b_regularizer=l1l2(l1, l2))(x)
    x = Dense(2, activation='softmax', name='predictions')(x)

    model = Model(input=input_tensor, output=x)

    if weights is not None:
        model.load_weights(weights)

    return model
def make_model(dropout, nb_filters, nb_conv, nb_pool,weight_initiation,activation_function,l1_reg,l2_reg):
    '''Creates a model of three convolutional layers followed by dense layers.
    dropout: Dropout probability applied after each layer block
    nb_filters: Number of convolutional filters in each convolutional layer
    nb_conv: Convolutional kernel size
    nb_pool: Size of pooling area for max pooling
    '''
    model = Sequential()

    model.add(Convolution2D(8, 8, 8,
		                border_mode='valid',
		                input_shape=(1, img_rows, img_cols),subsample = (4,4),W_regularizer=l1l2(l1 = l1_reg,l2=l2_reg),b_regularizer=l1l2(l1 = l1_reg,l2=l2_reg),init=weight_initiation))
    model.add(Activation(activation_function))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(dropout))
  
    model.add(Convolution2D(8, 5, 5,W_regularizer=l1l2(l1 = l1_reg,l2=l2_reg),b_regularizer=l1l2(l1 = l1_reg,l2=l2_reg),subsample = (2,2),init=weight_initiation))
    model.add(Activation(activation_function))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(dropout))

    model.add(Convolution2D(16, nb_conv, nb_conv,W_regularizer=l1l2(l1=l1_reg,l2=l2_reg),b_regularizer=l1l2(l1 = l1_reg,l2=l2_reg),init=weight_initiation))
    model.add(Activation(activation_function))
    model.add(Dropout(dropout))

    model.add(Flatten())
    model.add(Dense(5,W_regularizer=l1l2(l1 = l1_reg,l2=l2_reg),b_regularizer=l1l2(l1 = l1_reg,l2=l2_reg),init=weight_initiation))
    model.add(Activation(activation_function))
    model.add(Dropout(dropout))

    model.add(Dense(output_dim=1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy', optimizer='adadelta')

    return model
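A hedged usage sketch for make_model (the argument values are illustrative only; img_rows and img_cols are module-level globals the snippet assumes):

model = make_model(dropout=0.5, nb_filters=16, nb_conv=3, nb_pool=2,
                   weight_initiation='glorot_uniform',
                   activation_function='relu',
                   l1_reg=1e-5, l2_reg=1e-5)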
Example #34
def dan(DIM = 0, HIDDEN_SIZE = 0 , DROPOUT = 0, LAYERS = 1, UNIT = 'lstm', MAX_Q = 0, MAX_A = 0, FINETUNE = False, embedding_weights = None):
	from keras.layers.averagelayer import Average
	model = Graph()

	if FINETUNE:
		model.add_input(name = 'q', input_shape = (None,), dtype = 'int64')
		model.add_input(name = 'a', input_shape = (None,), dtype = 'int64')
		VOCAB = embedding_weights.shape[0]
		EMB_HIDDEN_SIZE = embedding_weights.shape[1]
		model.add_node(Embedding(VOCAB,EMB_HIDDEN_SIZE,mask_zero=True, weights=[embedding_weights]), name = 'q_e', input = 'q')
		model.add_node(Embedding(VOCAB,EMB_HIDDEN_SIZE,mask_zero=True, weights=[embedding_weights]), name = 'a_e', input = 'a')
		prev_q = 'q_e'
		prev_a = 'a_e'
	else:
		model.add_input(name = 'q', input_shape = (None,DIM))
		model.add_input(name = 'a', input_shape = (None,DIM))
		prev_q = 'q'
		prev_a = 'a'
		EMB_HIDDEN_SIZE = DIM

#	model.add_node(Average(),name = 'avg_a', inputs = [prev_a], merge_mode = 't_ave')
#	model.add_node(Average(), name = 'avg_q', inputs = [prev_q], merge_mode = 't_ave')
	model.add_node(Average(),name = 'avg_a', input = prev_a)
	model.add_node(Average(), name = 'avg_q', input = prev_q)

	model.add_node(Dense(HIDDEN_SIZE, activation = 'relu', W_regularizer = l1l2(l1 = 0.00001, l2 = 0.00001)), name = 'd_0', inputs = ['avg_a','avg_q'], merge_mode = 'concat',  concat_axis = -1)
	model.add_node(Dropout(DROPOUT), name = 'd_0_dr', input = 'd_0')
	prev = 'd_0_dr'
	for layer in xrange(LAYERS-1):
		model.add_node(Dense(HIDDEN_SIZE, activation = 'relu', W_regularizer = l1l2(l1 = 0.00001, l2 = 0.00001)), name = 'd_' + str(layer+1), input = prev)
		model.add_node(Dropout(DROPOUT), name = 'd_' + str(layer+1) + '_dr', input = 'd_' + str(layer+1))
		prev = 'd_' + str(layer+1) + '_dr'

	model.add_node(Dense(1, activation = 'sigmoid'), name = 'sigmoid', input = prev)
	model.add_output(name = 'o', input = 'sigmoid')
	return model
def add_input(filed_name, input_len_field, layers_name, total_len = 0):
    temp_len = int(np.ceil(Log(input_len_field)) + 1)
    # Input layer
    globals()["input_{F}".format(F=filed_name)] = Input(shape=(input_len_field,),
                                                        name="input_{F}".format(F=filed_name)
                                                        )
    # "Emmbeding" layer
    globals()["input_{F}_D".format(F=filed_name)] = Dense(temp_len,
                                                          activation='sigmoid',
                                                          init='uniform',
                                                          name="input_{F}_D".format(F=filed_name),
                                                          W_regularizer=l1l2(l1=l1_reglazation, l2=l2_reglazation)
                                                          )(globals()["input_{F}".format(F=filed_name)])
    layers_name.append(globals()["input_{F}_D".format(F=filed_name)])
    total_len[0] += temp_len
Example #36
  def train(self):
    self.model = Sequential()
    self.model.add(Dense(100, input_dim=self.N, W_regularizer=l1l2(), b_regularizer=l1l2(), activity_regularizer=activity_l1l2()))
    self.model.add(Activation('relu'))
    self.model.add(Dropout(0.25))
    self.model.add(Dense(100, W_regularizer=l1l2(), b_regularizer=l1l2(), activity_regularizer=activity_l1l2()))
    self.model.add(Activation('relu'))
    self.model.add(Dropout(0.25))
    self.model.add(Dense(100, W_regularizer=l1l2(), b_regularizer=l1l2(), activity_regularizer=activity_l1l2()))
    self.model.add(Activation('relu'))
    self.model.add(Dropout(0.25))
    self.model.add(Dense(100, W_regularizer=l1l2(), b_regularizer=l1l2(), activity_regularizer=activity_l1l2()))
    self.model.add(Activation('relu'))
    self.model.add(Dropout(0.25))
    self.model.add(Dense(1))
    self.model.compile(loss='mean_squared_error', optimizer='rmsprop')

    self.model.fit(np.array(self.X), np.array(self.Y), show_accuracy=True, batch_size=16, nb_epoch=2000, verbose=2)
def autoencoder(train, tests, valid, ws, compression_rate, max_epochs, activation, sparcity, optimizer):
    # Compute the layer sizes.
    layer1 = int(ws * compression_rate)
    layer2 = int(layer1 * compression_rate)

    # Construct a Keras model.
    model = Sequential()
    regular = None
    if sparcity == 'l1':
        regular = l1()
    elif sparcity == 'l2':
        regular = l2()
    elif sparcity == 'l1l2':
        regular = l1l2()

    # Add the first set of connections to the network
    model.add(Dense(layer1, input_dim=ws, activation=activation, W_regularizer=regular))
    model.add(Dense(layer2, input_dim=layer1, activation=activation, W_regularizer=regular))
    model.add(Dense(layer1, input_dim=layer2, activation=activation, W_regularizer=regular))
    model.add(Dense(ws, input_dim=layer1, activation=activation, W_regularizer=regular))

    # Compile the model using binary crossentropy and rmsprop optimization.
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])

    # Train the model using early stopping.
    best_accuracy = -1.0
    keep_training = True
    epochs_done = 0
    while keep_training:
        epochs_done += 5
        print("____Epochs done = " + str(epochs_done))
        # Fit the model for 5 additional epochs.
        model.fit(train, train, batch_size=1, nb_epoch=5, verbose=0)
        loss_tests, accuracy_tests = model.evaluate(tests, tests, batch_size=1, verbose=0)
        # Stop training if accuracy deteriorates or epochs_done reaches max_epochs.
        if accuracy_tests <= best_accuracy or epochs_done >= max_epochs:
            keep_training = False
        else:
            best_accuracy = accuracy_tests

    # Compute the performance metrics and then compute their significance.
    loss_train, accuracy_train = model.evaluate(train, train, batch_size=1, verbose=0)
    loss_tests, accuracy_tests = model.evaluate(tests, tests, batch_size=1, verbose=0)
    loss_valid, accuracy_valid = model.evaluate(valid, valid, batch_size=1, verbose=0)
    print("____" + str(loss_train) + ";" + str(loss_tests) + ";" + str(loss_valid))
    return loss_train, loss_tests, loss_valid
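To make the layer-size computation above concrete (numbers hypothetical): a window of ws=128 with compression_rate=0.5 gives an encoder of 64 and 32 units before mirroring back out to the window size.

ws, compression_rate = 128, 0.5          # hypothetical window size and compression
layer1 = int(ws * compression_rate)      # 64
layer2 = int(layer1 * compression_rate)  # 32
# resulting stack: 128 -> 64 -> 32 -> 64 -> 128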
Example #38
def discriminator_model(sample_dimension, layers, activation=None,
                        reg=lambda: l1l2(1e-5, 1e-5), dropout=0.25, batch_norm_mode=0):
    model = Sequential()
    model.add(Dense(sample_dimension, input_shape=(sample_dimension,)))
    for layer_dims in layers:
        model.add(Dense(layer_dims, W_regularizer=reg()))
        if batch_norm_mode is not None:
            model.add(BatchNormalization(mode=batch_norm_mode))
        if activation is not None:
            if type(activation) is str:
                model.add(Activation(activation))
            else:
                model.add(Activation(activation()))
        if dropout is not None and dropout > 0:
            model.add(Dropout(dropout))
    model.add(Dense(1, activation='sigmoid'))
    return model
Example #39
 def test_dense(self):
     input_data = np.random.random_sample([2, 10, 5, 7])
     layer = Dense(2, init='one', activation="relu",
                   input_shape=(10, 5, 7), W_regularizer=l1l2(l1=0.01, l2=0.02))
     self.modelTestSingleLayer(input_data, layer, dump_weights=True)
     input_data2 = np.random.random_sample([2, 10])
     layer2 = Dense(2, init='one', activation="softplus",
                    input_shape=(10, ), b_regularizer=l2(0.02))
     self.modelTestSingleLayer(input_data2, layer2, dump_weights=True)
     layer3 = Dense(2, init='one', input_shape=(10, ),
                    W_regularizer=keras.regularizers.WeightRegularizer(l1=0.1))
     self.modelTestSingleLayer(input_data2, layer3, dump_weights=True)
     layer4 = Dense(2, init='glorot_uniform', activation="hard_sigmoid", input_shape=(10, ))
     self.modelTestSingleLayer(input_data2, layer4, dump_weights=True)
     # Test for unsupported init_method. Should get a warning not an exception.
     layer5 = Dense(4, init='he_uniform', input_shape=(10, ))
     self.modelTestSingleLayer(input_data2, layer5, dump_weights=True)
def model_discriminator(latent_dim, input_shape, output_dim=1, hidden_dim=1024,
                        reg=lambda: l1l2(1e-4, 1e-4), batch_norm_mode=1):
    z = Input((latent_dim,))
    x = Input(input_shape, name="x")
    h = merge([z, Flatten()(x)], mode='concat')
    h = Dense(hidden_dim, name="discriminator_h1", W_regularizer=reg())(h)
    h = BatchNormalization(mode=batch_norm_mode)(h)
    h = LeakyReLU(0.2)(h)
    h = Dropout(0.5)(h)
    h = Dense(hidden_dim / 2, name="discriminator_h2", W_regularizer=reg())(h)
    h = BatchNormalization(mode=batch_norm_mode)(h)
    h = LeakyReLU(0.2)(h)
    h = Dropout(0.5)(h)
    h = Dense(hidden_dim / 4, name="discriminator_h3", W_regularizer=reg())(h)
    h = BatchNormalization(mode=batch_norm_mode)(h)
    h = LeakyReLU(0.2)(h)
    h = Dropout(0.5)(h)
    y = Dense(output_dim, name="discriminator_y", activation="sigmoid", W_regularizer=reg())(h)
    return Model([z, x], y, name="discriminator")
Example #41
def model_discriminator(input_shape, hidden_dim=1024, reg=lambda: l1l2(1e-5, 1e-5), dropout=0.5, batch_norm_mode=1,
                        output_activation="sigmoid"):
    return Sequential([
        Flatten(name="discriminator_flatten", input_shape=input_shape),
        Dense(hidden_dim, name="discriminator_h1", W_regularizer=reg()),
        batch_norm(batch_norm_mode),
        LeakyReLU(0.2),
        dropout_layer(dropout),
        Dense(hidden_dim / 2, name="discriminator_h2", W_regularizer=reg()),
        batch_norm(batch_norm_mode),
        LeakyReLU(0.2),
        dropout_layer(dropout),
        Dense(hidden_dim / 4, name="discriminator_h3", W_regularizer=reg()),
        batch_norm(batch_norm_mode),
        LeakyReLU(0.2),
        dropout_layer(dropout),
        Dense(1, name="discriminator_y", W_regularizer=reg()),
        Activation(output_activation)],
        name="discriminator")
def createModel(input_shape, tf_ordering=True, second_phase = False):
    print("Creating new model with input shape", input_shape)

    axis = -1
    if not(tf_ordering):
        axis = 1
    alpha = 0.1
    w_reg = 0.0001
    
    print("Hyperparameters: alpha=%f, w_reg=%f"%(alpha, w_reg))
    
    path1 = Sequential()
    path1.add(Convolution2D(64, 7, 7, border_mode='valid', input_shape = input_shape, W_regularizer=l1l2(l1 = w_reg, l2 = w_reg), trainable=not(second_phase)))
    path1.add(Dropout(alpha))
    path1.add(Activation('relu'))
    path1.add(MaxPooling2D(pool_size=(4,4), strides=(1,1), border_mode='valid'))

    path1.add(Convolution2D(64, 3, 3, border_mode='valid', W_regularizer=l1l2(l1 = w_reg, l2 = w_reg), trainable=not(second_phase)))
    path1.add(Dropout(alpha))
    path1.add(Activation('relu'))
    path1.add(MaxPooling2D(pool_size=(2,2), strides=(1,1), border_mode='valid'))
   
    path2 = Sequential()
    path2.add(Convolution2D(160, 13, 13, border_mode='valid', input_shape = input_shape, W_regularizer=l1l2(l1 = w_reg, l2 = w_reg), trainable=not(second_phase)))
    path2.add(Dropout(alpha))
    path2.add(Activation('relu'))
    
    classification_layer = Sequential()
    classification_layer.add(Merge([path1, path2], mode='concat', concat_axis=axis))
    classification_layer.add(Convolution2D(5, 21, 21, border_mode='valid', W_regularizer=l1l2(l1 = w_reg, l2 = w_reg)))
    classification_layer.add(Dropout(alpha))
    classification_layer.add(Flatten())
    classification_layer.add(Activation('softmax'))
    
    sgd = SGD(lr=0.005, decay = 1e-1, momentum=0.5, nesterov=True)
    adam = Adam(lr=0.0005)
    classification_layer.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy', dice])
    
    classification_layer.summary()
    
    return classification_layer
    def comp_double(self):
        '''
        Double model. Similar to two-pathway, except it takes in a 4x33x33 patch and its center 4x5x5 patch, and merges the paths at the flatten layer.
        '''
        print ('Compiling double model...')
        single = Sequential()
        single.add(Convolution2D(64, 7, 7, border_mode='valid', W_regularizer=l1l2(l1=0.01, l2=0.01), input_shape=(4,33,33)))
        single.add(Activation('relu'))
        single.add(BatchNormalization(mode=0, axis=1))
        single.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))
        single.add(Dropout(0.5))
        single.add(Convolution2D(nb_filter=128, nb_row=5, nb_col=5, activation='relu', border_mode='valid', W_regularizer=l1l2(l1=0.01, l2=0.01)))
        single.add(BatchNormalization(mode=0, axis=1))
        single.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))
        single.add(Dropout(0.5))
        single.add(Convolution2D(nb_filter=256, nb_row=5, nb_col=5, activation='relu', border_mode='valid', W_regularizer=l1l2(l1=0.01, l2=0.01)))
        single.add(BatchNormalization(mode=0, axis=1))
        single.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))
        single.add(Dropout(0.5))
        single.add(Convolution2D(nb_filter=128, nb_row=3, nb_col=3, activation='relu', border_mode='valid', W_regularizer=l1l2(l1=0.01, l2=0.01)))
        single.add(Dropout(0.25))
        single.add(Flatten())

        # add small patch to train on
        five = Sequential()
        five.add(Reshape((100,1), input_shape = (4,5,5)))
        five.add(Flatten())
        five.add(MaxoutDense(128, nb_feature=5))
        five.add(Dropout(0.5))

        model = Sequential()
        # merge both paths
        model.add(Merge([five, single], mode='concat', concat_axis=1))
        model.add(Dense(5))
        model.add(Activation('softmax'))

        sgd = SGD(lr=0.001, decay=0.01, momentum=0.9)
        model.compile(loss='categorical_crossentropy', optimizer=sgd)  # use the SGD instance configured above
        print ('Done.')
        return model
    def comp_double(self):
        '''
        Double model. Similar to two-pathway, except it takes in a 4x33x33 patch and its center 4x5x5 patch, and merges the paths at the flatten layer.
        '''
        print 'Compiling double model...'
        single = Sequential()
        single.add(Convolution2D(64, 7, 7, border_mode='valid', W_regularizer=l1l2(l1=0.01, l2=0.01), input_shape=(4,33,33)))
        single.add(Activation('relu'))
        single.add(BatchNormalization(mode=0, axis=1))
        single.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))
        single.add(Dropout(0.5))
        single.add(Convolution2D(nb_filter=128, nb_row=5, nb_col=5, activation='relu', border_mode='valid', W_regularizer=l1l2(l1=0.01, l2=0.01)))
        single.add(BatchNormalization(mode=0, axis=1))
        single.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))
        single.add(Dropout(0.5))
        single.add(Convolution2D(nb_filter=256, nb_row=5, nb_col=5, activation='relu', border_mode='valid', W_regularizer=l1l2(l1=0.01, l2=0.01)))
        single.add(BatchNormalization(mode=0, axis=1))
        single.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))
        single.add(Dropout(0.5))
        single.add(Convolution2D(nb_filter=128, nb_row=3, nb_col=3, activation='relu', border_mode='valid', W_regularizer=l1l2(l1=0.01, l2=0.01)))
        single.add(Dropout(0.25))
        single.add(Flatten())

        # add small patch to train on
        five = Sequential()
        five.add(Reshape((100,1), input_shape = (4,5,5)))
        five.add(Flatten())
        five.add(MaxoutDense(128, nb_feature=5))
        five.add(Dropout(0.5))

        model = Sequential()
        # merge both paths
        model.add(Merge([five, single], mode='concat', concat_axis=1))
        model.add(Dense(5))
        model.add(Activation('softmax'))

        sgd = SGD(lr=0.001, decay=0.01, momentum=0.9)
        model.compile(loss='categorical_crossentropy', optimizer=sgd)  # use the SGD instance configured above
        print 'Done.'
        return model
Example #45
def model_discriminator(latent_dim,
                        input_shape,
                        output_dim=1,
                        hidden_dim=2048,
                        reg=lambda: l1l2(1e-7, 1e-7),
                        batch_norm_mode=1,
                        dropout=0.5):
    z = Input((latent_dim, ))
    x = Input(input_shape, name="x")
    h = merge([z, Flatten()(x)], mode='concat')

    h1 = Dense(hidden_dim, name="discriminator_h1", W_regularizer=reg())
    b1 = BatchNormalization(mode=batch_norm_mode)
    h2 = Dense(hidden_dim, name="discriminator_h2", W_regularizer=reg())
    b2 = BatchNormalization(mode=batch_norm_mode)
    h3 = Dense(hidden_dim, name="discriminator_h3", W_regularizer=reg())
    b3 = BatchNormalization(mode=batch_norm_mode)
    y = Dense(output_dim,
              name="discriminator_y",
              activation="sigmoid",
              W_regularizer=reg())

    # training model uses dropout
    _h = h
    _h = Dropout(dropout)(LeakyReLU(0.2)((b1(h1(_h)))))
    _h = Dropout(dropout)(LeakyReLU(0.2)((b2(h2(_h)))))
    _h = Dropout(dropout)(LeakyReLU(0.2)((b3(h3(_h)))))
    ytrain = y(_h)
    mtrain = Model([z, x], ytrain, name="discriminator_train")

    # testing model does not use dropout
    _h = h
    _h = LeakyReLU(0.2)((b1(h1(_h))))
    _h = LeakyReLU(0.2)((b2(h2(_h))))
    _h = LeakyReLU(0.2)((b3(h3(_h))))
    ytest = y(_h)
    mtest = Model([z, x], ytest, name="discriminator_test")

    return mtrain, mtest
def model_discriminator(latent_dim,
                        input_shape,
                        output_dim=1,
                        hidden_dim=512,
                        activation="tanh",
                        reg=lambda: l1l2(1e-3, 1e-3)):
    z = Input((latent_dim, ))
    x = Input(input_shape, name="x")
    h = merge([z, Flatten()(x)], mode='concat')
    h = Dense(hidden_dim,
              name="discriminator_h1",
              activation=activation,
              W_regularizer=reg())(h)
    h = Dense(hidden_dim,
              name="discriminator_h2",
              activation=activation,
              W_regularizer=reg())(h)
    y = Dense(output_dim,
              name="discriminator_y",
              activation="sigmoid",
              W_regularizer=reg())(h)
    return Model([z, x], y, name="discriminator")
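# Hedged usage sketch (not from the source): this discriminator scores a
# (latent, data) pair jointly, so it is called with a two-element list.
# latent_dim=100, the (1, 28, 28) input shape, and the z_batch / x_batch
# arrays are assumptions for illustration only.
import numpy as np

d = model_discriminator(latent_dim=100, input_shape=(1, 28, 28))
z_batch = np.random.normal(size=(8, 100)).astype('float32')
x_batch = np.random.rand(8, 1, 28, 28).astype('float32')
scores = d.predict([z_batch, x_batch])   # shape (8, 1): probability the pair is "real"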
Exemple #47
0
def model_generator():
    model = Sequential()
    nch = 256
    reg = lambda: l1l2(l1=1e-7, l2=1e-7)
    h = 5
    model.add(Dense(input_dim=100, output_dim=nch * 4 * 4,
                    W_regularizer=reg()))
    model.add(BatchNormalization(mode=0))
    model.add(Reshape(dim_ordering_shape((nch, 4, 4))))
    model.add(
        Convolution2D(int(nch / 2),
                      4,
                      4,
                      border_mode='same',
                      W_regularizer=reg()))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(
        Convolution2D(int(nch / 2),
                      h,
                      h,
                      border_mode='same',
                      W_regularizer=reg()))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(
        Convolution2D(int(nch / 4),
                      h,
                      h,
                      border_mode='same',
                      W_regularizer=reg()))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(3, h, h, border_mode='same', W_regularizer=reg()))
    model.add(Activation('sigmoid'))
    return model
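# Hedged usage sketch (not from the source): the generator maps a 100-dim latent
# vector to a 3-channel image through three 2x upsamplings (4 -> 8 -> 16 -> 32),
# so with 'th' dim ordering the output is expected to be (n, 3, 32, 32). The
# batch size and variable names below are assumptions.
import numpy as np

generator = model_generator()
z = np.random.normal(0.0, 1.0, size=(16, 100)).astype('float32')
fake_images = generator.predict(z)   # expected shape: (16, 3, 32, 32) for 'th' ordering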
Exemple #48
0
 def __init__(self,  **settings):
     # max_features, embedding_dim, seqlen,
     # nb_filter, filter_widths, activation, dropout_p,
     # l1reg, l2reg, batchnorm,
     self.settings = settings
     self.settings['verbosity'] = 2
     seqlen = self.settings['seqlen']
     l1reg = self.settings['l1reg']
     l2reg = self.settings['l2reg']
     conv_filters = []
     for n_gram in settings['filter_widths']:
         sequential = Sequential()
         conv_filters.append(sequential)
         sequential.add(Embedding(input_dim=self.settings['max_features'],
                                  output_dim=self.settings['embedding_dim'],
                                  input_length=seqlen))
         sequential.add(Dropout(self.settings['dropout_p']))
         sequential.add(Convolution1D(self.settings['nb_filter'],
                                      n_gram,
                                      activation=self.settings['activation']
                                      ))
         sequential.add(MaxPooling1D(pool_length=seqlen - n_gram + 1))
         sequential.add(Flatten())
     self.nn = Sequential()
     self.nn.add(Merge(conv_filters, mode='concat'))
     self.nn.add(Dropout(self.settings['dropout_p']))
     if (l1reg is not None and isinstance(l1reg, float) and
             l2reg is not None and isinstance(l2reg, float)):
         self.nn.add(Dense(1, W_regularizer=l1l2(l1reg, l2reg)))
     elif (l2reg is not None and isinstance(l2reg, float)):
         self.nn.add(Dense(1, W_regularizer=l2(l2reg)))
     elif (l1reg is not None and isinstance(l1reg, float)):
         self.nn.add(Dense(1, W_regularizer=l1(l1reg)))
     else:
         self.nn.add(Dense(1))
     if (self.settings['batchnorm'] is True):
         self.nn.add(BatchNormalization())
     self.nn.add(Activation('sigmoid'))
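# Hedged usage sketch (not from the source): the first layer of self.nn is a
# Merge over one branch per filter width, and every branch consumes the same
# padded token-id matrix, so fit() receives that matrix repeated once per branch.
# `clf`, the optimizer choice, and the random data (seqlen=100, max_features=5000)
# are assumptions for illustration only.
import numpy as np

X = np.random.randint(1, 5000, size=(128, 100))   # (samples, seqlen) token ids
y = np.random.randint(0, 2, size=(128, 1))        # binary labels

clf.nn.compile(loss='binary_crossentropy', optimizer='adam')
clf.nn.fit([X] * len(clf.settings['filter_widths']), y, nb_epoch=1, batch_size=32)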
Exemple #49
0
    def build_model(self):
        print('Building model...')
        self.model = Sequential()

        self.model.add(
            Convolution2D(32,
                          3,
                          3,
                          border_mode='same',
                          input_shape=(3, 32, 32)))
        self.model.add(LeakyReLU(0.1))
        self.model.add(Convolution2D(32, 3, 3, border_mode='same'))
        self.model.add(LeakyReLU(0.1))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Convolution2D(32, 3, 3, border_mode='same'))
        self.model.add(LeakyReLU(0.1))
        self.model.add(Convolution2D(32, 3, 3, border_mode='same'))
        self.model.add(LeakyReLU(0.1))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Convolution2D(32, 3, 3, border_mode='same'))
        self.model.add(LeakyReLU(0.1))
        self.model.add(Convolution2D(32, 3, 3, border_mode='same'))
        self.model.add(LeakyReLU(0.1))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Convolution2D(32, 3, 3, border_mode='same'))
        self.model.add(LeakyReLU(0.1))
        self.model.add(Convolution2D(32, 3, 3, border_mode='same'))
        self.model.add(LeakyReLU(0.1))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Flatten())

        self.model.add(Dense(100, W_regularizer=l1l2()))
        self.model.add(LeakyReLU(0.1))
        self.model.add(Dense(10))
        self.model.add(Activation('softmax'))
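# Hedged follow-up sketch (not in the source): build_model() only defines the
# layers, so a separate compile/fit step is assumed before training, e.g. on
# CIFAR-10-style data in 'th' ordering. `net`, the SGD settings, X_train and
# y_train are assumptions for illustration only.
from keras.optimizers import SGD

net.build_model()                                  # `net`: instance of this class (assumed)
net.model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9, nesterov=True),
                  metrics=['accuracy'])
net.model.fit(X_train, y_train, batch_size=64, nb_epoch=10)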
Exemple #50
0
def create_MP_model(params, input_dims, W_mask=None):

    num_model = len(input_dims)

    # Create Model
    print('Start Build Model...')
    model_BP = [Sequential() for i in range(num_model)]

    for i in range(num_model):
        model_BP[i].add(
            Masking(mask_value=1.0,
                    input_shape=[params['maxlen'], input_dims[i]]))
        model_BP[i].add(
            TimeDistributedDenseWithWmask(
                params['MP_per_model'],
                input_dim=input_dims[i],
                W_mask=W_mask,
                W_regularizer=WeightRegularizerWithPmask(
                    l2=params['reg_weight'], p_mask=W_mask),
                init='lecun_uniform',
                activation='relu'))
        #model_BP[i].add(MaskedMaxPooling1D(pool_length = 100, stride =10))
        model_BP[i].add(TemporalPyramidMaxPooling(tp_layer=params['tp_layer']))

    if (num_model > 1):
        model = Sequential()
        model.add(Merge(model_BP, mode='concat', concat_axis=-1))
    else:
        model = model_BP[0]
    model.add(
        Dense(params['nb_classes'],
              W_regularizer=l1l2(l1=params['l1_weight'],
                                 l2=params['reg_weight'])))

    model.compile(loss=multiclass_hinge,
                  optimizer=get_optimizer(params),
                  metrics=["accuracy"])
    return model
Exemple #52
0
def train_model_l1(l1penalty, generator):
    print('Build model...')
    model = Sequential()
    model.add(
        Dense(1,
              input_shape=(input_dim, ),
              W_regularizer=l1l2(l1=l1penalty, l2=l1penalty)))
    model.add(Activation('sigmoid'))
    adam_optimizer = Adam(lr=0.01, decay=0.99)

    # try using different optimizers and different optimizer configs
    model.compile(loss='binary_crossentropy',
                  optimizer=adam_optimizer,
                  metrics=['accuracy'])

    ############################################################################
    model.fit_generator(generator=generator,
                        nb_worker=1,
                        nb_epoch=10,
                        samples_per_epoch=samples_per_epoch)
    ####################################################
    print("MODEL DONE")
    return model
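# Hedged sketch of a compatible batch generator (not from the source):
# fit_generator() expects an endless iterator of (X_batch, y_batch) tuples, and
# train_model_l1 relies on module-level `input_dim` and `samples_per_epoch`
# globals. The generator below and the penalty value are assumptions.
import numpy as np

def toy_generator(batch_size=32):
    while True:
        Xb = np.random.rand(batch_size, input_dim).astype('float32')
        yb = np.random.randint(0, 2, size=(batch_size, 1))
        yield Xb, yb

model = train_model_l1(l1penalty=1e-4, generator=toy_generator())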
    def comp_two_path(self):
        '''
        Compiles the two-path model: takes in a 4x33x33 patch, runs it through local and global pathways, then merges the results. A usage sketch follows this method.
        '''
        print 'Compiling two-path model...'
        model = Graph()
        model.add_input(name='input', input_shape=(self.n_chan, 33, 33))

        # local pathway, first convolution/pooling
        model.add_node(Convolution2D(64, 7, 7, border_mode='valid', activation='relu', W_regularizer=l1l2(l1=0.01, l2=0.01)), name='local_c1', input= 'input')
        model.add_node(MaxPooling2D(pool_size=(4,4), strides=(1,1), border_mode='valid'), name='local_p1', input='local_c1')

        # local pathway, second convolution/pooling
        model.add_node(Dropout(0.5), name='drop_lp1', input='local_p1')
        model.add_node(Convolution2D(64, 3, 3, border_mode='valid', activation='relu', W_regularizer=l1l2(l1=0.01, l2=0.01)), name='local_c2', input='drop_lp1')
        model.add_node(MaxPooling2D(pool_size=(2,2), strides=(1,1), border_mode='valid'), name='local_p2', input='local_c2')

        # global pathway
        model.add_node(Convolution2D(160, 13, 13, border_mode='valid', activation='relu', W_regularizer=l1l2(l1=0.01, l2=0.01)), name='global', input='input')

        # merge local and global pathways
        model.add_node(Dropout(0.5), name='drop_lp2', input='local_p2')
        model.add_node(Dropout(0.5), name='drop_g', input='global')
        model.add_node(Convolution2D(5, 21, 21, border_mode='valid', activation='relu',  W_regularizer=l1l2(l1=0.01, l2=0.01)), name='merge', inputs=['drop_lp2', 'drop_g'], merge_mode='concat', concat_axis=1)

        # Flatten output of 5x1x1 to 1x5, perform softmax
        model.add_node(Flatten(), name='flatten', input='merge')
        model.add_node(Dense(5, activation='softmax'), name='dense_output', input='flatten')
        model.add_output(name='output', input='dense_output')

        sgd = SGD(lr=0.005, decay=0.1, momentum=0.9)
        model.compile(sgd, loss={'output':'categorical_crossentropy'})
        print 'Done.'
        return model
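# Hedged usage sketch (not from the source): the two-path model is built with the
# old Graph API, which is trained with a dict keyed by the declared input/output
# names. `net`, X (shape (n, n_chan, 33, 33)) and the 5-class one-hot Y are
# assumptions for illustration only.
model = net.comp_two_path()
model.fit({'input': X, 'output': Y}, batch_size=128, nb_epoch=10)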
    def compile_model(self):
        '''
        Compiles the standard single model with 4 convolutional/max-pooling layers.
        '''
        print 'Compiling single model...'
        single = Sequential()

        single.add(Convolution2D(self.n_filters[0], self.k_dims[0], self.k_dims[0], border_mode='valid', W_regularizer=l1l2(l1=self.w_reg, l2=self.w_reg), input_shape=(self.n_chan,33,33)))
        single.add(Activation(self.activation))
        single.add(BatchNormalization(mode=0, axis=1))
        single.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))
        single.add(Dropout(0.5))
        single.add(Convolution2D(self.n_filters[1], self.k_dims[1], self.k_dims[1], activation=self.activation, border_mode='valid', W_regularizer=l1l2(l1=self.w_reg, l2=self.w_reg)))
        single.add(BatchNormalization(mode=0, axis=1))
        single.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))
        single.add(Dropout(0.5))
        single.add(Convolution2D(self.n_filters[2], self.k_dims[2], self.k_dims[2], activation=self.activation, border_mode='valid', W_regularizer=l1l2(l1=self.w_reg, l2=self.w_reg)))
        single.add(BatchNormalization(mode=0, axis=1))
        single.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))
        single.add(Dropout(0.5))
        single.add(Convolution2D(self.n_filters[3], self.k_dims[3], self.k_dims[3], activation=self.activation, border_mode='valid', W_regularizer=l1l2(l1=self.w_reg, l2=self.w_reg)))
        single.add(Dropout(0.25))

        single.add(Flatten())
        single.add(Dense(5))
        single.add(Activation('softmax'))

        sgd = SGD(lr=0.001, decay=0.01, momentum=0.9)
        single.compile(loss='categorical_crossentropy', optimizer=sgd)
        print 'Done.'
        return single
Exemple #55
0
def build_model(in_dim, out_dim=1, n_hidden=100, l1_norm=0.0,
                l2_norm=0, n_deep=5, drop=0.1,
                learning_rate=0.1, optimizer='Adadelta',
                activation='tanh'):
    model = Sequential()
    # Input layer
    model.add(Dense(
        input_dim=in_dim,
        output_dim=n_hidden,
        init='uniform',
        activation=activation,
        W_regularizer=l1l2(l1=l1_norm, l2=l2_norm)))

    # do X layers
    for layer in range(n_deep-1):
        model.add(Dropout(drop))
        model.add(Dense(
            output_dim=int(np.round(n_hidden/2**(layer+1))),
            init='uniform',
            activation=activation))

    # Output layer
    if out_dim == 1:
        activation = activation
    else:
        activation = 'softmax'

    model.add(Dense(out_dim,
                    init='uniform',
                    activation=activation))

    # Optimization algorithms
    if optimizer == 'Adadelta':
        if learning_rate is None:
            opt = Adadelta()
        else:
            opt = Adadelta(lr=learning_rate)
    elif optimizer == 'SGD':
        if learning_rate is None:
            opt = SGD()
        else:
            opt = SGD(lr=learning_rate)
    elif optimizer == 'RMSprop':
        if learning_rate is None:
            opt = RMSprop()
        else:
            opt = RMSprop(lr=learning_rate)
    elif optimizer == 'Adagrad':
        if learning_rate is None:
            opt = Adagrad()
        else:
            opt = Adagrad(lr=learning_rate)
    elif optimizer == 'Adam':
        if learning_rate is None:
            opt = Adam()
        else:
            opt = Adam(lr=learning_rate)
    elif optimizer == 'Adamax':
        if learning_rate is None:
            opt = Adamax()
        else:
            opt = Adamax(lr=learning_rate)
    else:
        logger.info('Optimizer {} not defined, using Adadelta'.format(optimizer))
        opt = Adadelta(lr=learning_rate)

    if out_dim == 1:
        model.compile(loss='binary_crossentropy',
                      optimizer=opt)
    else:
        model.compile(loss='categorical_crossentropy',
                      optimizer=opt)

    return model
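# Hedged usage sketch (not from the source): for a binary problem (out_dim=1)
# the model compiles with binary cross-entropy, and the output layer reuses the
# hidden `activation`, so 'sigmoid' is passed here to keep the output in [0, 1].
# The feature count, hyperparameters, and random data are assumptions.
import numpy as np

X = np.random.rand(200, 30).astype('float32')
y = np.random.randint(0, 2, size=(200, 1))

clf = build_model(in_dim=30, out_dim=1, n_hidden=64, n_deep=3, drop=0.2,
                  activation='sigmoid', optimizer='Adam', learning_rate=0.001)
clf.fit(X, y, nb_epoch=5, batch_size=32)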
def build_model(train):
    
    input_dim = train.shape[1]
    model = Sequential()
    model.add(Dense(15, input_dim = input_dim, init = 'glorot_normal', activation='tanh', W_regularizer=l1l2(l1 = 1e-4, l2 = 1e-4)))
    model.add(Dropout(0.15))
    model.add(Dense(1, init = 'zero', activation='linear'))
    model.compile(loss = "mse", optimizer = "adagrad")
    
    return model
Exemple #57
0
def build_model(
    in_dim,
    out_dim=1,
    n_hidden=100,
    l1_norm=0.0,
    l2_norm=0,
    n_deep=5,
    drop=0.1,
    learning_rate=0.1,
    optimizer="Adadelta",
    activation="tanh",
    n_class=1,
):
    model = Sequential()
    # Input layer
    model.add(
        Dense(
            input_dim=in_dim,
            output_dim=n_hidden,
            init="uniform",
            activation=activation,
            W_regularizer=l1l2(l1=l1_norm, l2=l2_norm),
        )
    )

    # do X layers
    for layer in range(n_deep - 1):
        model.add(Dropout(drop))
        model.add(Dense(output_dim=n_hidden, init="uniform", activation=activation))  # np.round(n_hidden/2**(layer+1)),

    # Output layer
    if out_dim == 1:
        activation = activation
    elif n_class == 1 and self.n_label > 2:
        activation = "softmax"
    elif n_class > 1:
        activation = "sigmoid"

    model.add(Dense(out_dim, init="uniform", activation=activation))

    # Optimization algorithms
    if optimizer == "Adadelta":
        if learning_rate is None:
            opt = Adadelta()
        else:
            opt = Adadelta(lr=learning_rate)
    elif optimizer == "SGD":
        if learning_rate is None:
            opt = SGD()
        else:
            opt = SGD(lr=learning_rate)
    elif optimizer == "RMSprop":
        if learning_rate is None:
            opt = RMSprop()
        else:
            opt = RMSprop(lr=learning_rate)
    elif optimizer == "Adagrad":
        if learning_rate is None:
            opt = Adagrad()
        else:
            opt = Adagrad(lr=learning_rate)
    elif optimizer == "Adam":
        if learning_rate is None:
            opt = Adam()
        else:
            opt = Adam(lr=learning_rate)
    elif optimizer == "Adamax":
        if learning_rate is None:
            opt = Adamax()
        else:
            opt = Adamax(lr=learning_rate)
    else:
        logger.info("Optimizer {} not defined, using Adadelta".format(optimizer))
        opt = Adadelta(lr=learning_rate)

    if out_dim == 1:
        model.compile(loss="binary_crossentropy", optimizer=opt)
    else:
        model.compile(loss="categorical_crossentropy", optimizer=opt)

    return model
Exemple #58
0
for row, (word, tag) in enumerate(train_tri):
    for n, w in enumerate(word):
        for tri in w:
            X_train[row, n, dictionary.index(tri)] = 1.0
for row, (word, tag) in enumerate(test_tri):
    for n, w in enumerate(word):
        for tri in w:
            X_test[row, n, dictionary.index(tri)] = 1.0

# Predictive model
y_train = np.zeros((len(train), 2), dtype=np.float32)
y_test = np.zeros((len(test), 2), dtype=np.float32)
for row, (tri, tag) in enumerate(train_tri):
    y_train[row, 1 if tag=='pos' else 0] = 1.0
for row, (tri, tag) in enumerate(test_tri):
    y_test[row, 1 if tag=='pos' else 0] = 1.0

# http://research.microsoft.com/apps/pubs/default.aspx?id=256230
model = Sequential()
model.add(Convolution1D(1000, 3, activation='tanh', W_regularizer=l1l2(l1=0.01, l2=0.01), b_regularizer=l1l2(l1=0.01, l2=0.01), init='uniform', border_mode='same', input_shape=(max_len,len(dictionary))))
model.add(MaxPooling1D(pool_length=max_len, border_mode='valid'))
model.add(Flatten())
model.add(Dense(300, activation='tanh', W_regularizer=l1l2(l1=0.01, l2=0.01), b_regularizer=l1l2(l1=0.01, l2=0.01)))
model.add(CosSim(2, activation='linear', bias=False, W_regularizer=l1l2(l1=0.01, l2=0.01)))
model.add(Activation(activation='softmax'))
def PredictiveLoss(y_true, y_pred):
    return -K.mean(K.log(y_true * y_pred), axis=-1)
model.compile(optimizer='Adagrad',
      loss=PredictiveLoss,
      metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_test, y_test), batch_size=2, nb_epoch=30)
loss, acc = model.evaluate(X_test, y_test)
#plot(model, to_file='cdssm_pred_model.png')
Exemple #59
0
	def constructModel(self, *args, **kwargs):
		print("Model Compilation Start...")
		
		############## Initialization ################
		convInit = "he_normal"
		
		############## Regularization ################
		reg      = KR.l1l2(l1=0.0002, l2=0.0002)
		#reg      = None
		
		############## Optimizer      ################
		baseLr = 0.001
		#opt      = KO.SGD     (lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
		#opt      = KO.RMSprop ()
		#opt      = KO.Adagrad ()
		#opt      = KO.Adadelta()
		opt      = KO.Adam    (lr=baseLr, beta_1=0.9, beta_2=0.999, epsilon=1e-8, clipvalue=5)
		#opt      = KO.Adamax  (lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
		
		opt.baseLr  = baseLr
		opt.lrDecay = 0.85
		
		############### Structural #################
		f1     =  48;
		f2_s1  =  16; f2_e1 =  32; f2_e3 =  32;
		f3_s1  =  16; f3_e1 =  32; f3_e3 =  32;
		f4_s1  =  32; f4_e1 =  64; f4_e3 =  64;
		f5_s1  =  32; f5_e1 =  64; f5_e3 =  64;
		f6_s1  =  48; f6_e1 =  96; f6_e3 =  96;
		f7_s1  =  48; f7_e1 =  96; f7_e3 =  96;
		f8_s1  =  64; f8_e1 = 128; f8_e3 = 128;
		f9_s1  =  64; f9_e1 = 128; f9_e3 = 128;
		f10    =   2;
		
		############### Model         #################
		model = KM.Graph()
		
		model.add_input ("input",  (3,192,192), (50,3,192,192))
		model.add_node  (KLCv.AveragePooling2D  ((2,2), (2,2), "valid"),                              "input_medium",       input="input")
		model.add_node  (KLCv.AveragePooling2D  ((2,2), (2,2), "valid"),                              "input_coarse",       input="input_medium")
		
		model.add_node  (KLCv.Convolution2D     (f1,    7, 7, border_mode="same", subsample=(1,1), init=convInit, W_regularizer=reg),   "conv1_fine/act",     input="input")
		model.add_node  (KLCv.Convolution2D     (f1,    7, 7, border_mode="same", subsample=(1,1), init=convInit, W_regularizer=reg),   "conv1_medium/act",   input="input_medium")
		model.add_node  (KLCv.Convolution2D     (f1,    7, 7, border_mode="same", subsample=(1,1), init=convInit, W_regularizer=reg),   "conv1_coarse/act",   input="input_coarse")
		
		model.add_node  (KLCv.MaxPooling2D      ((4,4), (4,4), "same"),                               "finepool/out",       input="conv1_fine/act")
		model.add_node  (KLCv.MaxPooling2D      ((2,2), (2,2), "same"),                               "mediumpool/out",     input="conv1_medium/act")
		
		model.add_node  (KLN. BatchNormalization(axis=1),                                             "bn1/out",            inputs=["finepool/out", "mediumpool/out", "conv1_coarse/act"], concat_axis=1)
		
		model.add_node  (KLCo.Activation        ("relu"),                                             "conv1/out",          input="bn1/out")
		
		model.add_node  (KLCv.Convolution2D     (f2_s1, 1, 1, border_mode="same", init=convInit, W_regularizer=reg),     "fire2/comp/act",     input="conv1/out")
		model.add_node  (KLN. BatchNormalization(axis=1),                                             "bn2c/out",           input="fire2/comp/act")
		model.add_node  (KLCo.Activation        ("relu"),                                             "fire2/comp/out",     input="bn2c/out")
		model.add_node  (KLCv.Convolution2D     (f2_e1, 1, 1, border_mode="same", init=convInit, W_regularizer=reg),     "fire2/exp1/act",     input="fire2/comp/out")
		model.add_node  (KLCv.Convolution2D     (f2_e3, 3, 3, border_mode="same", init=convInit, W_regularizer=reg),     "fire2/exp3/act",     input="fire2/comp/out")
		model.add_node  (KLCo.Activation        ("relu"),                                             "fire2/exp/out",      inputs=["fire2/exp1/act", "fire2/exp3/act"], concat_axis=1)
		
		model.add_node  (KLCv.Convolution2D     (f3_s1, 1, 1, border_mode="same", init=convInit, W_regularizer=reg),     "fire3/comp/act",     input="fire2/exp/out")
		model.add_node  (KLN. BatchNormalization(axis=1),                                             "bn3c/out",           input="fire3/comp/act")
		model.add_node  (KLCo.Activation        ("relu"),                                             "fire3/comp/out",     input="bn3c/out")
		model.add_node  (KLCv.Convolution2D     (f3_e1, 1, 1, border_mode="same", init=convInit, W_regularizer=reg),     "fire3/exp1/act",     input="fire3/comp/out")
		model.add_node  (KLCv.Convolution2D     (f3_e3, 3, 3, border_mode="same", init=convInit, W_regularizer=reg),     "fire3/exp3/act",     input="fire3/comp/out")
		model.add_node  (KLCo.Activation        ("relu"),                                             "fire3/exp/out",      inputs=["fire3/exp1/act", "fire3/exp3/act"], concat_axis=1)
		
		model.add_node  (KLCv.Convolution2D     (f4_s1, 1, 1, border_mode="same", init=convInit, W_regularizer=reg),     "fire4/comp/act",     input="fire3/exp/out")
		model.add_node  (KLN. BatchNormalization(axis=1),                                             "bn4c/out",           input="fire4/comp/act")
		model.add_node  (KLCo.Activation        ("relu"),                                             "fire4/comp/out",     input="bn4c/out")
		model.add_node  (KLCv.Convolution2D     (f4_e1, 1, 1, border_mode="same", init=convInit, W_regularizer=reg),     "fire4/exp1/act",     input="fire4/comp/out")
		model.add_node  (KLCv.Convolution2D     (f4_e3, 3, 3, border_mode="same", init=convInit, W_regularizer=reg),     "fire4/exp3/act",     input="fire4/comp/out")
		model.add_node  (KLCo.Activation        ("relu"),                                             "fire4/exp/out",      inputs=["fire4/exp1/act", "fire4/exp3/act"], concat_axis=1)
		
		model.add_node  (KLCv.MaxPooling2D      ((3,3), (2,2), "same"),                               "maxpool4/out",       input="fire4/exp/out")
		
		model.add_node  (KLCv.Convolution2D     (f5_s1, 1, 1, border_mode="same", init=convInit, W_regularizer=reg),     "fire5/comp/act",     input="maxpool4/out")
		model.add_node  (KLN. BatchNormalization(axis=1),                                             "bn5c/out",           input="fire5/comp/act")
		model.add_node  (KLCo.Activation        ("relu"),                                             "fire5/comp/out",     input="bn5c/out")
		model.add_node  (KLCv.Convolution2D     (f5_e1, 1, 1, border_mode="same", init=convInit, W_regularizer=reg),     "fire5/exp1/act",     input="fire5/comp/out")
		model.add_node  (KLCv.Convolution2D     (f5_e3, 3, 3, border_mode="same", init=convInit, W_regularizer=reg),     "fire5/exp3/act",     input="fire5/comp/out")
		model.add_node  (KLCo.Activation        ("relu"),                                             "fire5/exp/out",      inputs=["fire5/exp1/act", "fire5/exp3/act"], concat_axis=1)
		
		model.add_node  (KLCv.Convolution2D     (f6_s1, 1, 1, border_mode="same", init=convInit, W_regularizer=reg),     "fire6/comp/act",     input="fire5/exp/out")
		model.add_node  (KLN. BatchNormalization(axis=1),                                             "bn6c/out",           input="fire6/comp/act")
		model.add_node  (KLCo.Activation        ("relu"),                                             "fire6/comp/out",     input="bn6c/out")
		model.add_node  (KLCv.Convolution2D     (f6_e1, 1, 1, border_mode="same", init=convInit, W_regularizer=reg),     "fire6/exp1/act",     input="fire6/comp/out")
		model.add_node  (KLCv.Convolution2D     (f6_e3, 3, 3, border_mode="same", init=convInit, W_regularizer=reg),     "fire6/exp3/act",     input="fire6/comp/out")
		model.add_node  (KLCo.Activation        ("relu"),                                             "fire6/exp/out",      inputs=["fire6/exp1/act", "fire6/exp3/act"], concat_axis=1)
		
		model.add_node  (KLCv.Convolution2D     (f7_s1, 1, 1, border_mode="same", init=convInit, W_regularizer=reg),     "fire7/comp/act",     input="fire6/exp/out")
		model.add_node  (KLN. BatchNormalization(axis=1),                                             "bn7c/out",           input="fire7/comp/act")
		model.add_node  (KLCo.Activation        ("relu"),                                             "fire7/comp/out",     input="bn7c/out")
		model.add_node  (KLCv.Convolution2D     (f7_e1, 1, 1, border_mode="same", init=convInit, W_regularizer=reg),     "fire7/exp1/act",     input="fire7/comp/out")
		model.add_node  (KLCv.Convolution2D     (f7_e3, 3, 3, border_mode="same", init=convInit, W_regularizer=reg),     "fire7/exp3/act",     input="fire7/comp/out")
		model.add_node  (KLCo.Activation        ("relu"),                                             "fire7/exp/out",      inputs=["fire7/exp1/act", "fire7/exp3/act"], concat_axis=1)
		
		model.add_node  (KLCv.Convolution2D     (f8_s1, 1, 1, border_mode="same", init=convInit, W_regularizer=reg),     "fire8/comp/act",     input="fire7/exp/out")
		model.add_node  (KLN. BatchNormalization(axis=1),                                             "bn8c/out",           input="fire8/comp/act")
		model.add_node  (KLCo.Activation        ("relu"),                                             "fire8/comp/out",     input="bn8c/out")
		model.add_node  (KLCv.Convolution2D     (f8_e1, 1, 1, border_mode="same", init=convInit, W_regularizer=reg),     "fire8/exp1/act",     input="fire8/comp/out")
		model.add_node  (KLCv.Convolution2D     (f8_e3, 3, 3, border_mode="same", init=convInit, W_regularizer=reg),     "fire8/exp3/act",     input="fire8/comp/out")
		model.add_node  (KLCo.Activation        ("relu"),                                             "fire8/exp/out",      inputs=["fire8/exp1/act", "fire8/exp3/act"], concat_axis=1)
		
		model.add_node  (KLCv.MaxPooling2D      ((3,3), (2,2), "same"),                               "maxpool8/out",       input="fire8/exp/out")
		
		model.add_node  (KLCv.Convolution2D     (f9_s1, 1, 1, border_mode="same", init=convInit, W_regularizer=reg),     "fire9/comp/act",     input="maxpool8/out")
		model.add_node  (KLN. BatchNormalization(axis=1),                                             "bn9c/out",           input="fire9/comp/act")
		model.add_node  (KLCo.Activation        ("relu"),                                             "fire9/comp/out",     input="bn9c/out")
		model.add_node  (KLCv.Convolution2D     (f9_e1, 1, 1, border_mode="same", init=convInit, W_regularizer=reg),     "fire9/exp1/act",     input="fire9/comp/out")
		model.add_node  (KLCv.Convolution2D     (f9_e3, 3, 3, border_mode="same", init=convInit, W_regularizer=reg),     "fire9/exp3/act",     input="fire9/comp/out")
		model.add_node  (KLCo.Activation        ("relu"),                                             "fire9/exp/out",      inputs=["fire9/exp1/act", "fire9/exp3/act"], concat_axis=1)
		
		model.add_node  (KLCv.Convolution2D     (f10  , 1, 1, border_mode="same", init=convInit, W_regularizer=reg),     "conv10/act",         input="fire9/exp/out")
		model.add_node  (KLN. BatchNormalization(axis=1),                                             "bn10/out",           input="conv10/act")
		model.add_node  (KLCo.Activation        ("relu"),                                             "conv10/out",         input="bn10/out")
		
		model.add_node  (KLCv.AveragePooling2D  ((12,12), (1,1), "valid"),                            "avgpool10/out",      input="conv10/out")
		
		model.add_node  (KLCo.Reshape           ((f10,)),                                             "softmax/in",         input="avgpool10/out")
		model.add_node  (KLCo.Activation        ("softmax"),                                          "softmax/out",        input="softmax/in")
		
		model.add_output("output", "softmax/out")
		
		model.compile(loss={"output":'categorical_crossentropy'}, optimizer=opt)
		#model.powerf = T.function()
		KUV.plot(model, to_file='model.png', show_shape=True)
		
		print("Model Compilation End.")
		
		#pdb.set_trace()
		
		return model
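# Hedged usage sketch (not from the source): the Graph model declares one input
# ("input", 3x192x192, batch size 50) and one output ("output", 2-way softmax),
# so training data is passed as a dict keyed by those names. `net`, X_train and
# the 2-class one-hot Y_train are assumptions for illustration only.
model = net.constructModel()          # `net`: instance of this class (assumed)
model.fit({'input': X_train, 'output': Y_train}, batch_size=50, nb_epoch=1)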
    logger.info('samples_per_epoch:%s' % args.samples_per_epoch)

    logger.info('use_per_class_threshold_tuning:%s' % args.use_per_class_threshold_tuning)

    logger.info('evaluation_re_try_times:%s' % args.evaluation_re_try_times)
    logger.info('evaluation_re_try_waiting_time:%s' % args.evaluation_re_try_waiting_time)
    logger.info('fail_on_evlauation_failure:%s' % args.fail_on_evlauation_failure)

    if args.weight_regularizer_hidden == 'none':
        logger.info('weight_regularizer_hidden:No')
        weight_regularizer_hidden = None
    elif args.weight_regularizer_hidden == 'l1l2':
        logger.info('weight_regularizer_hidden:%s, l1_regularizer_weight_hidden:%s,l2_regularizer_weight_hidden:%s' % (
        args.weight_regularizer_hidden, args.l1_regularizer_weight_hidden, args.l2_regularizer_weight_hidden))
        from keras.regularizers import l1l2
        weight_regularizer_hidden = l1l2(l1=args.l1_regularizer_weight_hidden, l2=args.l2_regularizer_weight_hidden)
    elif args.weight_regularizer_hidden == 'l1':
        logger.info('weight_regularizer_hidden:%s, l1_regularizer_weight_hidden:%s' % (
        args.weight_regularizer_hidden, args.l1_regularizer_weight_hidden))
        from keras.regularizers import l1
        weight_regularizer_hidden = l1(l=args.l1_regularizer_weight_hidden)
    elif args.weight_regularizer_hidden == 'l2':
        logger.info('weight_regularizer_hidden:%s, l2_regularizer_weight_hidden:%s' % (
        args.weight_regularizer_hidden, args.l2_regularizer_weight_hidden))
        from keras.regularizers import l2
        weight_regularizer_hidden = l2(l=args.l2_regularizer_weight_hidden)

    if weight_regularizer_hidden:
        weight_regularizer_hidden = weight_regularizer_hidden.get_config()

    #weight_regularizer_proj