Code example #1
def build_CNN_input(vocab_size=VocabSize, usewords=USEWORDS,
                    skiptop=skipTop, devsplit=DEVSPLIT, verbose=True, **kwargs):
    """

    :param vocab_size:
    :param usewords:
    :param skiptop:
    :param devsplit:
    :param verbose:
    :param kwargs:
    :return:
    """

    if verbose:
        print('Building CNN Inputs')

    ((X_train, y_train), (X_dev, y_dev), (X_val, y_val)) = build_design_matrix(vocab_size=vocab_size,
                                                                               use_words=usewords,
                                                                               skip_top=skiptop,
                                                                               dev_split=devsplit,
                                                                               **kwargs
                                                                               )

    if verbose:
        print('X_train shape: {}'.format(X_train.shape))
        print('X_dev shape: {}'.format(X_dev.shape))
        print('X_val shape: {}'.format(X_val.shape))
        print('y_train shape: {}'.format(y_train.shape))
        print('y_dev shape: {}'.format(y_dev.shape))
        print('y_val shape: {}'.format(y_val.shape))

    return {'training': (X_train, y_train), 'dev': (X_dev, y_dev), 'val': (X_val, y_val)}
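
A minimal usage sketch for the dict returned above (the call-site variable names are assumptions, not from the snippet):

data = build_CNN_input()
X_train, y_train = data['training']
X_dev, y_dev = data['dev']
X_val, y_val = data['val']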
Code example #2
def build_CNN_input(usewords=USEWORDS, skiptop=skipTop, devsplit=DEVSPLIT, verbose=True):
	print('Building input')
	((X_train, y_train), (X_dev, y_dev), (X_test, y_test)) = build_design_matrix(VocabSize,
	                                                                             use_words=usewords,
	                                                                             skip_top=skiptop,
	                                                                             dev_split=devsplit)
	if verbose:
		print(len(X_train), 'train sequences')
		print(len(X_dev), 'dev sequences')

		print('X_train shape:', X_train.shape)
		print('X_dev shape:', X_dev.shape)

		print('y_train shape:', y_train.shape)
		print('y_dev shape:', y_dev.shape)

	return X_train, y_train, X_dev, y_dev, X_test, y_test
Code example #3
def build_CNN_input(usewords=USEWORDS,
                    skiptop=skipTop,
                    devsplit=DEVSPLIT,
                    verbose=True):
    print('Building input')
    ((X_train, y_train), (X_dev, y_dev),
     (X_test, y_test)) = build_design_matrix(VocabSize,
                                             use_words=usewords,
                                             skip_top=skiptop,
                                             dev_split=devsplit)
    if verbose:
        print(len(X_train), 'train sequences')
        print(len(X_dev), 'dev sequences')

        print('X_train shape:', X_train.shape)
        print('X_dev shape:', X_dev.shape)

        print('y_train shape:', y_train.shape)
        print('y_dev shape:', y_dev.shape)

    return X_train, y_train, X_dev, y_dev, X_test, y_test
Code example #4
pool_len2 = 2

num_filters3 = 300
filter_length3 = 4
stride_len3 = 1
pool_len3 = 2

embedding_dims = 200

hidden_dims = 100
num_epochs = 5

print( 'Loading data...' )

((X_train, y_train), (X_test, y_test)) = build_design_matrix( VocabSize,
                                                              use_words = USEWORDS,
                                                              skip_top = skipTop,
                                                              dev_split = DEVSPLIT )

print( len( X_train ), 'train sequences' )
print( len( X_test ), 'test sequences' )

print( 'X_train shape:', X_train.shape )
print( 'X_test shape:', X_test.shape )

print( 'Build model...' )
model = Sequential( )

# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add( Embedding( VocabSize, embedding_dims, input_length = maxReviewLen ) )
Code example #5
def train_siamese_model():

	print( 'Loading data...' )

	((X_train, y_train), (X_test, y_test)) = build_design_matrix( VocabSize,
	                                                              use_words = USEWORDS,
	                                                              skip_top = skipTop,
	                                                              dev_split = DEVSPLIT )

	print( len( X_train ), 'train sequences' )
	print( len( X_test ), 'test sequences' )

	print( 'X_train shape:', X_train.shape )
	print( 'X_test shape:', X_test.shape )

	print( 'Build model...' )


	# LEFT and RIGHT branches of the siamese network
	modelL = Sequential( )
	modelR = Sequential( )


	# we start off with an efficient embedding layer which maps
	# our vocab indices into embedding_dims dimensions
	modelL.add( Embedding( VocabSize, embedding_dims, input_length = maxReviewLen ) )
	modelR.add( Embedding( VocabSize, embedding_dims, input_length = maxReviewLen ) )

	modelL.add( Dropout( 0.10 ) )
	modelR.add( Dropout( 0.10 ) )
	# init: 'uniform' below; an earlier revision note mentions switching to 'glorot_normal'
	modelL.add( Convolution1D( nb_filter = num_filters1,
	                          filter_length = filter_length1,
	                          border_mode = 'valid',
	                          activation = 'relu',
	                          subsample_length = stride_len1,
	                          init = 'uniform'
	                          ) )

	modelR.add( Convolution1D( nb_filter = num_filters1,
	                           filter_length = filter_length1,
	                           border_mode = 'valid',
	                           activation = 'relu',
	                           subsample_length = stride_len1,
	                           init = 'uniform'
	                           ) )


	# input_length=maxReviewLen, input_dim=VocabSize

	modelL.add( Dropout( 0.25 ) )
	modelR.add( Dropout( 0.25 ) )


	modelL.add( MaxPooling1D( pool_length = 2 ) )
	modelR.add( MaxPooling1D( pool_length = 2 ) )

	modelL.add( Convolution1D( nb_filter = num_filters2,
	                          filter_length = filter_length2,
	                          border_mode = 'valid',
	                          activation = 'relu',
	                          subsample_length = stride_len2,
	                          init = 'uniform'

	                          ) )

	modelR.add( Convolution1D( nb_filter = num_filters2,
	                           filter_length = filter_length2,
	                           border_mode = 'valid',
	                           activation = 'relu',
	                           subsample_length = stride_len2,
	                           init = 'uniform'

	                           ) )

	modelL.add( Dropout( 0.40 ) )
	modelR.add( Dropout( 0.40 ) )

	# we use standard max pooling (halving the output of the previous layer):
	modelL.add( MaxPooling1D( pool_length = 2 ) )
	modelR.add( MaxPooling1D( pool_length = 2 ) )

	modelL.add( Convolution1D( nb_filter = num_filters3,
	                          filter_length = filter_length3,
	                          border_mode = 'valid',
	                          activation = 'relu',
	                          subsample_length = stride_len3,
	                          init = 'uniform'

	                          ) )

	modelR.add( Convolution1D( nb_filter = num_filters3,
	                           filter_length = filter_length3,
	                           border_mode = 'valid',
	                           activation = 'relu',
	                           subsample_length = stride_len3,
	                           init = 'uniform'

	                           ) )

	modelL.add( Dropout( 0.30 ) )
	modelR.add( Dropout( 0.30 ) )

	modelL.add( MaxPooling1D( pool_length = 2 ) )
	modelR.add( MaxPooling1D( pool_length = 2 ) )

	modelL.add( Convolution1D( nb_filter = num_filters4,
	                          filter_length = filter_length4,
	                          border_mode = 'valid',
	                          activation = 'relu',
	                          subsample_length = stride_len4,
	                          init = 'uniform'

	                          ) )

	modelR.add( Convolution1D( nb_filter = num_filters4,
	                           filter_length = filter_length4,
	                           border_mode = 'valid',
	                           activation = 'relu',
	                           subsample_length = stride_len4,
	                           init = 'uniform'

	                           ) )

	modelL.add( Dropout( 0.25 ) )
	modelR.add( Dropout( 0.25 ) )

	modelL.add( MaxPooling1D( pool_length = pool_len4 ) )
	modelR.add( MaxPooling1D( pool_length = pool_len4 ) )

	# We flatten the output of the conv layer,
	# so that we can add a vanilla dense layer:
	modelL.add( Flatten( ) )
	modelR.add( Flatten( ) )

	# We add a vanilla hidden layer:
	modelL.add( Dense( hidden_dims ) )
	modelL.add( Activation( 'relu' ) )

	modelR.add( Dense( hidden_dims ) )
	modelR.add( Activation( 'relu' ) )

	# merge the two branch outputs into a single vector; with Keras 1.x
	# Sequential branches we merge their output tensors and wrap the result
	# in a functional Model so it can be compiled
	# (assumes: from keras.models import Model)
	merged_vector = merge( [modelL.output, modelR.output], mode = 'concat', concat_axis = -1 )

	Gw = Dense( 1, activation = DistanceMetric )( merged_vector )

	model = Model( input = [modelL.input, modelR.input], output = Gw )

	model.compile( loss = contrastiveLoss,
	               optimizer = 'rmsprop',
	               )
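
contrastiveLoss is referenced above but never defined in the snippet; a common choice for siamese training is the contrastive loss of Hadsell et al. (2006), sketched here against the Keras 1.x backend API as an assumption about the author's intent:

from keras import backend as K

def contrastiveLoss(y_true, y_pred, margin=1.0):
	# similar pairs (y_true = 1) are pulled together; dissimilar pairs
	# (y_true = 0) are pushed at least `margin` apart
	return K.mean(y_true * K.square(y_pred) +
	              (1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))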
Code example #6
def build_CNN_model():


	print( 'Loading data...' )

	((X_train, y_train), (X_test, y_test)) = build_design_matrix( VocabSize,
	                                                              use_words = USEWORDS,
	                                                              skip_top = skipTop,
	                                                              dev_split = DEVSPLIT )

	print( len( X_train ), 'train sequences' )
	print( len( X_test ), 'test sequences' )

	print( 'X_train shape:', X_train.shape )
	print( 'X_test shape:', X_test.shape )

	print( 'Build model...' )
	model = Sequential( )

	# we start off with an efficient embedding layer which maps
	# our vocab indices into embedding_dims dimensions
	model.add( Embedding( VocabSize, embedding_dims, input_length = maxReviewLen ) )

	model.add( Dropout( 0.10 ) )

	# init: 'uniform' below; an earlier revision note mentions switching to 'glorot_normal'
	model.add( Convolution1D( nb_filter = num_filters1,
	                          filter_length = filter_length1,
	                          border_mode = 'valid',
	                          activation = 'relu',
	                          subsample_length = stride_len1,
	                          init = 'uniform'
	                          ) )

	# input_length=maxReviewLen, input_dim=VocabSize

	model.add( Dropout( 0.25 ) )
	model.add( MaxPooling1D( pool_length = 2 ) )

	model.add( Convolution1D( nb_filter = num_filters2,
	                          filter_length = filter_length2,
	                          border_mode = 'valid',
	                          activation = 'relu',
	                          subsample_length = stride_len2,
	                          init = 'uniform'

	                          ) )

	model.add( Dropout( 0.40 ) )

	# we use standard max pooling (halving the output of the previous layer):
	model.add( MaxPooling1D( pool_length = 2 ) )

	model.add( Convolution1D( nb_filter = num_filters3,
	                          filter_length = filter_length3,
	                          border_mode = 'valid',
	                          activation = 'relu',
	                          subsample_length = stride_len3,
	                          init = 'uniform'

	                          ) )

	model.add( Dropout( 0.30 ) )

	model.add( MaxPooling1D( pool_length = 2 ) )

	model.add( Convolution1D( nb_filter = num_filters4,
	                          filter_length = filter_length4,
	                          border_mode = 'valid',
	                          activation = 'relu',
	                          subsample_length = stride_len4,
	                          init = 'uniform'

	                          ) )

	model.add( Dropout( 0.25 ) )

	model.add( MaxPooling1D( pool_length = pool_len4 ) )

	# We flatten the output of the conv layer,
	# so that we can add a vanilla dense layer:
	model.add( Flatten( ) )

	# We add a vanilla hidden layer:
	model.add( Dense( hidden_dims ) )
	model.add( Activation( 'relu' ) )

	# We project onto a single unit output layer, and squash it with a sigmoid:
	model.add( Dense( 1 ) )
	model.add( Activation( 'sigmoid' ) )

	model.compile( loss = 'binary_crossentropy',
	               optimizer = 'rmsprop',
	               )


	weightPath = './model_data/saved_weights/'+filename
	checkpoint = ModelCheckpoint( weightPath + '_W.{epoch:02d}-{val_loss:.2f}.hdf5',
	                              verbose = 1, )
	earlyStop = EarlyStopping( patience = 1, verbose = 1 )

	call_backs = [checkpoint, earlyStop]

	# return the callbacks too so callers can pass them to model.fit
	return (model, X_train, y_train, X_test, y_test, call_backs)
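
A minimal training sketch for the tuple returned above (the batch size is an assumption; nb_epoch is the Keras 1.x spelling; num_epochs is assumed to be a module-level constant as in the other snippets; the test split doubles as validation data so the checkpoint's val_loss placeholder can be resolved):

model, X_train, y_train, X_test, y_test, call_backs = build_CNN_model()
model.fit(X_train, y_train,
          batch_size=32,
          nb_epoch=num_epochs,
          validation_data=(X_test, y_test),
          callbacks=call_backs)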
Code example #7
num_filters3 = 300
filter_length3 = 4
stride_len3 = 1
pool_len3 = 2

embedding_dims = 200

hidden_dims = 100
num_epochs = 5

print('Loading data...')

((X_train, y_train), (X_test,
                      y_test)) = build_design_matrix(VocabSize,
                                                     use_words=USEWORDS,
                                                     skip_top=skipTop,
                                                     dev_split=DEVSPLIT)

print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')

print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)

print('Build model...')
model = Sequential()

# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(VocabSize, embedding_dims, input_length=maxReviewLen))
Code example #8
filter_length2 = 2
stride_len2 = 2
pool_len2 = 2

embedding_dims = 400

hidden_dims = 100
num_epochs = 3

DEVSPLIT = 14
USEWORDS = True
print('Loading data...')

((X_train, y_train),
 (X_test, y_test)) = build_design_matrix(VocabSize,
                                         use_words=USEWORDS,
                                         skip_top=modelParameters.skip_top,
                                         dev_split=DEVSPLIT)

print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')

print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)

print('Build model...')
model = Sequential()

# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(VocabSize, embedding_dims, input_length=maxReviewLen))
model.add(Dropout(0.05))
Code example #9
def train_siamese_model():

    print('Loading data...')

    ((X_train, y_train), (X_test,
                          y_test)) = build_design_matrix(VocabSize,
                                                         use_words=USEWORDS,
                                                         skip_top=skipTop,
                                                         dev_split=DEVSPLIT)

    print(len(X_train), 'train sequences')
    print(len(X_test), 'test sequences')

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    print('Build model...')

    # LEFT and RIGHT branches of the siamese network
    modelL = Sequential()
    modelR = Sequential()

    # we start off with an efficient embedding layer which maps
    # our vocab indices into embedding_dims dimensions
    modelL.add(Embedding(VocabSize, embedding_dims, input_length=maxReviewLen))
    modelR.add(Embedding(VocabSize, embedding_dims, input_length=maxReviewLen))

    modelL.add(Dropout(0.10))
    modelR.add(Dropout(0.10))
    # init: 'uniform' below; an earlier revision note mentions switching to 'glorot_normal'
    modelL.add(
        Convolution1D(nb_filter=num_filters1,
                      filter_length=filter_length1,
                      border_mode='valid',
                      activation='relu',
                      subsample_length=stride_len1,
                      init='uniform'))

    modelR.add(
        Convolution1D(nb_filter=num_filters1,
                      filter_length=filter_length1,
                      border_mode='valid',
                      activation='relu',
                      subsample_length=stride_len1,
                      init='uniform'))

    # input_length=maxReviewLen, input_dim=VocabSize

    modelL.add(Dropout(0.25))
    modelR.add(Dropout(0.25))

    modelL.add(MaxPooling1D(pool_length=2))
    modelR.add(MaxPooling1D(pool_length=2))

    modelL.add(
        Convolution1D(nb_filter=num_filters2,
                      filter_length=filter_length2,
                      border_mode='valid',
                      activation='relu',
                      subsample_length=stride_len2,
                      init='uniform'))

    modelR.add(
        Convolution1D(nb_filter=num_filters2,
                      filter_length=filter_length2,
                      border_mode='valid',
                      activation='relu',
                      subsample_length=stride_len2,
                      init='uniform'))

    modelL.add(Dropout(0.40))
    modelR.add(Dropout(0.40))

    # we use standard max pooling (halving the output of the previous layer):
    modelL.add(MaxPooling1D(pool_length=2))
    modelR.add(MaxPooling1D(pool_length=2))

    modelL.add(
        Convolution1D(nb_filter=num_filters3,
                      filter_length=filter_length3,
                      border_mode='valid',
                      activation='relu',
                      subsample_length=stride_len3,
                      init='uniform'))

    modelR.add(
        Convolution1D(nb_filter=num_filters3,
                      filter_length=filter_length3,
                      border_mode='valid',
                      activation='relu',
                      subsample_length=stride_len3,
                      init='uniform'))

    modelL.add(Dropout(0.30))
    modelR.add(Dropout(0.30))

    modelL.add(MaxPooling1D(pool_length=2))

    modelR.add(MaxPooling1D(pool_length=2))

    modelL.add(
        Convolution1D(nb_filter=num_filters4,
                      filter_length=filter_length4,
                      border_mode='valid',
                      activation='relu',
                      subsample_length=stride_len4,
                      init='uniform'))

    modelR.add(
        Convolution1D(nb_filter=num_filters4,
                      filter_length=filter_length4,
                      border_mode='valid',
                      activation='relu',
                      subsample_length=stride_len4,
                      init='uniform'))

    modelL.add(Dropout(0.25))
    modelR.add(Dropout(0.25))

    modelL.add(MaxPooling1D(pool_length=pool_len4))
    modelR.add(MaxPooling1D(pool_length=pool_len4))

    # We flatten the output of the conv layer,
    # so that we can add a vanilla dense layer:
    modelL.add(Flatten())
    modelR.add(Flatten())

    # We add a vanilla hidden layer:
    modelL.add(Dense(hidden_dims))
    modelL.add(Activation('relu'))

    modelR.add(Dense(hidden_dims))
    modelR.add(Activation('relu'))

    # merge the two branch outputs into a single vector; with Keras 1.x
    # Sequential branches we merge their output tensors and wrap the result
    # in a functional Model so it can be compiled
    # (assumes: from keras.models import Model)
    merged_vector = merge([modelL.output, modelR.output], mode='concat', concat_axis=-1)

    Gw = Dense(1, activation=DistanceMetric)(merged_vector)

    model = Model(input=[modelL.input, modelR.input], output=Gw)

    model.compile(
        loss=contrastiveLoss,
        optimizer='rmsprop',
    )
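
DistanceMetric is likewise referenced but never defined; purely as an assumption, one plausible scoring activation for a siamese merge is the exponential negative distance used in some siamese text models:

from keras import backend as K

def DistanceMetric(x):
    # squashes the merged-branch projection into (0, 1]; values near 1
    # indicate the two branches produced similar representations
    return K.exp(-K.abs(x))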