Example #1
def model_fn(net, X_len, max_reach, block_size, out_classes, batch_size, dtype,
             **kwargs):
    """
        Args:
        net -> Input tensor shaped (batch_size, max_reach + block_size + max_reach, 3)
        Returns:
        logits -> Unscaled logits tensor in time_major form, (block_size, batch_size, out_classes)
    """

    net = batch_normalization(net, decay=0.99, scope="Initial_bn")
    for block in range(1, 3):
        with tf.variable_scope("block%d" % block):
            for layer in range(kwargs['num_layers']):
                with tf.variable_scope("layer%d" % layer):
                    net = conv_1d(net, 64, 9, scope='conv1d')
                    net = batch_normalization(net, scope='bn')
                    net = tf.nn.relu(net)
            net = max_pool_1d(net, 2)
        net = tf.nn.relu(net)

    net = central_cut(net, block_size, 4)
    net = tf.transpose(net, [1, 0, 2], name="Shift_to_time_major")
    net = conv_1d(net, 9, 1, scope='logits')
    return {
        'logits': net,
        'init_state': tf.constant(0),
        'final_state': tf.constant(0),
    }
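The helper central_cut above is project-specific and does not appear in this listing. Judging from the shape contract in the docstring (two stride-2 pools shrink the time axis by the factor 4 passed here, and the flanking max_reach context must be discarded), a plausible reconstruction is the hypothetical sketch below; it is an assumption, not the original helper.

import tensorflow as tf

def central_cut_sketch(net, block_size, pool_factor):
    # Hypothetical stand-in for the project's central_cut: keep only the
    # central block_size // pool_factor timesteps of the pooled signal.
    target_len = block_size // pool_factor
    offset = (net.get_shape().as_list()[1] - target_len) // 2
    return net[:, offset:offset + target_len, :]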
Example #2
    def cnn(self):
        network = input_data(shape=[None, self.max_document_length],
                             name='input')
        network = tflearn.embedding(network, input_dim=1000000, output_dim=128)
        branch1 = conv_1d(network,
                          128,
                          3,
                          padding='valid',
                          activation='relu',
                          regularizer="L2")
        branch2 = conv_1d(network,
                          128,
                          4,
                          padding='valid',
                          activation='relu',
                          regularizer="L2")
        branch3 = conv_1d(network,
                          128,
                          5,
                          padding='valid',
                          activation='relu',
                          regularizer="L2")
        network = merge([branch1, branch2, branch3], mode='concat', axis=1)
        network = tf.expand_dims(network, 2)
        network = global_max_pool(network)
        network = dropout(network, 0.8)
        network = fully_connected(network, 2, activation='softmax')
        network = regression(network,
                             optimizer='adam',
                             learning_rate=0.001,
                             loss='categorical_crossentropy',
                             name='target')

        model = tflearn.DNN(network, tensorboard_verbose=0)
        return model
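A hedged usage sketch for the returned model (the data names are illustrative): inputs are integer id sequences padded to max_document_length, labels are one-hot vectors.

model = self.cnn()
model.fit(trainX, trainY, n_epoch=5, validation_set=(testX, testY),
          show_metric=True, batch_size=32)
predictions = model.predict(testX)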
Example #3
	def residual_block_1D(incoming,out_channels,downsample=False, first=False, filt_len=16, dropout_prob=0.85, downsampleSecond=True):
		resnet = incoming
		in_channels = incoming.shape[-1].value
		strides = (2 if downsample else 1)
		dsLayer = (1 if downsampleSecond else 0)
		identity = resnet

		nConv = 2
		if first:
			resnet = conv_1d(resnet, out_channels, filt_len, strides,weights_init="variance_scaling")
			nConv = 1

		for i in range(nConv):
			resnet = batch_normalization(resnet)
			resnet = relu(resnet)
			resnet = dropout(resnet, dropout_prob)
			if downsample and i == dsLayer:  # dsLayer == 1 selects the second conv layer
				resnet = conv_1d(resnet, out_channels, filt_len, strides=1, weights_init="variance_scaling")  # stride 1 here, so only the other conv layer downsamples
			else:
				resnet = conv_1d(resnet,out_channels,filt_len, strides, weights_init="variance_scaling")

		#Beginning of skip connection
		identity = max_pool_1d(identity,strides, strides)

		if in_channels != out_channels:

			ch = (out_channels - in_channels) // 2
			identity = tf.pad(identity,[[0,0],[0,0],[ch,ch]])
			in_channels = out_channels

		resnet = resnet + identity
		
		return resnet
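A minimal usage sketch for the block; the stem layer, signal length, and channel counts are illustrative assumptions, not taken from the original project.

import tflearn
from tflearn.layers.conv import conv_1d
from tflearn.layers.core import input_data

net = input_data(shape=[None, 1024, 1])                      # single-channel signals
net = conv_1d(net, 64, 16, weights_init="variance_scaling")  # stem conv
net = residual_block_1D(net, 64, first=True)                 # first block skips the pre-activation
net = residual_block_1D(net, 128, downsample=True)           # halves the length, pads channels 64 -> 128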
Example #4
def multi_filter_conv_block(input, n_filters, reuse=False,
                            dropout_keep_prob=0.5, activation='relu',
                            padding='same', name='mfcb'):
    branch1 = conv_1d(input, n_filters, 1, padding=padding,
                      activation=activation, reuse=reuse,
                      scope='{}_conv_branch_1'.format(name))
    branch2 = conv_1d(input, n_filters, 3, padding=padding,
                      activation=activation, reuse=reuse,
                      scope='{}_conv_branch_2'.format(name))
    branch3 = conv_1d(input, n_filters, 5, padding=padding,
                      activation=activation, reuse=reuse,
                      scope='{}_conv_branch_3'.format(name))

    unstacked_b1 = tf.unstack(branch1, axis=1,
                              name='{}_unstack_b1'.format(name))
    unstacked_b2 = tf.unstack(branch2, axis=1,
                              name='{}_unstack_b2'.format(name))
    unstacked_b3 = tf.unstack(branch3, axis=1,
                              name='{}_unstack_b3'.format(name))

    n_grams = []
    for t_b1, t_b2, t_b3 in zip(unstacked_b1, unstacked_b2, unstacked_b3):
        n_grams.append(tf.stack([t_b1, t_b2, t_b3], axis=0))
    n_grams_merged = tf.concat(n_grams, axis=0)
    n_grams_merged = tf.transpose(n_grams_merged, perm=[1, 0, 2])
    gram_pooled = max_pool_1d(n_grams_merged, kernel_size=3, strides=3)
    cnn_out = dropout(gram_pooled, dropout_keep_prob)
    return cnn_out
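A usage sketch with illustrative names and sizes, assuming the token ids are embedded upstream with tflearn; the block then pools each timestep's 1-, 3-, and 5-gram features together.

import tflearn
from tflearn.layers.core import input_data

tokens = input_data(shape=[None, 100], name='tokens')
embedded = tflearn.embedding(tokens, input_dim=20000, output_dim=128)
features = multi_filter_conv_block(embedded, n_filters=128, name='mfcb1')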
Example #5
File: 17-2.py Project: sunshare10/-
def do_cnn(trainX, trainY, testX, testY):
    global n_words
    # Data preprocessing
    # Sequence padding
    trainX = pad_sequences(trainX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    testX = pad_sequences(testX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None, MAX_DOCUMENT_LENGTH], name='input')
    network = tflearn.embedding(network, input_dim=n_words+1, output_dim=128)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY, n_epoch = 20, shuffle=True, validation_set=(testX, testY), show_metric=True, batch_size=32)
Example #6
def do_cnn_doc2vec(trainX, testX, trainY, testY):
    global max_features
    print "CNN and doc2vec"

    #trainX = pad_sequences(trainX, maxlen=max_features, value=0.)
    #testX = pad_sequences(testX, maxlen=max_features, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None,max_features], name='input')
    network = tflearn.embedding(network, input_dim=1000000, output_dim=128,validate_indices=False)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=(testX, testY),
              show_metric=True, batch_size=100,run_id="review")
Example #7
def conv_model(network):
    branch1 = conv_1d(network,
                      200,
                      3,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    branch2 = conv_1d(network,
                      200,
                      4,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    branch3 = conv_1d(network,
                      200,
                      5,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.5)

    return network
Example #8
def train_repet_network(beat_spectrum_array, sdr_array, n_epochs, take):
    """

    :param beat_spectrum_array:
    :param sdr_array:
    :param n_epochs:
    :param take:
    :return:
    """
    beat_spec_len = 432
    with tf.Graph().as_default():
        input_layer = input_data(shape=[None, beat_spec_len, 1])
        conv1 = conv_1d(input_layer, 32, 4, activation="relu", regularizer="L2")
        max_pool1 = max_pool_1d(conv1, 2)
        conv2 = conv_1d(max_pool1, 64, 80, activation="relu", regularizer="L2")
        max_pool2 = max_pool_1d(conv2, 2)
        fully1 = fully_connected(max_pool2, 128, activation="relu")
        dropout1 = dropout(fully1, 0.8)
        fully2 = fully_connected(dropout1, 256, activation="relu")
        dropout2 = dropout(fully2, 0.8)
        linear = fully_connected(dropout2, 1, activation="linear")
        regress = tflearn.regression(linear, optimizer="rmsprop", loss="mean_square", learning_rate=0.001)

        # Training
        model = tflearn.DNN(regress)  # , session=sess)
        model.fit(
            beat_spectrum_array,
            sdr_array,
            n_epoch=n_epochs,
            snapshot_step=1000,
            show_metric=True,
            run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take),
        )

        return model
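A hedged call sketch (the array names are illustrative): beat spectra shaped (n_samples, 432, 1) and SDR targets shaped (n_samples, 1).

model = train_repet_network(beat_spectra, sdrs, n_epochs=200, take=1)
predicted_sdrs = model.predict(beat_spectra)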
Example #9
def model_fn(net, X_len, max_reach, block_size, out_classes, batch_size, dtype,
             **kwargs):
    """
        Args:
        net -> Input tensor shaped (batch_size, max_reach + block_size + max_reach, 3)
        Returns:
        logits -> Unscaled logits tensor in time_major form, (block_size, batch_size, out_classes)
    """

    for block in range(1, 4):
        with tf.variable_scope("block%d" % block):
            for layer in range(1, 1 + 1):
                with tf.variable_scope('layer_%d' % layer):
                    net = conv_1d(net, 32, 3)
            net = max_pool_1d(net, 2)
        net = tf.nn.relu(net)

    net = central_cut(net, block_size, 8)
    net = tf.transpose(net, [1, 0, 2], name="Shift_to_time_major")
    net = conv_1d(net, 9, 1, scope='logits')
    return {
        'logits': net,
        'init_state': tf.constant(0),
        'final_state': tf.constant(0),
    }
Example #10
def do_cnn_word2vec(trainX, testX, trainY, testY):
    global max_features
    print "CNN and word2vec"

    #trainX = pad_sequences(trainX, maxlen=max_document_length, value=-1.)
    #testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None,max_features], name='input')
    network = tflearn.embedding(network, input_dim=1000000, output_dim=128)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=(testX, testY),
              show_metric=True, batch_size=2,run_id="spam")
Example #11
def build_network(optimizer):
    net = input_data(shape=[None, length], name='input')
    net = tflearn.embedding(net,
                            input_dim=caes_ngram_data.dims,
                            output_dim=128)
    branch1 = conv_1d(net, 128, 3,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    branch2 = conv_1d(net, 128, 4,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    branch3 = conv_1d(net, 128, 5,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    net = merge([branch1, branch2, branch3], mode='concat', axis=1)
    net = tf.expand_dims(net, 2)
    net = global_max_pool(net)
    net = dropout(net, 0.33)
    net = fully_connected(net, 6, activation='softmax')
    net = regression(net,
                     optimizer=optimizer,
                     learning_rate=0.001,
                     loss='categorical_crossentropy',
                     name='target')
    return net
Example #12
def get_cnn_model(max_len, volcab_size):
    # Building convolutional network
    network = tflearn.input_data(shape=[None, max_len], name='input')
    network = tflearn.embedding(network, input_dim=volcab_size, output_dim=64)
    branch1 = conv_1d(network,
                      128,
                      3,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    branch2 = conv_1d(network,
                      128,
                      4,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    branch3 = conv_1d(network,
                      128,
                      5,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.5)
    network = fully_connected(network, 3, activation='softmax')
    network = regression(network,
                         optimizer='adam',
                         learning_rate=0.001,
                         loss='categorical_crossentropy',
                         name='target')
    model = tflearn.DNN(network, tensorboard_verbose=0)
    return model
Example #13
def cnn_3_filters(trainX, trainY, valX, valY, testX, input_weights):
    '''
    A CNN with three convolutional layers, as in Yoon Kim (Convolutional Neural Networks for Sentence Classification)
    '''
    # Building convolutional network
    network = input_data(shape=[None, MAX_LENGHT], name='input')
    network = tflearn.embedding(network,
                                input_dim=input_weights.shape[0],
                                output_dim=input_weights.shape[1],
                                trainable=True,
                                name="EmbeddingLayer")
    branch1 = conv_1d(network,
                      128,
                      3,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    branch2 = conv_1d(network,
                      128,
                      4,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    branch3 = conv_1d(network,
                      128,
                      5,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.5)
    network = fully_connected(network, 12, activation='softmax')
    network = regression(network,
                         optimizer='adam',
                         learning_rate=0.001,
                         loss='categorical_crossentropy',
                         name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=1)

    # Add embedding weights into the embedding layer
    embeddingWeights = tflearn.get_layer_variables_by_name("EmbeddingLayer")[0]
    model.set_weights(embeddingWeights, input_weights)

    print("Start trianing CNN...")
    model.fit(trainX,
              trainY,
              n_epoch=NB_EPOCHS,
              validation_set=(valX, valY),
              shuffle=True,
              show_metric=True,
              batch_size=32)

    y_result = model.predict(testX)
    return y_result
Example #14
def do_cnn(x,y):
    print("start CNN......")
    global max_document_length
    print("CNN and tf")
    trainX, testX, trainY, testY = train_test_split(x, y, test_size=0.4, random_state=0)
    y_test=testY
    # Pad/convert the train and test data; sequences shorter than the max length are padded with 0
    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors (binarize the two-class labels)
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None,max_document_length], name='input')
    # Three 1-D convolutions below: 128 filters each, kernel sizes 3, 4 and 5
    network = tflearn.embedding(network, input_dim=1000000, output_dim=128)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    # Instantiate the CNN model and train it for 5 epochs
    model = tflearn.DNN(network, tensorboard_verbose=0)
    if not os.path.exists(pkl_file):
        model.fit(trainX, trainY,
                  n_epoch=5, shuffle=True, validation_set=0.1,
                  show_metric=True, batch_size=100,run_id="webshell")
        model.save(pkl_file)
    else:
        model.load(pkl_file)

    y_predict_list=model.predict(testX)

    y_predict=[]
    for i in y_predict_list:
        print(i[0])
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)
    print('y_predict_list:')
    print(y_predict_list)
    print('y_predict:')
    print(y_predict)
    #print  y_test

    do_metrics(y_test, y_predict)
Example #15
def do_cnn(x,y):
    global max_document_length
    print "CNN and tf"
    trainX, testX, trainY, testY = train_test_split(x, y, test_size=0.4, random_state=0)
    y_test=testY

    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None,max_document_length], name='input')
    network = tflearn.embedding(network, input_dim=1000000, output_dim=128)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')

    model = tflearn.DNN(network, tensorboard_verbose=0)
    #if not os.path.exists(pkl_file):
        # Training
    model.fit(trainX, trainY,
                  n_epoch=5, shuffle=True, validation_set=0.1,
                  show_metric=True, batch_size=100,run_id="webshell")
    #    model.save(pkl_file)
    #else:
    #    model.load(pkl_file)

    y_predict_list=model.predict(testX)
    #y_predict = list(model.predict(testX,as_iterable=True))

    y_predict=[]
    for i in y_predict_list:
        print(i[0])
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)
    print('y_predict_list:')
    print(y_predict_list)
    print('y_predict:')
    print(y_predict)
    #print  y_test

    do_metrics(y_test, y_predict)
Example #16
def load_audio_convnet(filename):
    input_layer = input_data(shape=[None, MAX_CONV, 1])
    conv_layer_1  = conv_1d(input_layer, nb_filter=8, filter_size=79, activation='relu', name='conv_layer_1')
    pool_layer_1  = max_pool_1d(conv_layer_1, 100, name='pool_layer_1')
    conv_layer_2  = conv_1d(pool_layer_1, nb_filter=16, filter_size=11, activation='relu', name='conv_layer_2')
    pool_layer_2  = max_pool_1d(conv_layer_2, 5, name='pool_layer_2')
    fc_layer_1  = fully_connected(pool_layer_2, 100, activation='tanh', name='fc_layer_1')
    fc_layer_2 = fully_connected(fc_layer_1, 3, activation='softmax', name='fc_layer_2')
    network = regression(fc_layer_2, optimizer='sgd', loss='categorical_crossentropy', learning_rate=0.1)
    model = tflearn.DNN(network)
    model.load(filename)
    return model
Example #17
def main():
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]
    pickle_folders_to_load = sorted(pickle_folders_to_load)

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'
    beat_spec_len = 432

    # training params
    n_classes = 16
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00


    # set up training, testing, & validation partitions
    beat_spec_array, sdr_array = load_beat_spec_and_sdrs(pickle_folders_to_load, pickle_folder,
                                                         feature, fg_or_bg, sdr_type)

    train, test, validate = split_into_sets(len(pickle_folders_to_load), training_percent,
                                            testing_percent, validation_percent)

    trainX = np.expand_dims([beat_spec_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, 1])
    network = conv_1d(network, 32, 4, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = conv_1d(network, 64, 80, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu') # look for non-tanh things???
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='sgd', loss='mean_square', learning_rate=0.01)

    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=100,
              snapshot_step=1000, show_metric=True, run_id='relus_100_3')

    predicted = np.array(model.predict(testX))[:,0]
    # pprint.pprint()
    print("Test MSE: ", np.square(testY - predicted).mean())
    plot(testY, predicted)
Example #18
 def create_cnn_architecture_two_layers(
         self,
         model_name,
         outputDim=300,
         number_of_filters=60,
         filterSize=[3, 4],
         padding='same',
         activation_function_convLayer='relu',
         regularizer='L2',
         dropouts=0.5,
         activation_function_fc='softmax',
         optimizer='adam',
         learning_rate=0.001,
         loss_function='categorical_crossentropy'):
     if len(filterSize) == 0:
         filterSize = [3, 4]
     """ Define input shape and create word embedding """
     self.cnn_model = input_data(shape=[None, self.max_words], name='input')
     self.cnn_model = tflearn.embedding(
         self.cnn_model,
         input_dim=len(self.vocabProcessor.vocabulary_),
         output_dim=outputDim)
     """ Add three/two convolutional layer. Set number of filters and filter sizes and then merge together """
     conv1 = conv_1d(self.cnn_model,
                     nb_filter=number_of_filters,
                     filter_size=filterSize[0],
                     padding=padding,
                     activation=activation_function_convLayer,
                     regularizer=regularizer)
     conv2 = conv_1d(self.cnn_model,
                     nb_filter=number_of_filters,
                     filter_size=filterSize[1],
                     padding=padding,
                     activation=activation_function_convLayer,
                     regularizer=regularizer)
     #conv3 = conv_1d(cnn_model, nb_filter = 128,  filter_size = 5, padding = 'same',
     #                 activation = 'relu', regularizer = 'L2')
     self.cnn_model = merge([conv1, conv2], mode='concat', axis=1)
     """ Expand one dimension to fit the max_pooling layer """
     self.cnn_model = tf.expand_dims(self.cnn_model, 1)
     self.cnn_model = global_max_pool(self.cnn_model)
     """ Instantiate dropout layer and specify dropout parameter """
     self.cnn_model = dropout(self.cnn_model, dropouts)
     """ Instantiate fully connected layer and regression layer. """
     self.cnn_model = fully_connected(self.cnn_model,
                                      self.number_of_classes,
                                      activation=activation_function_fc)
     self.cnn_model = regression(self.cnn_model,
                                 optimizer=optimizer,
                                 learning_rate=learning_rate,
                                 loss=loss_function,
                                 name='models/' + model_name)
Example #19
def main():
    pickle_folder = 'pickles_combined'


    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'

    # training params
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00
    beat_spec_max = 355


    # set up training, testing, & validation partitions
    beat_spec_array, sdr_array = unpickle_beat_spec_and_sdrs(pickle_folder, beat_spec_max)

    train, test, validate = split_into_sets(len(beat_spec_array), training_percent,
                                            testing_percent, validation_percent)

    trainX = np.expand_dims([beat_spec_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    # Building convolutional network
    network = input_data(shape=[None, beat_spec_max, 1])
    network = conv_1d(network, 32, 4, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = conv_1d(network, 64, 80, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu') # look for non-tanh things???
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='sgd', loss='mean_square', learning_rate=0.01)

    start = time.time()
    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=2000,
              snapshot_step=1000, show_metric=True, run_id='mir1k_2000_truncate')
    elapsed = (time.time() - start)

    predicted = np.array(model.predict(testX))[:,0]
    print("Test MSE: ", np.square(testY - predicted).mean())
    print(elapsed, "seconds")
    plot(testY, predicted)
Example #20
    def buildNet(self):
        #necessary to allow the model to be loaded after it has been trained
        tf.reset_default_graph()

        #build input layer to accept sequences of 72 tokens
        network = input_data(shape=[None, 72], name='input')
        #embedded layer
        network = tflearn.embedding(network,
                                    input_dim=len(self.vp.vocabulary_) + 2,
                                    output_dim=128)

        #create three convolutional layers
        branch1 = conv_1d(network,
                          128,
                          3,
                          padding='valid',
                          activation='relu',
                          regularizer="L2")
        branch2 = conv_1d(network,
                          128,
                          4,
                          padding='valid',
                          activation='relu',
                          regularizer="L2")
        branch3 = conv_1d(network,
                          128,
                          5,
                          padding='valid',
                          activation='relu',
                          regularizer="L2")

        #merge all incoming tensors into a single tensor
        network = merge([branch1, branch2, branch3], mode='concat', axis=1)

        #expand dimensions of network to 3d tensor, as input is a 1d tensor
        network = tf.expand_dims(network, 2)
        #perform reduction operation over input tensor
        network = global_max_pool(network)

        #prevent overfitting by including dropout
        network = dropout(network, 0.8)

        #output layer
        network = fully_connected(network, 8, activation='softmax')
        network = regression(network,
                             optimizer='adam',
                             learning_rate=0.0001,
                             loss='categorical_crossentropy',
                             name='target')

        return network
Example #21
def do_cnn_1d(trainX, testX, trainY, testY):

    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=4)
    testY = to_categorical(testY, nb_classes=4)

    # Building convolutional network
    network = input_data(shape=[None, 1000], name='input')
    network = tflearn.embedding(network,
                                input_dim=1000000,
                                output_dim=128,
                                validate_indices=False)
    branch1 = conv_1d(network,
                      128,
                      3,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    branch2 = conv_1d(network,
                      128,
                      4,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    branch3 = conv_1d(network,
                      128,
                      5,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 4, activation='softmax')
    network = regression(network,
                         optimizer='adam',
                         learning_rate=0.001,
                         loss='categorical_crossentropy',
                         name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX,
              trainY,
              n_epoch=5,
              shuffle=True,
              validation_set=(testX, testY),
              show_metric=True,
              batch_size=100,
              run_id="malware")
Example #22
def model_fn(net, X_len, max_reach, block_size, out_classes, batch_size, dtype,
             **kwargs):
    """
        Args:
        net -> Input tensor shaped (batch_size, max_reach + block_size + max_reach, 3)
        Returns:
        logits -> Unscaled logits tensor in time_major form, (block_size, batch_size, out_classes)
    """

    print("model in", net.get_shape())
    for block in range(1, 4):
        with tf.variable_scope("block%d" % block):
            if block > 1:
                net = tf.expand_dims(net, 3)
                net = tf.layers.max_pooling2d(net, [1, 2], [1, 2])
                net = tf.squeeze(net, axis=3)

            for layer in range(kwargs['num_layers']):
                with tf.variable_scope('layer_%d' % layer):
                    res = net
                    for sublayer in range(kwargs['num_sub_layers']):
                        res = batch_normalization(res,
                                                  scope='bn_%d' % sublayer)
                        res = tf.nn.relu(res)
                        res = conv_1d(
                            res,
                            32 * 2**(4 - block),
                            3,
                            scope="conv_1d_%d" % sublayer,
                            weights_init=variance_scaling_initializer(
                                dtype=dtype))
                    k = tf.get_variable(
                        "k",
                        initializer=tf.constant_initializer(1.0),
                        shape=[])
                    net = tf.nn.relu(k) * res + net
            net = max_pool_1d(net, 2)
        net = tf.nn.relu(net)

    net = central_cut(net, block_size, 8)
    print("after slice", net.get_shape())
    net = tf.transpose(net, [1, 0, 2], name="Shift_to_time_major")
    print("after transpose", net.get_shape())
    net = conv_1d(net, 9, 1, scope='logits')
    print("model out", net.get_shape())
    return {
        'logits': net,
        'init_state': tf.constant(0),
        'final_state': tf.constant(0),
    }
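The learnable scalar k above, initialised to 1.0 and passed through a ReLU, gates the residual branch before the skip-add. The same idea in isolation, as a standalone sketch (a hypothetical helper, not part of the project):

import tensorflow as tf

def gated_residual(x, transform, name="gated_res"):
    # transform(x) is assumed to return a tensor with the same shape as x.
    with tf.variable_scope(name):
        k = tf.get_variable("k", shape=[],
                            initializer=tf.constant_initializer(1.0))
        return tf.nn.relu(k) * transform(x) + x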
Example #23
def build_tflearn_cnn(length):
    input_layer = input_data(shape=[None, length, 1])

    # Convolution Layer
    conv_layer_1 = conv_1d(input_layer,
                           nb_filter=512,
                           filter_size=10,
                           activation='relu',
                           name='conv_layer_1',
                           weights_init='xavier',
                           regularizer="L2")
    pool_layer_1 = max_pool_1d(conv_layer_1, 4, name='pool_layer_1')

    conv_layer_2 = conv_1d(pool_layer_1,
                           nb_filter=512,
                           filter_size=5,
                           activation='relu',
                           name='conv_layer_2',
                           weights_init='xavier',
                           regularizer="L2")
    pool_layer_3 = max_pool_1d(conv_layer_2, 4, name='pool_layer_3')
    # flat = flatten(pool_layer_3)

    fc_layer_4 = fully_connected(pool_layer_3,
                                 256,
                                 activation='relu',
                                 name='fc_layer_4',
                                 regularizer='L2')
    drop_2 = dropout(fc_layer_4, drop_out_prob)
    fc_layer_5 = fully_connected(drop_2,
                                 128,
                                 activation='relu',
                                 name='fc_layer_5',
                                 regularizer='L2')
    drop_3 = dropout(fc_layer_5, drop_out_prob)

    # Output
    fc_layer_2 = fully_connected(drop_3,
                                 3,
                                 activation='softmax',
                                 name='output')
    network = regression(fc_layer_2,
                         optimizer='adam',
                         loss='softmax_categorical_crossentropy',
                         learning_rate=0.0001,
                         metric='accuracy')
    model = tflearn.DNN(network, tensorboard_verbose=0)

    return model
Example #24
def model_2(train_x, train_y, test_x, test_y, embedding_size):
    # train_x = pad_sequences(train_x, maxlen=100, value=0.)
    # test_x = pad_sequences(test_x, maxlen=100, value=0.)

    out_dim = embedding_size  # embedding size
    num_cat = len(train_y[0])

    network = input_data(shape=[None, len(train_x[0])], name='input')
    network = tflearn.embedding(network,
                                input_dim=len(train_x[0]),
                                output_dim=out_dim)  # input_dim - vocab size
    branch1 = conv_1d(network,
                      out_dim,
                      3,
                      padding='same',
                      activation='relu',
                      regularizer="L2")
    branch2 = conv_1d(network,
                      out_dim,
                      4,
                      padding='same',
                      activation='relu',
                      regularizer="L2")
    branch3 = conv_1d(network,
                      out_dim,
                      5,
                      padding='same',
                      activation='relu',
                      regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.5)
    network = fully_connected(network, num_cat, activation='softmax')
    network = regression(network,
                         optimizer='adam',
                         learning_rate=0.001,
                         loss='categorical_crossentropy',
                         name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(train_x,
              train_y,
              n_epoch=5,
              shuffle=True,
              validation_set=(test_x, test_y),
              show_metric=True,
              batch_size=32)
    return model
Example #25
def nlp_cnn(trainX, trainY, testX, testY):
    # pad the sequence
    trainX = pad_sequences(trainX, maxlen=100, value=0.)
    testX = pad_sequences(testX, maxlen=100, value=0.)
    # one_hot encoding
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # build an embedding
    network = input_data(shape=[None, 100], name='input')
    network = tflearn.embedding(network, input_dim=10000, output_dim=128)
    # build a convnet
    branch1 = conv_1d(network,
                      128,
                      3,
                      padding='valid',
                      activation='relu',
                      regularizer='L2')
    branch2 = conv_1d(network,
                      128,
                      4,
                      padding='valid',
                      activation='relu',
                      regularizer='L2')
    branch3 = conv_1d(network,
                      128,
                      5,
                      padding='valid',
                      activation='relu',
                      regularizer='L2')
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network,
                         optimizer='adam',
                         learning_rate=0.001,
                         loss='categorical_crossentropy',
                         name='target')
    # training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX,
              trainY,
              n_epoch=5,
              shuffle=True,
              validation_set=(testX, testY),
              show_metric=True,
              batch_size=32)
Example #26
def main():
    """

    :return:
    """
    pickle_folder = '../Repet/pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]

    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'
    beat_spec_len = 432
    n_epochs = 200
    take = 1

    # set up training, testing, & validation partitions
    beat_spec_array, sdr_array = load_beat_spec_and_sdrs(pickle_folders_to_load, pickle_folder,
                                                         feature, fg_or_bg, sdr_type)

    beat_spec_array = np.expand_dims(beat_spec_array, -1)
    sdr_array = np.expand_dims(sdr_array, -1)

    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, 1])
    network = conv_1d(network, 32, 4, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = conv_1d(network, 64, 80, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')  # look for non-tanh things???
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='rmsprop', loss='mean_square', learning_rate=0.001)

    start = time.time()
    # Training
    model = tflearn.DNN(regress)  # , session=sess)
    model.fit(beat_spec_array, sdr_array, n_epoch=n_epochs,
              snapshot_step=1000, show_metric=True,
              run_id='repet_save_{0}_epochs_take_{1}'.format(n_epochs, take))
    elapsed = (time.time() - start)
    print('Finished training after {0} seconds. Saving...'.format(elapsed))

    model_output_folder = 'network_outputs/'
    model_output_file = join(model_output_folder, 'repet_save_{0}_epochs_take_{1}'.format(n_epochs, take))

    model.save(model_output_file)
Example #27
 def transform_embedded_sequences(self, embedded_sequences):
     net = conv_1d(embedded_sequences, self.filters, 5, 1, activation='relu', padding="valid")
     net = max_pool_1d(net, 5, padding="valid")
     if self.dropout_rate > 0:
         net = dropout(net, self.dropout_rate)
     net = conv_1d(net, self.filters, 5, activation='relu', padding="valid")
     net = max_pool_1d(net, 5, padding="valid")
     if self.dropout_rate > 0:
         net = dropout(net, self.dropout_rate)
     net = conv_1d(net, self.filters, 5, activation='relu', padding="valid")
     net = max_pool_1d(net, 35)
     if self.dropout_rate > 0:
         net = dropout(net, self.dropout_rate)
     net = fully_connected(net, self.filters, activation='relu')
     preds = fully_connected(net, self.class_count, activation='softmax')
     return preds
Example #28
    def transform_embedded_sequences(self, embedded_sequences):
        drop_1, drop_2 = self.dropout_rates
        net = dropout(embedded_sequences, drop_1)

        conv_blocks = []
        for sz in self.filter_sizes:
            conv = conv_1d(net,
                           nb_filter=self.num_filters,
                           filter_size=sz,
                           padding="valid",
                           activation="relu",
                           regularizer="L2")
            conv_blocks.append(conv)

        net = merge(conv_blocks, mode='concat',
                    axis=1) if len(conv_blocks) > 1 else conv_blocks[0]
        net = tf.expand_dims(net, 2)
        net = global_max_pool(net)
        net = dropout(net, drop_2)

        model_output = fully_connected(net,
                                       self.class_count,
                                       activation="softmax")

        return model_output
Example #29
def do_cnn_wordbad_tfidf(trainX, testX, trainY, testY):
    trainX = pad_sequences(trainX, value=0.)
    testX = pad_sequences(testX, value=0.)

    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    network = input_data(name='input')
    network = tflearn.embedding(network, input_dim=1000000, output_dim=128)
    branch1 = conv_1d(network,
                      128,
                      3,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    branch2 = conv_1d(network,
                      128,
                      4,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    branch3 = conv_1d(network,
                      128,
                      5,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network,
                         optimizer='adam',
                         learning_rate=0.001,
                         loss='categorical_crossentropy',
                         name='target')

    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX,
              trainY,
              n_epoch=5,
              shuffle=True,
              validation_set=(testX, testY),
              show_metric=True,
              batch_size=100,
              run_id="url")
Example #30
    def sentiment_analysis(self, sentencedata):

        unique_words = self.uniqueword_csvload()

        neurons = len(unique_words)

        reset_default_graph()
        network = input_data(shape=[None, 1, neurons])
        network = conv_1d(network, 8, 3, activation='relu')
        network = max_pool_1d(network, 3)

        network = conv_1d(network, 16, 3, activation='relu')
        network = max_pool_1d(network, 3)

        network = fully_connected(network, 8, activation='relu')
        network = dropout(network, 0.5)

        network = fully_connected(network, 2, activation='softmax')
        network = regression(network,
                             optimizer='adam',
                             learning_rate=0.01,
                             loss='categorical_crossentropy')

        model = tflearn.DNN(network)
        model.load(
            "./model/thaitext-classifier-combined_inhousedata-UTF8-4-100.tfl")

        input_sentencedata = self.preprocess_server_2(sentencedata)[0]
        #input_uniquewords = self.get_uniquewords(input_sentencedata)
        sentences = []
        #f = open(file_path, 'r')
        for word in input_sentencedata:
            sentences.append(word)
        vector_one = []
        inner_vector = []
        for word in unique_words:
            if word in sentences:
                vector_one.append(1)
            else:
                vector_one.append(0)
        inner_vector.append(vector_one)
        inner_vector = np.array(inner_vector, dtype=np.float32)
        print("inner_vector:", inner_vector)
        label = model.predict_label([inner_vector])
        pred = model.predict([inner_vector])

        return pred
Example #31
def main():
    """

    :return:
    """
    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'sim_mat'
    beat_spec_len = 432

    # set up training, testing, & validation partitions
    sim_mat_array, sdr_array = get_generated_data(feature, fg_or_bg, sdr_type)

    # training params
    n_classes = 10
    n_training_steps = 1000
    training_step_size = 100
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00

    sdr_array_1h, hist = sdrs_to_one_hots(sdr_array, n_classes, True)

    train, test, validate = split_into_sets(len(sim_mat_array), training_percent,
                                            testing_percent, validation_percent)

    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, 1], name='input')
    network = conv_1d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    # network = local_response_normalization(network)
    # network = batch_normalization(network)
    # network = conv_1d(network, 64, 3, activation='relu', regularizer="L2")
    # network = max_pool_1d(network, 2)
    # network = local_response_normalization(network)
    # network = batch_normalization(network)
    # network = fully_connected(network, 128, activation='tanh')
    # network = dropout(network, 0.5)
    network = fully_connected(network, 512, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, n_classes, activation='softmax')
    # network = fully_connected(network, 1, activation='linear')
    network = regression(network, optimizer='adagrad', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    X = np.expand_dims(sim_mat_array, -1)
    Y = np.array(sdr_array_1h)
    # X = np.expand_dims([beat_spec_array[i] for i in train], -1)
    # Y = np.array([sdr_array_1h[i] for i in train])
    # testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    # testY = np.array([sdr_array[i] for i in test])

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=1)
    model.fit({'input': X}, {'target': Y}, n_epoch=20,
              validation_set=0.1,
              snapshot_step=1000, show_metric=True, run_id='{} classes'.format(n_classes - 1))
Example #32
def cnn(inp_dim, vocab_size, embed_size, num_classes, learn_rate):
    tf.reset_default_graph()
    network = input_data(shape=[None, inp_dim], name='input')
    network = tflearn.embedding(network,
                                input_dim=vocab_size,
                                output_dim=embed_size,
                                name="EmbeddingLayer")
    network = dropout(network, 0.25)
    branch1 = conv_1d(network,
                      embed_size,
                      3,
                      padding='valid',
                      activation='relu',
                      regularizer="L2",
                      name="layer_1")
    branch2 = conv_1d(network,
                      embed_size,
                      4,
                      padding='valid',
                      activation='relu',
                      regularizer="L2",
                      name="layer_2")
    branch3 = conv_1d(network,
                      embed_size,
                      5,
                      padding='valid',
                      activation='relu',
                      regularizer="L2",
                      name="layer_3")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.50)
    network = fully_connected(network,
                              num_classes,
                              activation='softmax',
                              name="fc")
    network = regression(network,
                         optimizer='adam',
                         learning_rate=learn_rate,
                         loss='categorical_crossentropy',
                         name='target')

    model = tflearn.DNN(network, tensorboard_verbose=0)
    return model
Example #33
def M_cnn(Input, num, training, tt_ll):
    branch0 = conv_1d(Input, tt_ll, num, padding='same', activation='linear')
    branch0 = tf.layers.batch_normalization(branch0, training=training)
    branch0 = tf.nn.relu(branch0)
    """
    branch0 = conv_1d(branch0, 32, 1, padding='same', activation='linear')
    branch0 = tf.layers.batch_normalization(branch0, training=training)
    branch0 = tf.nn.relu(branch0)
    """
    return branch0
Example #34
def get_cnn(df):
    x_input_size = df.shape[0]
    net = input_data(shape=[None, x_input_size, 1], name='input')
    net = batch_normalization(net)
    net = avg_pool_1d(net, 3)  ### mean window 3
    net = conv_1d(net, 3, 6)  ### num of conv filters, size of filters
    net = fully_connected(net, 10, activation='tanh')
    net = dropout(net, 0.8)
    net = fully_connected(net, 2, activation='softmax')
    return net
Example #35
def CNN(max_length,n_words,n_classes,n_units):
    '''
    define CNN model
    '''
    net = tflearn.input_data(shape=[None, max_length], name='input')
    net = tflearn.embedding(net, input_dim=n_words, output_dim=n_units)
    branch1 = conv_1d(net, n_units, 3, padding='valid',
                      activation='relu', regularizer="L2")
    branch2 = conv_1d(net, n_units, 4, padding='valid',
                      activation='relu', regularizer="L2")
    branch3 = conv_1d(net, n_units, 5, padding='valid',
                      activation='relu', regularizer="L2")
    net = tflearn.merge([branch1, branch2, branch3], mode='concat', axis=1)
    net = tf.expand_dims(net, 2)
    net = global_max_pool(net)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, n_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')
    return net
Example #36
def build_model_specific():
    ### IS ANY OF THIS NECESSARY FOR LIGHT/DARK? IN GENERAL W/ STATIONARY CAMERA?
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()

    # Specify shape of the data, image prep
    network = input_data(shape=[None, 52, 64],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)

    # conv_1d incoming, nb_filter, filter_size
    # incoming: Tensor. Incoming 3-D Tensor.
    # nb_filter: int. The number of convolutional filters. # WHAT IS THIS?
    # filter_size: int or list of ints. Size of filters.   # WHAT IS THIS?
    network = conv_1d(network, 512, 3, activation='relu')

    # max_pool_1d(incoming, kernel_size)
    # incoming: Tensor. Incoming 3-D Layer.
    # kernel_size: int or list of ints. Pooling kernel size.
    network = max_pool_1d(network, 2)

    network = conv_1d(network, 64, 3, activation='relu')
    network = conv_1d(network, 64, 3, activation='relu')
    network = max_pool_1d(network, 2)

    network = fully_connected(network, 512, activation='relu')

    network = dropout(network, 0.5)

    network = fully_connected(network, 4, activation='softmax')

    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.0003)

    model = tflearn.DNN(network, tensorboard_verbose=0)
    return model
Example #37
def make_audio_convnet():
    global buzz_train_d_conv
    buzz_test_d_conv = buzz_train_d_conv

    input_layer = input_data(shape=[None, MAX_CONV, 1])
    conv_layer_1 = conv_1d(input_layer,
                           nb_filter=8,
                           filter_size=79,
                           activation='relu',
                           name='conv_layer_1')
    pool_layer_1 = max_pool_1d(conv_layer_1, 100, name='pool_layer_1')
    conv_layer_2 = conv_1d(pool_layer_1,
                           nb_filter=16,
                           filter_size=11,
                           activation='relu',
                           name='conv_layer_2')
    pool_layer_2 = max_pool_1d(conv_layer_2, 5, name='pool_layer_2')
    fc_layer_1 = fully_connected(pool_layer_2,
                                 100,
                                 activation='tanh',
                                 name='fc_layer_1')
    fc_layer_2 = fully_connected(fc_layer_1,
                                 3,
                                 activation='softmax',
                                 name='fc_layer_2')
    network = regression(fc_layer_2,
                         optimizer='sgd',
                         loss='categorical_crossentropy',
                         learning_rate=0.1)
    model = tflearn.DNN(network)

    model.fit(buzz_train_d_conv[0],
              buzz_train_d_conv[1],
              n_epoch=30,
              shuffle=True,
              validation_set=(buzz_test_d_conv[0], buzz_test_d_conv[1]),
              show_metric=True,
              batch_size=100,
              run_id='audio_convnet')
    model.save('pck_nets/AudioConvNet.tfl')
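To reuse the weights saved above, the identical graph must be rebuilt before calling model.load(). A minimal sketch, assuming the layer stack from make_audio_convnet() is factored into a hypothetical build_audio_graph() helper:

# Hypothetical reload sketch: build_audio_graph() and sample_batch are assumptions,
# not part of the original snippet.
network = build_audio_graph()              # rebuilds the exact layer stack above
model = tflearn.DNN(network)
model.load('pck_nets/AudioConvNet.tfl')    # restores the weights saved above
probs = model.predict(sample_batch)        # sample_batch: (n, MAX_CONV, 1) array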
def train_network(x, y, x_dev, y_dev):
    network = input_data(shape=[None, 9, 1])

    network = conv_1d(network, 32, 3, activation='relu')

    network = conv_1d(network, 64, 3, activation='relu')

    network = conv_1d(network, 64, 3, activation='relu')

    # network = fully_connected(network, 512, activation='relu')

    # network = dropout(network, 0.5)

    network = fully_connected(network, 2, activation='sigmoid')

    network = regression(network, optimizer='momentum', learning_rate=0.001)

    model = tflearn.DNN(network, tensorboard_verbose=0)

    model.fit(x, y, n_epoch=5, validation_set=(x_dev, y_dev), show_metric=True)

    return model
Example No. 39
def main():
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]
    pickle_folders_to_load = sorted(pickle_folders_to_load)

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'
    beat_spec_len = 432

    # training params
    n_classes = 16
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00


    # set up training, testing, & validation partitions
    beat_spec_array, sdr_array = load_beat_spec_and_sdrs(pickle_folders_to_load, pickle_folder,
                                                         feature, fg_or_bg, sdr_type)

    train, test, validate = split_into_sets(len(pickle_folders_to_load), training_percent,
                                            testing_percent, validation_percent)

    trainX = np.expand_dims([beat_spec_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    # Building convolutional network
    input_layer = input_data(shape=[None, beat_spec_len, 1])
    conv1 = conv_1d(input_layer, 32, 10, activation='relu', regularizer="L2")
    max_pool1 = max_pool_1d(conv1, 2)
    full = fully_connected(max_pool1, 512, activation='tanh')
    # single = tflearn.single_unit(full)
    single = fully_connected(full, 1, activation='linear')
    regress = tflearn.regression(single, optimizer='sgd', loss='mean_square', learning_rate=0.01)

    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=500,
              snapshot_step=1000, show_metric=True, run_id='{} classes'.format(n_classes - 1))

    predicted = np.array(model.predict(testX))[:,0]
    plot(testY, predicted)
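The plot() helper is not defined in these snippets; below is a minimal matplotlib stand-in, assuming it scatters actual against predicted SDR values (the optional third argument matches the save-to-file call used in the next example):

import matplotlib.pyplot as plt

def plot(actual, predicted, output_path=None):
    # Assumed behavior: scatter actual vs. predicted SDR values,
    # saving to output_path when one is given.
    plt.figure()
    plt.scatter(actual, predicted, alpha=0.6)
    plt.xlabel('Actual SDR (dB)')
    plt.ylabel('Predicted SDR (dB)')
    if output_path:
        plt.savefig(output_path)
    else:
        plt.show()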
Example No. 40
def main():
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]
    pickle_folders_to_load = sorted(pickle_folders_to_load)
    length = len(pickle_folders_to_load)

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'
    beat_spec_len = 432
    n_folds = 10
    n_epochs = 200
    take = 1

    output_folder = 'cross_{0}_folds_{1}_epochs_take{2}/'.format(n_folds, n_epochs, take)

    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    summary_file = output_folder + 'summary_{0}_folds_{1}_epochs.txt'.format(n_folds, n_epochs)

    # set up training, testing, & validation partitions
    beat_spec_array, sdr_array = load_beat_spec_and_sdrs(pickle_folders_to_load, pickle_folder,
                                                         feature, fg_or_bg, sdr_type)

    perm = np.random.permutation(len(pickle_folders_to_load))  # random permutation of indices
    folds = np.array_split(perm, n_folds)  # splits into folds
    predicted = []

    for fold in range(n_folds):
        train_beat_spec = np.expand_dims([beat_spec_array[i] for i in range(length) if i not in folds[fold]], -1)
        train_sdr = np.expand_dims([sdr_array[i] for i in range(length) if i not in folds[fold]], -1)
        test_beat_spec = np.expand_dims([beat_spec_array[i] for i in folds[fold]], -1)
        test_sdr = np.expand_dims([sdr_array[i] for i in folds[fold]], -1)

        with tf.Graph().as_default():
            # Building convolutional network
            network = input_data(shape=[None, beat_spec_len, 1])
            network = conv_1d(network, 32, 4, activation='relu', regularizer="L2")
            network = max_pool_1d(network, 2)
            network = conv_1d(network, 64, 80, activation='relu', regularizer="L2")
            network = max_pool_1d(network, 2)
            network = fully_connected(network, 128, activation='relu')
            network = dropout(network, 0.8)
            network = fully_connected(network, 256, activation='relu')  # TODO: experiment with non-tanh activations here
            network = dropout(network, 0.8)
            network = fully_connected(network, 1, activation='linear')
            regress = tflearn.regression(network, optimizer='rmsprop', loss='mean_square', learning_rate=0.001)

            start = time.time()
            # Training
            model = tflearn.DNN(regress)#, session=sess)
            model.fit(train_beat_spec, train_sdr, n_epoch=n_epochs,
                      snapshot_step=1000, show_metric=True,
                      run_id='relus_{0}_{1}_of_{2}'.format(n_epochs, fold+1, n_folds))
            elapsed = (time.time() - start)

        prediction = np.array(model.predict(test_beat_spec))[:, 0]
        with open(output_folder + 'predictions_fold{}.txt'.format(fold + 1), 'a') as f:
            f.write('Training avg = {} \n \n'.format(np.mean(train_sdr)))
            f.write('Actual \t Predicted \n')
            for i in range(len(prediction)):
                f.write('{0} \t {1} \n'.format(test_sdr[i][0], prediction[i]))
        pprint.pprint(prediction)
        predicted.append(prediction)
        with open(summary_file, 'a') as f:
            mse = np.square(test_sdr[:, 0] - prediction).mean()  # flatten test_sdr so the shapes align before differencing
            f.write('Fold {0}\t mse = {1} dB \t time = {2} min \n'.format(fold + 1, mse, elapsed / 60.))
        plot(test_sdr, prediction, output_folder + 'scatter_fold{}.png'.format(fold + 1))

        tf.reset_default_graph()

    predicted = np.concatenate(predicted)  # folds can be unequal in size, so concatenate rather than build a ragged array
    # Reorder the ground truth to match the fold order of the predictions.
    actual = np.array([sdr_array[i] for i in np.concatenate(folds)])
    print("Test MSE: ", np.square(actual - predicted).mean())
    plot(actual, predicted, output_folder + 'scatter_all_folds_{}_epochs.png'.format(n_epochs))
# IMDB Dataset loading
train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000,
                                valid_portion=0.1)
trainX, trainY = train
testX, testY = test

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=100, value=0.)
testX = pad_sequences(testX, maxlen=100, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY)
testY = to_categorical(testY)

# Building convolutional network
network = input_data(shape=[None, 100], name='input')
network = tflearn.embedding(network, input_dim=10000, output_dim=128)
branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
network = merge([branch1, branch2, branch3], mode='concat', axis=1)
network = tf.expand_dims(network, 2)
network = global_max_pool(network)
network = dropout(network, 0.5)
network = fully_connected(network, 2, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.001,
                     loss='categorical_crossentropy', name='target')
# Training
model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit(trainX, trainY, n_epoch=5, shuffle=True,
          validation_set=(testX, testY), show_metric=True, batch_size=32)
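An optional follow-up sketch for checking held-out accuracy with tflearn's built-in evaluate():

# Sketch: evaluate() returns a list of metric scores (accuracy by default).
score = model.evaluate(testX, testY, batch_size=32)
print("Test accuracy: %.4f" % score[0])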
testY = to_categorical(testY, nb_classes=number_classes)  # y as one-hot
print("end padding & transform to one hot...")
#--------------------------------------------------------------------------------------------------
# cache trainX, trainY, testX, testY for next-time use.
#    with open(f_cache, 'w') as f:
#        pickle.dump((trainX, trainY, testX, testY, vocab_size), f)
#else:
#    print("training data exists in cache. going to use it.")

# 3. Building convolutional network
###################### MODEL: 1.conv - 2.conv - 3.conv - 4.max_pool - 5.dropout - 6.FC ######################
# input_data(shape=None, placeholder=None, dtype=tf.float32, data_preprocessing=None, data_augmentation=None, name="InputData")
network = input_data(shape=[None, 100], name='input')  # [None, 100]. `input_data` is the data entry point (placeholder) of the network; it is fed with data during training.
network = tflearn.embedding(network, input_dim=vocab_size, output_dim=256)  # TODO 128. [None, 100, 256]. Embedding layer for a sequence of ids. network: incoming 2-D tensor; input_dim: vocabulary size; output_dim: embedding size.
# conv_1d(incoming, nb_filter, filter_size); each branch yields [batch_size, new_steps, nb_filters].
# padding='valid' only ever drops the right-most columns, so new_steps = 100 - filter_size + 1.
branch1 = conv_1d(network, 256, 1, padding='valid', activation='relu', regularizer="L2")
branch2 = conv_1d(network, 256, 2, padding='valid', activation='relu', regularizer="L2")
branch3 = conv_1d(network, 256, 3, padding='valid', activation='relu', regularizer="L2")
branch4 = conv_1d(network, 256, 4, padding='valid', activation='relu', regularizer="L2")
branch5 = conv_1d(network, 256, 5, padding='valid', activation='relu', regularizer="L2")
branch6 = conv_1d(network, 256, 6, padding='valid', activation='relu', regularizer="L2")
branch7 = conv_1d(network, 256, 7, padding='valid', activation='relu', regularizer="L2")
branch8 = conv_1d(network, 256, 7, padding='valid', activation='relu', regularizer="L2")  # filter size 7 again (duplicates branch7)
branch9 = conv_1d(network, 256, 8, padding='valid', activation='relu', regularizer="L2")
branch10 = conv_1d(network, 256, 9, padding='valid', activation='relu', regularizer="L2")
network = merge([branch1, branch2, branch3, branch4, branch5, branch6, branch7, branch8, branch9, branch10], mode='concat', axis=1)  # merge the branch tensors into one: [batch_size, sum of new_steps over branches, nb_filters]
network = tf.expand_dims(network, 2)  # [batch_size, sum of new_steps, 1, nb_filters]; inserts a dimension of 1 into the tensor's shape
network = global_max_pool(network)  # input: 4-D tensor [batch_size, height, width, in_channels]; output: 2-D tensor [batch_size, pooled_dim]
network = dropout(network, 0.5)  # [batch_size, pooled_dim]
network = fully_connected(network, number_classes, activation='softmax')  # matmul([batch_size, pooled_dim], [pooled_dim, number_classes]) ---> [batch_size, number_classes]
# top5 = tflearn.metrics.Top_k(k=5)
testY = to_categorical(testY, nb_classes=number_classes)  # y as one-hot
print("end padding & transform to one hot...")
#--------------------------------------------------------------------------------------------------
# cache trainX, trainY, testX, testY for next-time use.
#    with open(f_cache, 'w') as f:
#        pickle.dump((trainX, trainY, testX, testY, vocab_size), f)
#else:
#    print("training data exists in cache. going to use it.")
# 3. Building convolutional network
###################### MODEL: 1.conv - 2.conv - 3.conv - 4.max_pool - 5.dropout - 6.FC ######################
# input_data(shape=None, placeholder=None, dtype=tf.float32, data_preprocessing=None, data_augmentation=None, name="InputData")
network = input_data(shape=[None, 100], name='input')  # [None, 100]. `input_data` is the data entry point (placeholder) of the network; it is fed with data during training.
network = tflearn.embedding(network, input_dim=vocab_size, output_dim=128)  # TODO. [None, 100, 128]. Embedding layer for a sequence of ids. network: incoming 2-D tensor; input_dim: vocabulary size; output_dim: embedding size.
# conv_1d(incoming, nb_filter, filter_size); each branch yields [batch_size, new_steps, nb_filters].
# padding='valid' only ever drops the right-most columns, so new_steps = 100 - filter_size + 1.
branch1 = conv_1d(network, 128, 1, padding='valid', activation='relu', regularizer="L2")
branch2 = conv_1d(network, 128, 2, padding='valid', activation='relu', regularizer="L2")
branch3 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
branch4 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
branch5 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
network = merge([branch1, branch2, branch3, branch4, branch5], mode='concat', axis=1)  # merge the branch tensors into one: [batch_size, sum of new_steps over branches, nb_filters]
network = tf.expand_dims(network, 2)  # [batch_size, sum of new_steps, 1, nb_filters]; inserts a dimension of 1 into the tensor's shape
network = global_max_pool(network)  # [batch_size, pooled_dim]
network = dropout(network, 0.5)  # [batch_size, pooled_dim]
network = fully_connected(network, number_classes, activation='softmax')  # matmul([batch_size, pooled_dim], [pooled_dim, number_classes]) ---> [batch_size, number_classes]
top5 = tflearn.metrics.Top_k(k=5)
network = regression(network, optimizer='adam', learning_rate=0.001, loss='categorical_crossentropy', name='target')  # metric=top5
###################### MODEL: 1.conv - 2.conv - 3.conv - 4.max_pool - 5.dropout - 6.FC ######################
# 4. Training
model = tflearn.DNN(network, tensorboard_verbose=0)
# model.fit(trainX, trainY, n_epoch=10, shuffle=True, validation_set=(testX, testY), show_metric=True, batch_size=256)  # 32
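With 'valid' padding each branch produces max_length - filter_size + 1 time steps, so the merged tensor's step dimension is the sum over branches. A quick worked check for the five-branch model above (a sketch; the values assume max_length = 100 as in input_data):

# Worked check of the concatenated step dimension for the 5-branch model above.
max_length = 100
filter_sizes = [1, 2, 3, 4, 5]
steps = [max_length - k + 1 for k in filter_sizes]  # [100, 99, 98, 97, 96]
print(sum(steps))  # 490 steps after merge, each holding 128 filter activations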