Example #1
def train_repet_network(beat_spectrum_array, sdr_array, n_epochs, take):
    """

    :param beat_spectrum_array:
    :param sdr_array:
    :param n_epochs:
    :param take:
    :return:
    """
    beat_spec_len = 432
    with tf.Graph().as_default():
        input_layer = input_data(shape=[None, beat_spec_len, 1])
        conv1 = conv_1d(input_layer, 32, 4, activation="relu", regularizer="L2")
        max_pool1 = max_pool_1d(conv1, 2)
        conv2 = conv_1d(max_pool1, 64, 80, activation="relu", regularizer="L2")
        max_pool2 = max_pool_1d(conv2, 2)
        fully1 = fully_connected(max_pool2, 128, activation="relu")
        dropout1 = dropout(fully1, 0.8)
        fully2 = fully_connected(dropout1, 256, activation="relu")
        dropout2 = dropout(fully2, 0.8)
        linear = fully_connected(dropout2, 1, activation="linear")
        regress = tflearn.regression(linear, optimizer="rmsprop", loss="mean_square", learning_rate=0.001)

        # Training
        model = tflearn.DNN(regress)  # , session=sess)
        model.fit(
            beat_spectrum_array,
            sdr_array,
            n_epoch=n_epochs,
            snapshot_step=1000,
            show_metric=True,
            run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take),
        )

        return model
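
A minimal usage sketch, assuming the beat spectra and SDR values are already loaded as NumPy arrays (the sample count and the epoch/take values here are illustrative):

import numpy as np

beat_specs = np.random.rand(100, 432, 1).astype(np.float32)  # matches the input_data shape [None, 432, 1]
sdrs = np.random.rand(100, 1).astype(np.float32)             # one regression target per example

model = train_repet_network(beat_specs, sdrs, n_epochs=200, take=1)
predictions = model.predict(beat_specs[:10])                 # predicted SDR values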
Example #2
def load_audio_convnet(filename):
    input_layer = input_data(shape=[None, MAX_CONV, 1])
    conv_layer_1  = conv_1d(input_layer, nb_filter=8, filter_size=79, activation='relu', name='conv_layer_1')
    pool_layer_1  = max_pool_1d(conv_layer_1, 100, name='pool_layer_1')
    conv_layer_2  = conv_1d(pool_layer_1, nb_filter=16, filter_size=11, activation='relu', name='conv_layer_2')
    pool_layer_2  = max_pool_1d(conv_layer_2, 5, name='pool_layer_2')
    fc_layer_1  = fully_connected(pool_layer_2, 100, activation='tanh', name='fc_layer_1')
    fc_layer_2 = fully_connected(fc_layer_1, 3, activation='softmax', name='fc_layer_2')
    network = regression(fc_layer_2, optimizer='sgd', loss='categorical_crossentropy', learning_rate=0.1)
    model = tflearn.DNN(network)
    model.load(filename)
    return model
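
A usage sketch, assuming MAX_CONV is the same module-level constant used when the checkpoint was written; the path reuses the one saved by make_audio_convnet further down in this listing:

import numpy as np

MAX_CONV = 4000  # assumed value; must match the constant used at training time

model = load_audio_convnet('pck_nets/AudioConvNet.tfl')
sample = np.random.rand(1, MAX_CONV, 1)  # one audio feature vector
print(model.predict(sample))             # softmax scores over the 3 classes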
Example #3
def main():
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]
    pickle_folders_to_load = sorted(pickle_folders_to_load)

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'
    beat_spec_len = 432

    # training params
    n_classes = 16
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00


    # set up training, testing, & validation partitions
    beat_spec_array, sdr_array = load_beat_spec_and_sdrs(pickle_folders_to_load, pickle_folder,
                                                         feature, fg_or_bg, sdr_type)

    train, test, validate = split_into_sets(len(pickle_folders_to_load), training_percent,
                                            testing_percent, validation_percent)

    trainX = np.expand_dims([beat_spec_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, 1])
    network = conv_1d(network, 32, 4, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = conv_1d(network, 64, 80, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu') # look for non-tanh things???
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='sgd', loss='mean_square', learning_rate=0.01)

    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=100,
              snapshot_step=1000, show_metric=True, run_id='relus_100_3')

    predicted = np.array(model.predict(testX))[:,0]
    # pprint.pprint()
    print("Test MSE: ", np.square(testY - predicted).mean())
    plot(testY, predicted)
Example #4
def main():
    pickle_folder = 'pickles_combined'


    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'

    # training params
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00
    beat_spec_max = 355


    # set up training, testing, & validation partitions
    beat_spec_array, sdr_array = unpickle_beat_spec_and_sdrs(pickle_folder, beat_spec_max)

    train, test, validate = split_into_sets(len(beat_spec_array), training_percent,
                                            testing_percent, validation_percent)

    trainX = np.expand_dims([beat_spec_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    # Building convolutional network
    network = input_data(shape=[None, beat_spec_max, 1])
    network = conv_1d(network, 32, 4, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = conv_1d(network, 64, 80, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu') # look for non-tanh things???
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='sgd', loss='mean_square', learning_rate=0.01)

    start = time.time()
    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=2000,
              snapshot_step=1000, show_metric=True, run_id='mir1k_2000_truncate')
    elapsed = (time.time() - start)

    predicted = np.array(model.predict(testX))[:,0]
    print("Test MSE: ", np.square(testY - predicted).mean())
    print(elapsed, "seconds")
    plot(testY, predicted)
Example #5
def build_tflearn_cnn(length):
    input_layer = input_data(shape=[None, length, 1])

    # Convolution Layer
    conv_layer_1 = conv_1d(input_layer,
                           nb_filter=512,
                           filter_size=10,
                           activation='relu',
                           name='conv_layer_1',
                           weights_init='xavier',
                           regularizer="L2")
    pool_layer_1 = max_pool_1d(conv_layer_1, 4, name='pool_layer_1')

    conv_layer_2 = conv_1d(pool_layer_1,
                           nb_filter=512,
                           filter_size=5,
                           activation='relu',
                           name='conv_layer_2',
                           weights_init='xavier',
                           regularizer="L2")
    pool_layer_3 = max_pool_1d(conv_layer_2, 4, name='pool_layer_3')
    # flat = flatten(pool_layer_3)

    fc_layer_4 = fully_connected(pool_layer_3,
                                 256,
                                 activation='relu',
                                 name='fc_layer_4',
                                 regularizer='L2')
    drop_2 = dropout(fc_layer_4, drop_out_prob)
    fc_layer_5 = fully_connected(drop_2,
                                 128,
                                 activation='relu',
                                 name='fc_layer_5',
                                 regularizer='L2')
    drop_3 = dropout(fc_layer_5, drop_out_prob)

    # Output
    fc_layer_2 = fully_connected(drop_3,
                                 3,
                                 activation='softmax',
                                 name='output')
    network = regression(fc_layer_2,
                         optimizer='adam',
                         loss='softmax_categorical_crossentropy',
                         learning_rate=0.0001,
                         metric='accuracy')
    model = tflearn.DNN(network, tensorboard_verbose=0)

    return model
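
A small usage sketch for build_tflearn_cnn on synthetic data, assuming it lives in the same module as the (assumed) module-level keep-probability drop_out_prob that its dropout layers read:

import numpy as np

drop_out_prob = 0.5   # assumed keep probability for the two dropout layers
length = 1000         # assumed input length

X = np.random.rand(64, length, 1).astype(np.float32)  # 64 synthetic 1-D signals
Y = np.eye(3)[np.random.randint(0, 3, 64)]            # one-hot labels for the 3 softmax units

model = build_tflearn_cnn(length)
model.fit(X, Y, n_epoch=5, validation_set=0.1, show_metric=True)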
Example #6
def main():
    """

    :return:
    """
    pickle_folder = '../Repet/pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]

    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'
    beat_spec_len = 432
    n_epochs = 200
    take = 1

    # set up training, testing, & validation partitions
    beat_spec_array, sdr_array = load_beat_spec_and_sdrs(pickle_folders_to_load, pickle_folder,
                                                         feature, fg_or_bg, sdr_type)

    beat_spec_array = np.expand_dims(beat_spec_array, -1)
    sdr_array = np.expand_dims(sdr_array, -1)

    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, 1])
    network = conv_1d(network, 32, 4, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = conv_1d(network, 64, 80, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')  # look for non-tanh things???
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='rmsprop', loss='mean_square', learning_rate=0.001)

    start = time.time()
    # Training
    model = tflearn.DNN(regress)  # , session=sess)
    model.fit(beat_spec_array, sdr_array, n_epoch=n_epochs,
              snapshot_step=1000, show_metric=True,
              run_id='repet_save_{0}_epochs_take_{1}'.format(n_epochs, take))
    elapsed = (time.time() - start)
    print('Finished training after {} seconds. Saving...'.format(elapsed))

    model_output_folder = 'network_outputs/'
    model_output_file = join(model_output_folder, 'repet_save_{0}_epochs_take_{1}'.format(n_epochs, take))

    model.save(model_output_file)
Example #7
 def transform_embedded_sequences(self, embedded_sequences):
     net = conv_1d(embedded_sequences, self.filters, 5, 1, activation='relu', padding="valid")
     net = max_pool_1d(net, 5, padding="valid")
     if self.dropout_rate > 0:
         net = dropout(net, self.dropout_rate)
     net = conv_1d(net, self.filters, 5, activation='relu', padding="valid")
     net = max_pool_1d(net, 5, padding="valid")
     if self.dropout_rate > 0:
         net = dropout(net, self.dropout_rate)
     net = conv_1d(net, self.filters, 5, activation='relu', padding="valid")
     net = max_pool_1d(net, 35)
     if self.dropout_rate > 0:
         net = dropout(net, self.dropout_rate)
     net = fully_connected(net, self.filters, activation='relu')
     preds = fully_connected(net, self.class_count, activation='softmax')
     return preds
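
The method above only uses self for three configuration attributes (filters, dropout_rate, class_count) and expects an already-embedded sequence tensor. A sketch of wiring it up, assuming it has been copied out as a module-level function; the vocabulary size and the sequence length of 1000 are assumptions (1000 is convenient because the three 'valid' conv/pool stages reduce it to exactly 35 steps before the final max_pool_1d(net, 35)):

from types import SimpleNamespace

import tflearn
from tflearn.layers.core import input_data
from tflearn.layers.embedding_ops import embedding
from tflearn.layers.estimator import regression

# Hypothetical config object standing in for `self`.
cfg = SimpleNamespace(filters=128, dropout_rate=0.5, class_count=20)

net = input_data(shape=[None, 1000], name='tokens')    # sequences of 1000 token ids
net = embedding(net, input_dim=20000, output_dim=100)  # assumed vocabulary and embedding sizes
preds = transform_embedded_sequences(cfg, net)         # pass cfg explicitly as `self`
net = regression(preds, optimizer='adam', loss='categorical_crossentropy')
model = tflearn.DNN(net)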
Example #8
	def residual_block_1D(incoming,out_channels,downsample=False, first=False, filt_len=16, dropout_prob=0.85, downsampleSecond=True):
		resnet = incoming
		in_channels = incoming.shape[-1].value
		strides = (2 if downsample else 1)
		dsLayer = (1 if downsampleSecond else 0)
		identity = resnet

		nConv = 2
		if first:
			resnet = conv_1d(resnet, out_channels, filt_len, strides,weights_init="variance_scaling")
			nConv = 1

		for i in range(nConv):
			resnet = batch_normalization(resnet)
			resnet = relu(resnet)
			resnet = dropout(resnet, dropout_prob)
			if downsample and i==dsLayer: #1 as in, second layer
				resnet = conv_1d(resnet,out_channels,filt_len, strides=1, weights_init="variance_scaling") #puts the downsampling on the first conv layer only
			else:
				resnet = conv_1d(resnet,out_channels,filt_len, strides, weights_init="variance_scaling")

		#Beginning of skip connection
		identity = max_pool_1d(identity,strides, strides)

		if in_channels != out_channels:

			ch = (out_channels - in_channels) // 2
			identity = tf.pad(identity,[[0,0],[0,0],[ch,ch]])
			in_channels = out_channels

		resnet = resnet + identity
		
		return resnet
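
A sketch of stacking the block above into a small 1-D ResNet-style classifier; the signal length, channel counts and class count are illustrative, and the imports mirror what the block itself relies on (tf, conv_1d, max_pool_1d, batch_normalization, relu, dropout):

import tensorflow as tf
import tflearn
from tflearn.layers.conv import conv_1d, max_pool_1d
from tflearn.layers.core import input_data, fully_connected, dropout
from tflearn.layers.normalization import batch_normalization
from tflearn.activations import relu

net = input_data(shape=[None, 1024, 1])                      # fixed-length 1-D signals (assumed length)
net = conv_1d(net, 64, 16, weights_init="variance_scaling")  # stem convolution before the residual stack
net = residual_block_1D(net, 64, first=True)                 # keeps 64 channels, no downsampling
net = residual_block_1D(net, 128, downsample=True)           # halves the time axis, zero-pads 64 -> 128 channels
net = relu(batch_normalization(net))
net = fully_connected(net, 4, activation='softmax')          # assumed 4 output classes
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
model = tflearn.DNN(net)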
Example #9
def model_fn(net, X_len, max_reach, block_size, out_classes, batch_size, dtype,
             **kwargs):
    """
        Args:
        net -> Input tensor shaped (batch_size, max_reach + block_size + max_reach, 3)
        Returns:
        logits -> Unscaled logits tensor in time_major form, (block_size, batch_size, out_classes)
    """

    net = batch_normalization(net, decay=0.99, scope="Initial_bn")
    for block in range(1, 3):
        with tf.variable_scope("block%d" % block):
            for layer in range(kwargs['num_layers']):
                with tf.variable_scope("layer%d" % layer):
                    net = conv_1d(net, 64, 9, scope='conv1d')
                    net = batch_normalization(net, scope='bn')
                    net = tf.nn.relu(net)
            net = max_pool_1d(net, 2)
        net = tf.nn.relu(net)

    net = central_cut(net, block_size, 4)
    net = tf.transpose(net, [1, 0, 2], name="Shift_to_time_major")
    net = conv_1d(net, 9, 1, scope='logits')
    return {
        'logits': net,
        'init_state': tf.constant(0),
        'final_state': tf.constant(0),
    }
Example #10
def multi_filter_conv_block(input, n_filters, reuse=False,
                            dropout_keep_prob=0.5, activation='relu',
                            padding='same', name='mfcb'):
    branch1 = conv_1d(input, n_filters, 1, padding=padding,
                      activation=activation, reuse=reuse,
                      scope='{}_conv_branch_1'.format(name))
    branch2 = conv_1d(input, n_filters, 3, padding=padding,
                      activation=activation, reuse=reuse,
                      scope='{}_conv_branch_2'.format(name))
    branch3 = conv_1d(input, n_filters, 5, padding=padding,
                      activation=activation, reuse=reuse,
                      scope='{}_conv_branch_3'.format(name))

    unstacked_b1 = tf.unstack(branch1, axis=1,
                              name='{}_unstack_b1'.format(name))
    unstacked_b2 = tf.unstack(branch2, axis=1,
                              name='{}_unstack_b2'.format(name))
    unstacked_b3 = tf.unstack(branch3, axis=1,
                              name='{}_unstack_b3'.format(name))

    n_grams = []
    for t_b1, t_b2, t_b3 in zip(unstacked_b1, unstacked_b2, unstacked_b3):
        n_grams.append(tf.stack([t_b1, t_b2, t_b3], axis=0))
    n_grams_merged = tf.concat(n_grams, axis=0)
    n_grams_merged = tf.transpose(n_grams_merged, perm=[1, 0, 2])
    gram_pooled = max_pool_1d(n_grams_merged, kernel_size=3, strides=3)
    cnn_out = dropout(gram_pooled, dropout_keep_prob)
    return cnn_out
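
A possible text-classification head built on the block above; the vocabulary size, sequence length and class count are assumptions. tf.unstack along axis 1 inside the block needs a statically known sequence length, which input_data provides here:

import tflearn
from tflearn.layers.core import input_data, fully_connected
from tflearn.layers.embedding_ops import embedding
from tflearn.layers.estimator import regression

net = input_data(shape=[None, 100], name='tokens')     # 100 token ids per example (static length)
net = embedding(net, input_dim=10000, output_dim=128)  # [batch, 100, 128] embedded sequences
net = multi_filter_conv_block(net, n_filters=64, name='mfcb1')
net = fully_connected(net, 2, activation='softmax')    # binary classification head
net = regression(net, optimizer='adam', loss='categorical_crossentropy')
model = tflearn.DNN(net)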
Example #11
def model_fn(net, X_len, max_reach, block_size, out_classes, batch_size, dtype,
             **kwargs):
    """
        Args:
        net -> Input tensor shaped (batch_size, max_reach + block_size + max_reach, 3)
        Returns:
        logits -> Unscaled logits tensor in time_major form, (block_size, batch_size, out_classes)
    """

    for block in range(1, 4):
        with tf.variable_scope("block%d" % block):
            for layer in range(1, 1 + 1):
                with tf.variable_scope('layer_%d' % layer):
                    net = conv_1d(net, 32, 3)
            net = max_pool_1d(net, 2)
        net = tf.nn.relu(net)

    net = central_cut(net, block_size, 8)
    net = tf.transpose(net, [1, 0, 2], name="Shift_to_time_major")
    net = conv_1d(net, 9, 1, scope='logits')
    return {
        'logits': net,
        'init_state': tf.constant(0),
        'final_state': tf.constant(0),
    }
Example #12
    def sentiment_analysis(self, sentencedata):

        unique_words = self.uniqueword_csvload()

        neurons = len(unique_words)

        reset_default_graph()
        network = input_data(shape=[None, 1, neurons])
        network = conv_1d(network, 8, 3, activation='relu')
        network = max_pool_1d(network, 3)

        network = conv_1d(network, 16, 3, activation='relu')
        network = max_pool_1d(network, 3)

        network = fully_connected(network, 8, activation='relu')
        network = dropout(network, 0.5)

        network = fully_connected(network, 2, activation='softmax')
        network = regression(network,
                             optimizer='adam',
                             learning_rate=0.01,
                             loss='categorical_crossentropy')

        model = tflearn.DNN(network)
        model.load(
            "./model/thaitext-classifier-combined_inhousedata-UTF8-4-100.tfl")

        input_sentencedata = self.preprocess_server_2(sentencedata)[0]
        #input_uniquewords = self.get_uniquewords(input_sentencedata)
        sentences = []
        #f = open(file_path, 'r')
        for word in input_sentencedata:
            sentences.append(word)
        vector_one = []
        inner_vector = []
        for word in unique_words:
            if word in sentences:
                vector_one.append(1)
            else:
                vector_one.append(0)
        inner_vector.append(vector_one)
        inner_vector = np.array(inner_vector, dtype=np.float32)
        print("inner_vector:", inner_vector)
        label = model.predict_label([inner_vector])
        pred = model.predict([inner_vector])

        return pred
Example #13
def main():
    """

    :return:
    """
    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'sim_mat'
    beat_spec_len = 432

    # set up training, testing, & validation partitions
    sim_mat_array, sdr_array = get_generated_data(feature, fg_or_bg, sdr_type)

    # training params
    n_classes = 10
    n_training_steps = 1000
    training_step_size = 100
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00

    sdr_array_1h, hist = sdrs_to_one_hots(sdr_array, n_classes, True)

    train, test, validate = split_into_sets(len(sim_mat_array), training_percent,
                                            testing_percent, validation_percent)

    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, 1], name='input')
    network = conv_1d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    # network = local_response_normalization(network)
    # network = batch_normalization(network)
    # network = conv_1d(network, 64, 3, activation='relu', regularizer="L2")
    # network = max_pool_1d(network, 2)
    # network = local_response_normalization(network)
    # network = batch_normalization(network)
    # network = fully_connected(network, 128, activation='tanh')
    # network = dropout(network, 0.5)
    network = fully_connected(network, 512, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, n_classes, activation='softmax')
    # network = fully_connected(network, 1, activation='linear')
    network = regression(network, optimizer='adagrad', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    X = np.expand_dims(sim_mat_array, -1)
    Y = np.array(sdr_array_1h)
    # X = np.expand_dims([beat_spec_array[i] for i in train], -1)
    # Y = np.array([sdr_array_1h[i] for i in train])
    # testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    # testY = np.array([sdr_array[i] for i in test])

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=1)
    model.fit({'input': X}, {'target': Y}, n_epoch=20,
              validation_set=0.1,
              snapshot_step=1000, show_metric=True, run_id='{} classes'.format(n_classes - 1))
Example #14
def model_fn(net,
             X_len,
             max_reach,
             block_size,
             out_classes,
             batch_size,
             reuse=False,
             **kwargs):
    """
        Args:
        net -> Input tensor shaped (batch_size, max_reach + block_size + max_reach, 3)
        Returns:
        logits -> Unscaled logits tensor in time_major form, (block_size, batch_size, out_classes)
    """

    print("model in", net.get_shape())
    with tf.name_scope("model"):
        for j in range(3):
            with tf.variable_scope("block%d" % (j + 1)):
                for i, no_channel in zip([1, 4, 16],
                                         np.array([64, 64, 128]) * (2**j)):
                    with tf.variable_scope("atrous_conv1d_%d" % i):
                        filter = tf.get_variable("W",
                                                 shape=(3, net.get_shape()[-1],
                                                        no_channel))
                        bias = tf.get_variable("b", shape=(no_channel, ))
                        net = atrous_conv1d(net, filter, i,
                                            padding="SAME") + bias
                        net = tf.nn.relu(net)
                net = max_pool_1d(net, 2)
        print("after conv", net.get_shape())
        net = tf.transpose(net, [1, 0, 2], name="Shift_to_time_major")

        outputs = net
        # outputs.get_shape()[-1]  # Number of output filters
        state_size = 128 * 4
        init_state = tf.constant(0.1, dtype=tf.float32)
        final_state = tf.constant(0.1, dtype=tf.float32)

        print("outputs", outputs.get_shape())
        with tf.variable_scope("Output"):
            W = tf.get_variable("W", shape=[state_size, out_classes])
            b = tf.get_variable("b", shape=[out_classes])
            logits = tf.nn.conv1d(outputs,
                                  tf.reshape(W, (1, state_size, out_classes)),
                                  1,
                                  padding='SAME')
            logits += b
    print("model out", logits.get_shape())
    return {
        'logits': logits,
        'init_state': init_state,
        'final_state': final_state,
    }
Example #15
def build_model_specific():
    ### IS ANY OF THIS NECESSARY FOR LIGHT/DARK? IN GENERAL W/ STATIONARY CAMERA?
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()

    # Specify shape of the data, image prep
    network = input_data(shape=[None, 52, 64],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)

    # conv_2d incoming, nb_filter, filter_size
    # incoming: Tensor. Incoming 4-D Tensor.
    # nb_filter: int. The number of convolutional filters. # WHAT IS THIS?
    # filter_size: `int` or list of `int`. Size of filters.   # WHAT IS THIS?
    network = conv_1d(network, 512, 3, activation='relu')

    # (incoming, kernel_size)
    # incoming: Tensor. Incoming 4-D Layer.
    # kernel_size: `int` or list of `int`. Pooling kernel size.
    network = max_pool_1d(network, 2)

    network = conv_1d(network, 64, 3, activation='relu')
    network = conv_1d(network, 64, 3, activation='relu')
    network = max_pool_1d(network, 2)

    network = fully_connected(network, 512, activation='relu')

    network = dropout(network, 0.5)

    network = fully_connected(network, 4, activation='softmax')

    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.0003)

    model = tflearn.DNN(network, tensorboard_verbose=0)
    return model
Example #16
def make_audio_convnet():
    global buzz_train_d_conv
    buzz_test_d_conv = buzz_train_d_conv

    input_layer = input_data(shape=[None, MAX_CONV, 1])
    conv_layer_1 = conv_1d(input_layer,
                           nb_filter=8,
                           filter_size=79,
                           activation='relu',
                           name='conv_layer_1')
    pool_layer_1 = max_pool_1d(conv_layer_1, 100, name='pool_layer_1')
    conv_layer_2 = conv_1d(pool_layer_1,
                           nb_filter=16,
                           filter_size=11,
                           activation='relu',
                           name='conv_layer_2')
    pool_layer_2 = max_pool_1d(conv_layer_2, 5, name='pool_layer_2')
    fc_layer_1 = fully_connected(pool_layer_2,
                                 100,
                                 activation='tanh',
                                 name='fc_layer_1')
    fc_layer_2 = fully_connected(fc_layer_1,
                                 3,
                                 activation='softmax',
                                 name='fc_layer_2')
    network = regression(fc_layer_2,
                         optimizer='sgd',
                         loss='categorical_crossentropy',
                         learning_rate=0.1)
    model = tflearn.DNN(network)

    model.fit(buzz_train_d_conv[0],
              buzz_train_d_conv[1],
              n_epoch=30,
              shuffle=True,
              validation_set=(buzz_test_d_conv[0], buzz_test_d_conv[1]),
              show_metric=True,
              batch_size=100,
              run_id='audio_convnet')
    model.save('pck_nets/AudioConvNet.tfl')
Example #17
def model_fn(net, X_len, max_reach, block_size, out_classes, batch_size, dtype,
             **kwargs):
    """
        Args:
        net -> Input tensor shaped (batch_size, max_reach + block_size + max_reach, 3)
        Returns:
        logits -> Unscaled logits tensor in time_major form, (block_size, batch_size, out_classes)
    """

    print("model in", net.get_shape())
    for block in range(1, 4):
        with tf.variable_scope("block%d" % block):
            if block > 1:
                net = tf.expand_dims(net, 3)
                net = tf.layers.max_pooling2d(net, [1, 2], [1, 2])
                net = tf.squeeze(net, axis=3)

            for layer in range(kwargs['num_layers']):
                with tf.variable_scope('layer_%d' % layer):
                    res = net
                    for sublayer in range(kwargs['num_sub_layers']):
                        res = batch_normalization(res,
                                                  scope='bn_%d' % sublayer)
                        res = tf.nn.relu(res)
                        res = conv_1d(
                            res,
                            32 * 2**(4 - block),
                            3,
                            scope="conv_1d_%d" % sublayer,
                            weights_init=variance_scaling_initializer(
                                dtype=dtype))
                    k = tf.get_variable(
                        "k",
                        initializer=tf.constant_initializer(1.0),
                        shape=[])
                    net = tf.nn.relu(k) * res + net
            net = max_pool_1d(net, 2)
        net = tf.nn.relu(net)

    net = central_cut(net, block_size, 8)
    print("after slice", net.get_shape())
    net = tf.transpose(net, [1, 0, 2], name="Shift_to_time_major")
    print("after transpose", net.get_shape())
    net = conv_1d(net, 9, 1, scope='logits')
    print("model out", net.get_shape())
    return {
        'logits': net,
        'init_state': tf.constant(0),
        'final_state': tf.constant(0),
    }
Example #18
def model_fn(net, X_len, max_reach, block_size, out_classes, batch_size, k, reuse=False, **kwargs):
    """
        Args:
        net -> Input tensor shaped (batch_size, max_reach + block_size + max_reach, 3)
        Returns:
        logits -> Unscaled logits tensor in time_major form, (block_size, batch_size, out_classes)
    """

    print("model in", net.get_shape())
    # net = tf.Print(net, [tf.shape(net), tf.shape(X_len)], message="netty")
    for j in range(3):
        with tf.variable_scope("block%d" % (j + 1)):
            for i, no_channel in zip([1, 2, 4], [16, 16, 16]):
                with tf.variable_scope("atrous_conv1d_%d" % i):
                    filter = tf.get_variable("W", shape=(3, net.get_shape()[-1], no_channel))
                    bias = tf.get_variable("b", shape=(no_channel,))
                    net = atrous_conv1d(net, filter, i, padding="VALID") + bias
                    net = tf.nn.relu(net)
                    tf.get_default_graph().add_to_collection("activations", net)
            net = tf.Print(net, [tf.shape(net)], first_n=10, message="net, pre_pool")
            net = max_pool_1d(net, 2)
    print("after conv", net.get_shape())
    net = tf.transpose(net, [1, 0, 2], name="Shift_to_time_major")

    state_size = 32  # outputs.get_shape()[-1]  # Number of output filters
    with tf.name_scope("RNN"):
        cell = tf.nn.rnn_cell.GRUCell(state_size)
        init_state = cell.zero_state(batch_size, dtype=tf.float32)
        outputs, final_state = tf.nn.dynamic_rnn(cell, net, initial_state=init_state, sequence_length=X_len, time_major=True)
    # outputs = net
    # init_state = tf.constant(0.1, dtype=tf.float32)
    # final_state = tf.constant(0.1, dtype=tf.float32)

    outputs = tf.Print(outputs, [tf.shape(outputs)], first_n=1, message="outputs_pre_w")
    print("outputs", outputs.get_shape())
    with tf.variable_scope("Output"):
        outputs = tf.reshape(outputs, [-1, state_size])
        W = tf.get_variable("W", shape=[state_size, out_classes])
        b = tf.get_variable("b", shape=[out_classes])
        outputs = tf.matmul(outputs, W) + b
        logits = tf.reshape(outputs, [block_size // 8, batch_size, out_classes])

    print("model out", logits.get_shape())
    return {
        'logits': logits,
        'init_state': init_state,
        'final_state': final_state,
        'reg': k * ops.running_mean(logits, [3, 4, 5, 6], [1, 2, 4, 8], out_classes)
    }
Example #19
def model(lr=LEARNING_RATE):
    network = input_data(shape=[None, 600, 4], name='features')
    network = conv_1d(network, 300, 19, strides=1, activation='relu')
    network = max_pool_1d(network, 3, strides=3)
    network = conv_1d(network, 200, 11, strides=1, activation='relu')
    network = max_pool_1d(network, 4, strides=4)
    network = conv_1d(network, 200, 7, strides=1, activation='relu')
    network = max_pool_1d(network, 4, strides=4)
    network = fully_connected(network, 1000, activation='relu')
    network = fully_connected(network, 1000, activation='relu')
    network = fully_connected(network, 164, activation='sigmoid')
    network = regression(network,
                         optimizer='rmsprop',
                         loss='binary_crossentropy',
                         learning_rate=lr,
                         name='labels')

    model = tflearn.DNN(network,
                        checkpoint_path=MODEL_PATH,
                        tensorboard_dir=TRAIN_PATH,
                        tensorboard_verbose=3,
                        max_checkpoints=1)

    return model
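
A usage sketch with synthetic data (LEARNING_RATE, MODEL_PATH and TRAIN_PATH are module-level constants in the snippet above); the feed-dict keys match the names given to input_data and regression:

import numpy as np

X = np.random.randint(0, 2, size=(256, 600, 4)).astype(np.float32)  # e.g. one-hot encoded 600 bp sequences
Y = np.random.randint(0, 2, size=(256, 164)).astype(np.float32)     # 164 independent binary labels

dnn = model()   # uses the default LEARNING_RATE
dnn.fit({'features': X}, {'labels': Y}, n_epoch=10, validation_set=0.1, show_metric=True)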
Example #20
def main():
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]
    pickle_folders_to_load = sorted(pickle_folders_to_load)

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'
    beat_spec_len = 432

    # training params
    n_classes = 16
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00


    # set up training, testing, & validation partitions
    beat_spec_array, sdr_array = load_beat_spec_and_sdrs(pickle_folders_to_load, pickle_folder,
                                                         feature, fg_or_bg, sdr_type)

    train, test, validate = split_into_sets(len(pickle_folders_to_load), training_percent,
                                            testing_percent, validation_percent)

    trainX = np.expand_dims([beat_spec_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    # Building convolutional network
    input = input_data(shape=[None, beat_spec_len, 1])
    conv1 = conv_1d(input, 32, 10, activation='relu', regularizer="L2")
    max_pool1 = max_pool_1d(conv1, 2)
    full = fully_connected(max_pool1, 512, activation='tanh')
    # single = tflearn.single_unit(full)
    single = fully_connected(full, 1, activation='linear')
    regress = tflearn.regression(single, optimizer='sgd', loss='mean_square', learning_rate=0.01)

    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=500,
              snapshot_step=1000, show_metric=True, run_id='{} classes'.format(n_classes - 1))

    predicted = np.array(model.predict(testX))[:,0]
    plot(testY, predicted)
Example #21
def model_fn(net,
             X_len,
             max_reach,
             block_size,
             out_classes,
             batch_size,
             reuse=False,
             **kwargs):
    """
        Args:
        net -> Input tensor shaped (batch_size, max_reach + block_size + max_reach, 3)
        Returns:
        logits -> Unscaled logits tensor in time_major form, (block_size, batch_size, out_classes)
    """

    print("model in", net.get_shape())
    for j in range(3):
        with tf.variable_scope("block%d" % (j + 1)):
            for i, no_channel in zip([1, 4, 16],
                                     np.array([64, 64, 128]) * (2**j)):
                with tf.variable_scope("atrous_conv1d_%d" % i):
                    filter = tf.get_variable("W",
                                             shape=(3, net.get_shape()[-1],
                                                    no_channel))
                    net = tf.nn.convolution(net,
                                            filter,
                                            padding="SAME",
                                            dilation_rate=[i])
                    net = batch_normalization(net, scope='bn')
                    net = tf.nn.relu(net)
            net = max_pool_1d(net, 2)
    net = central_cut(net, block_size, 8)
    net = tf.transpose(net, [1, 0, 2], name="Shift_to_time_major")
    net = conv_1d(net, 9, 1, scope='logits')
    print("model out", net.get_shape())
    return {
        'logits': net,
        'init_state': tf.constant(0),
        'final_state': tf.constant(0),
    }
Example #22
def create1dConvNetNeuralNetworkModel(input_size, output_size, learningRate):

    # Specify the log directory
    logdir = 'log/1d/' + datetime.now().strftime('%Y%m%d-%H%M%S')
    # Limit gpu memory use
    #tflearn.init_graph(num_cores=1, gpu_memory_fraction=0.7)

    convnet = input_data(shape=[None, input_size], name='input_currentState')
    convnet = tflearn.embedding(convnet, input_dim=input_size, output_dim=2)

    convnet = conv_1d(convnet,
                      nb_filter=16,
                      filter_size=3,
                      strides=1,
                      padding='valid',
                      activation='relu')
    convnet = max_pool_1d(convnet, kernel_size=2, strides=2, padding='valid')

    convnet = flatten(convnet)

    convnet = fully_connected(convnet,
                              n_units=128,
                              weights_init='truncated_normal',
                              activation='relu')
    convnet = dropout(convnet, 0.5)

    convnet = fully_connected(convnet,
                              n_units=output_size,
                              activation='softmax')
    convnet = regression(convnet,
                         optimizer='adam',
                         learning_rate=learningRate,
                         loss='categorical_crossentropy',
                         name='targets')

    model = tflearn.DNN(convnet, tensorboard_dir=logdir)

    return model
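
A usage sketch assuming the input is a vector of input_size integer state ids (which is what the embedding layer expects); all sizes here are illustrative:

import numpy as np

input_size, output_size = 16, 4
model = create1dConvNetNeuralNetworkModel(input_size, output_size, learningRate=0.001)

states = np.random.randint(0, input_size, size=(500, input_size))      # integer ids for the embedding layer
targets = np.eye(output_size)[np.random.randint(0, output_size, 500)]  # one-hot targets
model.fit({'input_currentState': states}, {'targets': targets}, n_epoch=10, show_metric=True)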
Example #23


# Preprocess data
lendat=len(data)
data = preprocess(data, lendat)

lendatVal=len(dataVal)
dataVal = preprocess(dataVal, lendatVal)

W= tflearn.initializations.xavier(uniform=True, seed=None, dtype=tf.float32)
# Building convolutional network
network = input_data(shape=[None, 1024], name='input')
network = tflearn.embedding(network, input_dim=71, output_dim=256)
network = conv_1d(network, 256, 7, padding='valid', scope='conv1', activation='relu')
network = max_pool_1d(network, 3, strides=3, name='Maxpool1D_1')
network = conv_1d(network, 256, 7, padding='valid', scope='conv2', activation='relu')
network = max_pool_1d(network, 3, strides=3, name='Maxpool1D_2')
network = conv_1d(network, 256, 3, padding='valid', scope='conv3', activation='relu')
network = conv_1d(network, 256, 3, padding='valid', scope='conv4', activation='relu')
network = conv_1d(network, 256, 3, padding='valid', scope='conv5', activation='relu')
network = conv_1d(network, 256, 3, padding='valid', scope='conv6', activation='relu')
network = max_pool_1d(network, 3, strides=3, name='Maxpool1D_Last')
network = tflearn.fully_connected(network, 1024, name='Fullyconected_0')
network = dropout(network, 0.5)
network = tflearn.fully_connected(network, 1024, name='Fullyconected_1')
network = dropout(network, 0.5)
network = fully_connected(network, 14, activation='softmax', name='FullyConected_Last')
network = regression(network, optimizer='adam', loss='categorical_crossentropy', name='target')

Example #24
    if i == 0:
        VU = temp
    else:
        VU = merge([VU, temp], mode='concat', axis=0)
alphas_prot = tf.nn.softmax(tf.reduce_max(VU, axis=2), name='alphas')
Attn_prot = tf.reduce_sum(prot_gru_2 * tf.expand_dims(alphas_prot, -1), 1)
Attn_prot_reshape = tflearn.reshape(Attn_prot, [-1, GRU_size_prot, 1])
conv_1 = conv_1d(Attn_prot_reshape,
                 64,
                 8,
                 4,
                 activation='leakyrelu',
                 weights_init="xavier",
                 regularizer="L2",
                 name='conv1')
pool_1 = max_pool_1d(conv_1, 4, name='pool1')
prot_reshape_6 = tflearn.reshape(pool_1, [-1, 64 * 16])

prot_embd_W = []
prot_gru_1_gate_matrix = []
prot_gru_1_gate_bias = []
prot_gru_1_candidate_matrix = []
prot_gru_1_candidate_bias = []
prot_gru_2_gate_matrix = []
prot_gru_2_gate_bias = []
prot_gru_2_candidate_matrix = []
prot_gru_2_candidate_bias = []
for v in tf.global_variables():
    if "GRU/GRU/GRUCell/Gates/Linear/Matrix" in v.name:
        prot_gru_1_gate_matrix.append(v)
    elif "GRU/GRU/GRUCell/Candidate/Linear/Matrix" in v.name:
Example #25
from tflearn.data_utils import to_categorical, image_preloader
import tflearn.datasets.mnist as mnist
import time

start_time = time.time()
# Data loading and preprocessing
X, Y = image_preloader('data.txt',
                       image_shape=(900, 900),
                       mode='file',
                       categorical_labels=True,
                       normalize=False)

# Building 'AlexNet'
network = input_data(shape=[None, 900, 900])
network = conv_1d(network, 96, 11, strides=4, activation='relu')
network = max_pool_1d(network, 1, strides=2)
#network = local_response_normalization(network)
network = conv_1d(network, 256, 5, activation='relu')
network = max_pool_1d(network, 1, strides=2)
#network = local_response_normalization(network)
network = conv_1d(network, 384, 3, activation='relu')
network = conv_1d(network, 384, 3, activation='relu')
network = conv_1d(network, 256, 3, activation='relu')
network = max_pool_1d(network, 1, strides=2)
#network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 3, activation='softmax')
network = regression(network,
Example #26
def model_fn(net, X_len, max_reach, block_size, out_classes, batch_size, dtype, **kwargs):
    """
        Args:
        net -> Input tensor shaped (batch_size, max_reach + block_size + max_reach, 3)
        Returns:
        logits -> Unscaled logits tensor in time_major form, (block_size, batch_size, out_classes)
    """

    for block in range(1, 3):
        with tf.variable_scope("block%d" % block):
            for layer in range(kwargs['num_layers']):
                with tf.variable_scope('layer_%d' % layer):
                    res = net
                    for sublayer in range(kwargs['num_sub_layers']):
                        res = batch_normalization(
                            res, scope='bn_%d' % sublayer)
                        res = tf.nn.relu(res)
                        res = conv_1d(
                            res,
                            64,
                            3,
                            scope="conv_1d_%d" % sublayer,
                            weights_init=variance_scaling_initializer(
                                dtype=dtype)
                        )
                    k = tf.get_variable(
                        "k", initializer=tf.constant_initializer(1.0), shape=[])
                    net = tf.nn.relu(k) * res + net
            net = max_pool_1d(net, 2)
        net = tf.nn.relu(net)

    net = central_cut(net, block_size, 4)
    net = tf.transpose(net, [1, 0, 2], name="Shift_to_time_major")
    # with tf.name_scope("RNN"):
    #     from tensorflow.contrib.cudnn_rnn import CudnnGRU, RNNParamsSaveable
    #     rnn_layer = CudnnGRU(
    #         num_layers=1,
    #         num_units=64,
    #         input_size=64,
    #     )
    #
    #     print(rnn_layer.params_size())
    #     import sys
    #     sys.exit(0)
    #     rnn_params = tf.get_variable("rnn_params", shape=[rnn_layer.params_size()], validate_shape=False)
    #     params_saveable = RNNParamsSaveable(
    #         rnn_layer.params_to_canonical, rnn_layer.canonical_to_params, [rnn_params])
    #     tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, params_saveable)
    #

    with tf.name_scope("RNN"):
        cell = tf.contrib.rnn.GRUCell(64)
        init_state = cell.zero_state(batch_size, dtype=tf.float32)
        outputs, final_state = tf.nn.dynamic_rnn(cell, net, initial_state=init_state, sequence_length=tf.div(X_len + 3, 4), time_major=True, parallel_iterations=128)

    net = conv_1d(outputs, 9, 1, scope='logits')
    return {
        'logits': net,
        'init_state': init_state,
        'final_state': final_state
    }
Example #27
class Thai_sentiment():
    
    file_path = './corpus/Combined_inhousedata_UTF8-1.csv'
    file_path3 = './trainingdataset/combined_inhousedata-UTF8-traindataset-1.csv'
    data, labels = load_csv(file_path, target_column=0, categorical_labels=True, n_classes=2)
    testdata, testlabels = load_csv(file_path3, target_column=0, categorical_labels=True, n_classes=2)

    def preprocess_server(data):
        rlist = []
        preprocessdata = []
        for i in range(len(data)):
            x = requests.get('http://174.138.26.245:5000/preprocess/'+data[i][0])
            resu = x.json()
            preprocessdata.append(resu['result'])
        for i in range(len(preprocessdata)):
            r = requests.get('http://174.138.26.245:5000/tokenize/'+preprocessdata[i])
            result = r.json()
            rlist.append(result['result'])
        return rlist

    '''def get_uniquewords(listdata):
        f = open('./uniqueword/combined_inhousedata_UTF8-1_uniquewords.csv', 'w')

        uniquewords = []
        for line in range(len(listdata)):
            words = listdata[line]
            inner_data = []
            for word in words:
                if word not in uniquewords:
                    #w = repr(word.encode('utf-8'))
                    uniquewords.append(word)
                    f.write(word+'\n')
        f.close()
        return uniquewords'''

    def preprocess_vector(listdata, uniquewords):
            sentences = []
            vectors = []
            mainvector = []
            #f = open(file_path, 'r')
            for line in range(len(listdata)):
                words = listdata[line]
                inner_data = []
                for word in words:
                    inner_data.append(word)
                sentences.append(inner_data)
            
            for sentence in sentences:
                inner_vector = []
                vectors = []
                for word in uniquewords:
                    if word in sentence:
                        inner_vector.append(1)
                    else:
                        inner_vector.append(0)
                vectors.append(inner_vector)
                mainvector.append(vectors)
            return np.array(mainvector, dtype=np.float32)

    def uniqueword_csvload():
        uniquewords = []
        f = open('./uniqueword/combined_inhousedata_UTF8-1_uniquewords.csv', 'r')
        for word in f:
            uniquewords.append(word.strip())
        return uniquewords

    pdata = preprocess_server(data)
    # unique_words = get_uniquewords(pdata)
    unique_words = uniqueword_csvload()
    data = preprocess_vector(pdata, unique_words)
    resultdata = preprocess_server(testdata)
    resultdata = preprocess_vector(resultdata, unique_words)

    neurons = len(unique_words)

    # shuffle the dataset
    data, labels = shuffle(data, labels)

    reset_default_graph()
    network = input_data(shape=[None, 1, neurons])
    network = conv_1d(network, 8, 3, activation='relu')
    network = max_pool_1d(network, 3)

    network = conv_1d(network, 16, 3, activation='relu')
    network = max_pool_1d(network, 3)
    
    network = fully_connected(network, 8, activation='relu')
    network = dropout(network, 0.5)

    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01, loss='categorical_crossentropy')

    model = tflearn.DNN(network)

    model.fit(data, labels, n_epoch=100, shuffle=True, validation_set=(resultdata, testlabels) , show_metric=True, batch_size=None, snapshot_epoch=True, run_id='task-classifier')
    model.save("./model/thaitext-classifier-combined_inhousedata-UTF8-1-100.tfl")
    print("Network trained and saved as thaitext-classifier-combined_inhousedata-UTF8-1-100.tfl")

    result = model.evaluate(resultdata, testlabels)
    print("Evaluation result: %s" %result)

    tp = 0
    fp = 0
    tn = 0
    fn = 0
    predict = model.predict(resultdata)
    for i in range(0, len(testlabels)):
        pred = predict[i]
        label = testlabels[i]
        if label[1] == 1: # data is supposed to be positive
            if pred[1] > 0.5:   # predicted positive
                tp += 1
            else:               # predicted negative (missed positive)
                fn += 1
        else:               # data is supposed to be negative
            if pred[0] > 0.5:
                tn += 1         # predicted negative
            else:
                fp += 1         # predicted positive (false alarm)

    precision = float(tp / (tp + fp))
    recall = float(tp / (tp + fn))
    accuracy = float((tp + tn)/(tp + fp + tn + fn))
    f1 = float((2*precision*recall)/(precision+recall))
    print ("Precision: %s; Recall: %s" %(precision, recall))
    print("Acc: %s, F1: %s" %(accuracy, f1))
    
Example #28
feature_GPCR_compound = tflearn.data_utils.featurewise_std_normalization(feature_GPCR_compound,std_feature_train_compound)
feature_GPCR_protein = tflearn.data_utils.featurewise_std_normalization(feature_GPCR_protein,std_feature_train_protein)


feature_kinase_compound = tflearn.data_utils.featurewise_zero_center(feature_kinase_compound,mean_feature_train_compound)
feature_kinase_protein = tflearn.data_utils.featurewise_zero_center(feature_kinase_protein,mean_feature_train_protein)

feature_kinase_compound = tflearn.data_utils.featurewise_std_normalization(feature_kinase_compound,std_feature_train_compound)
feature_kinase_protein = tflearn.data_utils.featurewise_std_normalization(feature_kinase_protein,std_feature_train_protein)

# Sep model
prot_data = input_data(shape=[None, 512])
prot_reshape = tflearn.reshape(prot_data, [-1, 256,2])
conv_1 = conv_1d(prot_reshape, 64, 4,2, activation='leakyrelu',weights_init="xavier", regularizer="L2",name='conv1')
pool_1 = max_pool_1d(conv_1, 4,name='pool1')
conv_2 = conv_1d(pool_1, 32, 4,2, activation='leakyrelu', weights_init="xavier",regularizer="L2",name='conv2')
pool_2 = max_pool_1d(conv_2, 2,name='pool2')
prot_reshape_4 = tflearn.reshape(pool_2, [-1, 32*8])


drug_data = input_data(shape=[None,256])
drug_reshape = tflearn.reshape(drug_data, [-1, 128,2])
conv_3 = conv_1d(drug_reshape, 64, 4,2, activation='leakyrelu', weights_init="xavier",regularizer="L2",name='conv3')
pool_3 = max_pool_1d(conv_3, 2,name='pool3')
conv_4 = conv_1d(pool_3, 32, 4,2, activation='leakyrelu', weights_init="xavier",regularizer="L2",name='conv4')
pool_4 = max_pool_1d(conv_4, 2,name='pool4')
drug_reshape_4 = tflearn.reshape(pool_4, [-1, 32*8])

merging =  merge([prot_reshape_4,drug_reshape_4],mode='concat',axis=1)
fc_2 = fully_connected(merging, 200, activation='leakyrelu',weights_init="xavier",name='fully2')
Example #29
from tflearn.data_augmentation import ImageAugmentation
import time
import h5py

start_time = time.time()
X, Y = image_preloader('data.txt', image_shape=(900, 900),   mode='file', categorical_labels=True,   normalize=False)

img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)
img_aug.add_random_blur(sigma_max=3.)

conv_input = input_data(shape=[None, 900, 900], name='input', data_augmentation=img_aug)
conv = conv_1d(conv_input, 10, filter_size=100, activation='leaky_relu', strides=1)
conv1 = conv_1d(conv_input, 5, filter_size=50, activation='leaky_relu', strides=1)
conv1 = max_pool_1d(conv1, kernel_size=2, strides=1)
convnet = merge([conv, conv1], mode='concat', axis=2)
convnet = conv_1d(convnet, 10, filter_size=20, activation='leaky_relu')
convnet = max_pool_1d(convnet, kernel_size=2, strides=1)
convnet = dropout(convnet, 0.5)
convnet = fully_connected(convnet, 3, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=0.0001, loss='categorical_crossentropy')
model = tflearn.DNN(convnet, tensorboard_verbose=3, tensorboard_dir='Tensordboard/')
model.fit(X, Y, n_epoch=5, validation_set=0.2, show_metric=True, batch_size=20, shuffle=True,
	snapshot_epoch=True, run_id='Digital Mammography ConCaDNet')

model.save('Model/model.tflearn')

end_time = time.time()
print("Training Time:")
print(end_time - start_time)
Example #30
                                        [GRU_size_prot], stddev=0.1),
                                    restore=False)
V_prot = tf.tanh(tf.tensordot(prot_gru_2, W_prot, axes=1) + b_prot)
VU_prot = tf.tensordot(V_prot, U_prot, axes=1)
alphas_prot = tf.nn.softmax(VU_prot, name='alphas')
Attn_prot = tf.reduce_sum(prot_gru_2 * tf.expand_dims(alphas_prot, -1), 1)
Attn_prot_reshape = tflearn.reshape(Attn_prot, [-1, GRU_size_prot, 1])
conv_1 = conv_1d(Attn_prot_reshape,
                 64,
                 8,
                 4,
                 activation='leakyrelu',
                 weights_init="xavier",
                 regularizer="L2",
                 name='conv1')
pool_1 = max_pool_1d(conv_1, 4, name='pool1')
prot_reshape_6 = tflearn.reshape(pool_1, [-1, 64 * 16])

prot_embd_W = []
prot_gru_1_gate_matrix = []
prot_gru_1_gate_bias = []
prot_gru_1_candidate_matrix = []
prot_gru_1_candidate_bias = []
prot_gru_2_gate_matrix = []
prot_gru_2_gate_bias = []
prot_gru_2_candidate_matrix = []
prot_gru_2_candidate_bias = []
for v in tf.global_variables():
    if "GRU/GRU/GRUCell/Gates/Linear/Matrix" in v.name:
        prot_gru_1_gate_matrix.append(v)
    elif "GRU/GRU/GRUCell/Candidate/Linear/Matrix" in v.name:
Example #31
    # y_train = load_array(data/+'train_labels.bc')
    # x_val = load_array(data/+'val_data.bc')
    # y_val = load_array(data/+'val_labels.bc')
    # embedding_matrix = load_array(data/+'embedding_matrix.bc')

    # The syntax with tflearn is almost identical to keras. The only
    # differences are: there is no need to flatten a tensor before passing it
    # to a fully_connected layer, since the fc layer takes care of that
    # automatically, and the default padding in tflearn is 'same', while in
    # keras it is 'valid'.
    net = input_data(shape=[None,MAX_SEQUENCE_LENGTH], name='input')
    net = embedding(net, input_dim=MAX_NB_WORDS, output_dim=EMBEDDING_DIM, trainable=False, name="EmbeddingLayer")
    net = conv_1d(net, 128, 5, 1, activation='relu', padding="valid")
    # one could add regularization as:
    # net = conv_1d(net, 128, 5, 1, activation='relu', regularizer="L2", padding="valid")
    net = max_pool_1d(net, 5, padding="valid")
    net = batch_normalization(net)
    net = conv_1d(net, 128, 5, activation='relu', padding="valid")
    net = max_pool_1d(net, 5, padding="valid")
    net = batch_normalization(net)
    net = conv_1d(net, 128, 5, activation='relu', padding="valid")
    net = max_pool_1d(net, 35)
    net = batch_normalization(net)
    net = fully_connected(net, 128, activation='relu')
    net = dropout(net, 0.5)
    net = fully_connected(net, y_train.shape[1], activation='softmax')
    net = regression(net, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(net, tensorboard_verbose=0)
    embeddingWeights = tflearn.get_layer_variables_by_name('EmbeddingLayer')[0]
    model.set_weights(embeddingWeights, embedding_matrix)
Example #32
def model_fn(net,
             X_len,
             max_reach,
             block_size,
             out_classes,
             batch_size,
             k,
             reuse=False):
    """
        Args:
        net -> Input tensor shaped (batch_size, max_reach + block_size + max_reach, 3)
        Returns:
        logits -> Unscaled logits tensor in time_major form, (block_size, batch_size, out_classes)
    """

    print("model in", net.get_shape())
    with tf.name_scope("model"):
        for j in range(3):
            with tf.variable_scope("block%d" % (j + 1)):
                for i, no_channel in zip([1, 4, 16],
                                         np.array([64, 64, 128]) * (2**j)):
                    with tf.variable_scope("atrous_conv1d_%d" % i):
                        filter = tf.get_variable("W",
                                                 shape=(3, net.get_shape()[-1],
                                                        no_channel))
                        bias = tf.get_variable("b", shape=(no_channel, ))
                        net = atrous_conv1d(net, filter, i,
                                            padding="VALID") + bias
                        net = tf.nn.relu(net)
                net = tf.Print(net, [tf.shape(net)],
                               first_n=5,
                               message="net, pre_pool")
                net = max_pool_1d(net, 2)
        print("after conv", net.get_shape())
        net = tf.transpose(net, [1, 0, 2], name="Shift_to_time_major")

        outputs = net

        state_size = 128

        with tf.name_scope("RNN"):
            cell = tf.nn.rnn_cell.GRUCell(state_size)
            cell = tf.nn.rnn_cell.MultiRNNCell([cell for _ in range(3)])
            init_state = cell.zero_state(batch_size, dtype=tf.float32)
            outputs, final_state = tf.nn.dynamic_rnn(cell,
                                                     net,
                                                     initial_state=init_state,
                                                     sequence_length=X_len,
                                                     time_major=True,
                                                     parallel_iterations=128)

        outputs = tf.Print(outputs, [tf.shape(outputs)],
                           first_n=1,
                           message="outputs_pre_w")
        print("outputs", outputs.get_shape())
        with tf.variable_scope("Output"):
            outputs = tf.reshape(outputs, [-1, state_size])
            W = tf.get_variable("W", shape=[state_size, out_classes])
            b = tf.get_variable("b", shape=[out_classes])
            outputs = tf.matmul(outputs, W) + b
            logits = tf.reshape(outputs,
                                [block_size // 8, batch_size, out_classes])
    print("model out", logits.get_shape())
    return {
        'logits': logits,
        'init_state': init_state,
        'final_state': final_state,
        'reg': k * ops.running_mean(logits, [5, 6, 7], [4, 8, 16], out_classes)
    }
Example #33
def my_model(img_prep, img_aug):

    # dropout_probability = 0.5
    dropout_probability = 1.0
    initial_learning_rate = 0.0001
    learning_decay = 1E-5
    initializer = 'uniform_scaling'
    # initializer = 'truncated_normal'
    activation_function = 'relu'
    #     activation_function = 'sigmoid'
    objective_function = 'categorical_crossentropy'
    # objective_function = 'mean_square'
    chooseme = R2()

    network = input_data(shape=[None, 1024, 1],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    # network = conv_2d(network, 32, 3, strides=1, padding='same', activation=activation_function,
    #                  bias=True, bias_init='zeros', weights_init=initializer,
    #                  regularizer='L2')
    network = conv_1d(network,
                      32,
                      4,
                      strides=1,
                      padding='same',
                      activation=activation_function,
                      bias=True,
                      bias_init='zeros',
                      weights_init=initializer)
    # network = max_pool_2d(network, 2, strides=None, padding='same')
    # temporarily removing this puppy
    network = max_pool_1d(network, 2, strides=None, padding='same')
    # network = conv_2d(network, 64, 3, strides=1, padding='same', activation=activation_function,
    #                   bias=True, bias_init='zeros', weights_init=initializer)
    network = conv_1d(network,
                      64,
                      4,
                      strides=1,
                      padding='same',
                      activation=activation_function,
                      bias=True,
                      bias_init='zeros',
                      weights_init=initializer)
    # network = conv_2d(network, 64, 3, strides=1, padding='same', activation=activation_function,
    #                   bias=True, bias_init='zeros', weights_init=initializer)
    network = conv_1d(network,
                      64,
                      4,
                      strides=1,
                      padding='same',
                      activation=activation_function,
                      bias=True,
                      bias_init='zeros',
                      weights_init=initializer)
    # network = max_pool_2d(network, 2, strides=None, padding='same')
    network = max_pool_1d(network, 2, strides=None, padding='same')
    network = fully_connected(network, 512, activation=activation_function)
    network = dropout(network, dropout_probability)
    network = fully_connected(network, 7, activation='softmax')
    network = regression(network,
                         optimizer='adam',
                         loss=objective_function,
                         learning_rate=initial_learning_rate,
                         metric=chooseme)
    # sgd = SGD(learning_rate=initial_learning_rate, lr_decay=learning_decay, decay_step=90)
    # network = regression(network, optimizer=sgd,
    #                      loss='categorical_crossentropy')
    return network
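A minimal usage sketch for my_model; the preprocessing/augmentation objects and the random training data below are placeholders, not part of the original project:

import numpy as np
import tflearn
from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.data_augmentation import ImageAugmentation
from tflearn.data_utils import to_categorical

img_prep = ImagePreprocessing()           # placeholder preprocessing pipeline
img_prep.add_featurewise_zero_center()
img_aug = ImageAugmentation()             # placeholder augmentation pipeline

network = my_model(img_prep, img_aug)
model = tflearn.DNN(network, tensorboard_verbose=0)

# Dummy data shaped to match input_data(shape=[None, 1024, 1]) and the 7-way softmax.
X = np.random.rand(32, 1024, 1)
Y = to_categorical(np.random.randint(0, 7, 32), 7)
model.fit(X, Y, n_epoch=1, batch_size=8, show_metric=True)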
Example No. 34
def build_tflearn_ann(length):
    input_layer = input_data(shape=[None, length, 1])

    pool_layer_1 = max_pool_1d(input_layer, 10, name='pool_layer_1')
    pool_layer_2 = max_pool_1d(pool_layer_1, 5, name='pool_layer_2')
    pool_layer_3 = max_pool_1d(pool_layer_2, 5, name='pool_layer_3')
    pool_layer_4 = max_pool_1d(pool_layer_3, 5, name='pool_layer_4')

    fully_connect_1 = fully_connected(pool_layer_3,
                                      512,
                                      activation='relu',
                                      name='fully_connect_1',
                                      weights_init='xavier',
                                      regularizer="L2")

    fully_connect_2 = fully_connected(pool_layer_2,
                                      512,
                                      activation='relu',
                                      name='fully_connect_2',
                                      weights_init='xavier',
                                      regularizer="L2")

    fully_connect_3 = fully_connected(pool_layer_1,
                                      512,
                                      activation='relu',
                                      name='fully_connect_3',
                                      weights_init='xavier',
                                      regularizer="L2")

    fully_connect_4 = fully_connected(pool_layer_4,
                                      512,
                                      activation='relu',
                                      name='fully_connect_4',
                                      weights_init='xavier',
                                      regularizer="L2")
    # Merge above layers
    merge_layer = tflearn.merge_outputs(
        [fully_connect_1, fully_connect_2, fully_connect_3, fully_connect_4])
    # merge_layer = tflearn.merge_outputs(
    #     [fully_connect_1, fully_connect_2, fully_connect_3, fully_connect_4, fully_connect_5])
    # merge_layer = tflearn.merge_outputs(
    #     [fully_connect_1, fully_connect_2, fully_connect_3, fully_connect_4, fully_connect_5, fully_connect_6,
    #      fully_connect_7, fully_connect_8, fully_connect_9, fully_connect_10])
    drop_2 = dropout(merge_layer, 0.25)

    fc_layer_4 = fully_connected(drop_2,
                                 2048,
                                 activation='relu',
                                 name='fc_layer_4',
                                 regularizer='L2',
                                 weights_init='xavier',
                                 weight_decay=0.001)
    drop_2 = dropout(fc_layer_4, drop_out_prob)

    fc_layer_5 = fully_connected(drop_2,
                                 1024,
                                 activation='relu',
                                 name='fc_layer_5',
                                 regularizer='L2',
                                 weights_init='xavier',
                                 weight_decay=0.001)
    drop_3 = dropout(fc_layer_5, drop_out_prob)

    fc_layer_6 = fully_connected(drop_3,
                                 128,
                                 activation='relu',
                                 name='fc_layer_6',
                                 regularizer='L2',
                                 weights_init='xavier',
                                 weight_decay=0.001)
    drop_4 = dropout(fc_layer_6, drop_out_prob)

    # Output
    fc_layer_2 = fully_connected(drop_4,
                                 3,
                                 activation='softmax',
                                 name='output')
    network = regression(fc_layer_2,
                         optimizer='adam',
                         loss='categorical_crossentropy',  # output layer already applies softmax
                         learning_rate=0.0001,
                         metric='Accuracy')
    model = tflearn.DNN(network)
    return model
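A hedged usage sketch for build_tflearn_ann; the function reads a module-level drop_out_prob, so an assumed value is set first, and the signal length and training data are made up:

import numpy as np
import tflearn
from tflearn.data_utils import to_categorical

drop_out_prob = 0.5                  # assumed; not defined in the excerpt above

model = build_tflearn_ann(1000)      # 1000-sample signals, purely illustrative

X = np.random.rand(64, 1000, 1)      # [n, length, 1], as expected by input_data
Y = to_categorical(np.random.randint(0, 3, 64), 3)
model.fit(X, Y, n_epoch=1, batch_size=16, validation_set=0.1, show_metric=True)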
Example No. 35
def main():
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]
    pickle_folders_to_load = sorted(pickle_folders_to_load)
    length = len(pickle_folders_to_load)

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'
    beat_spec_len = 432
    n_folds = 10
    n_epochs = 200
    take = 1

    output_folder = 'cross_{0}_folds_{1}_epochs_take{2}/'.format(n_folds, n_epochs, take)

    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    summary_file = output_folder + 'summary_{0}_folds_{1}_epochs.txt'.format(n_folds, n_epochs)

    # set up training, testing, & validation partitions
    beat_spec_array, sdr_array = load_beat_spec_and_sdrs(pickle_folders_to_load, pickle_folder,
                                                         feature, fg_or_bg, sdr_type)

    perm = np.random.permutation(len(pickle_folders_to_load))  # random permutation of indices
    folds = np.array_split(perm, n_folds)  # splits into folds
    predicted = []

    for fold in range(n_folds):
        train_beat_spec = np.expand_dims([beat_spec_array[i] for i in range(length) if i not in folds[fold]], -1)
        train_sdr = np.expand_dims([sdr_array[i] for i in range(length) if i not in folds[fold]], -1)
        test_beat_spec = np.expand_dims([beat_spec_array[i] for i in folds[fold]], -1)
        test_sdr = np.expand_dims([sdr_array[i] for i in folds[fold]], -1)

        with tf.Graph().as_default():
            # Building convolutional network
            network = input_data(shape=[None, beat_spec_len, 1])
            network = conv_1d(network, 32, 4, activation='relu', regularizer="L2")
            network = max_pool_1d(network, 2)
            network = conv_1d(network, 64, 80, activation='relu', regularizer="L2")
            network = max_pool_1d(network, 2)
            network = fully_connected(network, 128, activation='relu')
            network = dropout(network, 0.8)
            network = fully_connected(network, 256, activation='relu')  # look for non-tanh things???
            network = dropout(network, 0.8)
            network = fully_connected(network, 1, activation='linear')
            regress = tflearn.regression(network, optimizer='rmsprop', loss='mean_square', learning_rate=0.001)

            start = time.time()
            # Training
            model = tflearn.DNN(regress)#, session=sess)
            model.fit(train_beat_spec, train_sdr, n_epoch=n_epochs,
                      snapshot_step=1000, show_metric=True,
                      run_id='relus_{0}_{1}_of_{2}'.format(n_epochs, fold+1, n_folds))
            elapsed = (time.time() - start)

        prediction = np.array(model.predict(test_beat_spec))[:, 0]
        with open(output_folder + 'predictions_fold{}.txt'.format(fold + 1), 'a') as f:
            f.write('Training avg = {} \n \n'.format(np.mean(train_sdr)))
            f.write('Actual \t Predicted \n')
            for i in range(len(prediction)):
                f.write('{0} \t {1} \n'.format(test_sdr[i][0], prediction[i]))
        pprint.pprint(prediction)
        predicted.append(prediction)
        with open(summary_file, 'a') as f:
            mse = np.square(test_sdr[:, 0] - prediction).mean()
            f.write('Fold {0}\t mse = {1} dB \t time = {2} min \n'.format(fold + 1, mse, elapsed / 60.))
        plot(test_sdr, prediction, output_folder + 'scatter_fold{}.png'.format(fold + 1))

        tf.reset_default_graph()

    predicted = np.array(predicted).flatten()
    # predictions were collected fold by fold, so order the targets the same way
    actual_sdr = np.array(sdr_array)[np.concatenate(folds)]
    print("Test MSE: ", np.square(actual_sdr - predicted).mean())
    plot(actual_sdr, predicted, output_folder + 'scatter_all_folds_{}_epochs.png'.format(n_epochs))
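The script above is normally run as a standalone module; the entry-point guard, assumed rather than shown in this excerpt, is simply:

if __name__ == '__main__':
    main()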
Example No. 36
def main(argv):
    inputfile = ''
    outputfile = ''
    try:
        opts, args = getopt.getopt(argv, "hi:o:", ["ifile=", "ofile="])
    except getopt.GetoptError:
        print('python cnn-blpred.py -i <inputfile> -o <outputfile>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('test.py -i <inputfile> -o <outputfile>')
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputfile = arg
        elif opt in ("-o", "--ofile"):
            outputfile = arg
    #print 'Input file is "', inputfile
    #print 'Output file is "', outputfile
    if not os.path.exists('infile'):
        os.makedirs('infile')
    if not os.path.exists('outfile'):
        os.makedirs('outfile')
    copyfile(inputfile, 'infile/' + inputfile)
    print('Extracting CKSAAP features!')
    feps.feps('infile/', 'outfile/')
    copyfile('outfile/' + inputfile + '_full_features.csv',
             inputfile.split('.')[0] + '.csv')
    copyfile('outfile/' + inputfile + '_proteinFeaturesList.txt',
             inputfile.split('.')[0] + '.txt')
    rmtree('infile/')
    rmtree('outfile/')
    print('Reading Features!!')
    f = open(inputfile.split('.')[0] + '.txt', 'r')
    trainPFL = f.read().splitlines()
    f.close()
    df = pd.read_csv(inputfile.split('.')[0] + '.csv',
                     index_col=False,
                     names=trainPFL)
    filenames = [
        'BL_Level_1', 'BL_Class_A', 'BL_Class_B', 'BL_Class_C', 'BL_Class_D',
        'BL_Group_2'
    ]
    for filename in filenames:
        print('Predicting ' + filename + '!')
        f1 = open('models/feature_selection/' + filename + '_XGB_FS.pkl', 'rb')
        xgb = pickle.load(f1)
        f1.close()
        f1 = open('models/feature_selection/' + filename + '_vocabulary.pkl',
                  'rb')
        vocabulary = pickle.load(f1)
        f1.close()
        model = SelectFromModel(xgb, prefit=True)
        df_new = model.transform(df)

        input_layer = tflearn.input_data(shape=[None, df_new.shape[1]],
                                         name='input')
        embedding_layer = tflearn.embedding(input_layer,
                                            input_dim=vocabulary,
                                            output_dim=128,
                                            validate_indices=True)
        conv_layer = conv_1d(embedding_layer,
                             256,
                             4,
                             padding='same',
                             activation='tanh',
                             regularizer='L2')
        maxpool_layer = max_pool_1d(conv_layer, 2)
        dropout = tflearn.dropout(maxpool_layer, 0.5)
        softmax = tflearn.fully_connected(dropout, 2, activation='softmax')
        regression = tflearn.regression(softmax,
                                        optimizer='adam',
                                        learning_rate=0.001,
                                        loss='categorical_crossentropy',
                                        name='target')
        clf = tflearn.DNN(regression, tensorboard_verbose=3)
        clf.load('models/classification/' + filename + '/' + filename +
                 '_model.tfl')
        predicted = clf.predict_label(df_new)[:, 1]
        score = clf.predict(df_new)
        if not os.path.exists('results'):
            os.makedirs('results')
        np.savetxt('results/' + outputfile + '_' + filename +
                   '_predict_label.csv',
                   predicted,
                   delimiter=',')
        np.savetxt('results/' + outputfile + '_' + filename +
                   '_predict_score.csv',
                   score,
                   delimiter=',')
        tf.reset_default_graph()
        del vocabulary, df_new, f1, input_layer, embedding_layer, conv_layer, maxpool_layer, dropout, softmax, regression, clf, predicted, score, model, xgb
        gc.collect()
    os.remove(inputfile.split('.')[0] + '.csv')
    os.remove(inputfile.split('.')[0] + '.txt')
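The excerpt ends with main; the usual command-line entry point, assumed rather than shown here, would be:

if __name__ == '__main__':
    # e.g.  python cnn-blpred.py -i sequences.fasta -o run1
    main(sys.argv[1:])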
Example No. 37
# Sequence padding
trainX = pad_sequences(trainX, maxlen=10, value=0.)
testX = pad_sequences(testX, maxlen=10, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, 6)
testY = to_categorical(testY, 6)

network = input_data(shape=[None, 10], name='input')
network = tflearn.embedding(network, input_dim=1000, output_dim=128)
branch1 = conv_1d(network,
                  128,
                  3,
                  padding='valid',
                  activation='relu',
                  regularizer="L2")
branch1 = max_pool_1d(branch1, 2)
branch2 = conv_1d(network,
                  128,
                  4,
                  padding='valid',
                  activation='relu',
                  regularizer="L2")
branch2 = max_pool_1d(branch2, 2)
branch3 = conv_1d(network,
                  128,
                  5,
                  padding='valid',
                  activation='relu',
                  regularizer="L2")
branch3 = max_pool_1d(branch3, 2)
network = merge([branch1, branch2, branch3], mode='concat', axis=1)
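The snippet stops at the merge; a hedged sketch of how this multi-branch text CNN is commonly finished (the 6-way softmax matches the to_categorical calls above, the remaining hyperparameters are assumptions):

network = dropout(network, 0.5)
network = fully_connected(network, 6, activation='softmax')
network = regression(network,
                     optimizer='adam',
                     learning_rate=0.001,
                     loss='categorical_crossentropy',
                     name='target')

model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit(trainX, trainY,
          validation_set=(testX, testY),
          n_epoch=5,
          batch_size=32,
          show_metric=True)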
Example No. 38
matrix_size = opts.matrix_size

X, Y = image_preloader('/scratch/data.txt',
                       image_shape=(matrix_size, matrix_size, 1),
                       mode='file',
                       categorical_labels=True,
                       normalize=False)

conv_input = input_data(shape=[None, matrix_size, matrix_size], name='input')
conv = conv_1d(conv_input,
               100,
               filter_size=50,
               activation='leaky_relu',
               strides=2)
conv1 = conv_1d(conv_input, 50, 1, activation='leaky_relu', strides=1)
conv1 = max_pool_1d(conv1, kernel_size=2, strides=2)
convnet = merge([conv, conv1], mode='concat', axis=2)
convnet = conv_1d(convnet, 30, filter_size=1, activation='relu')
#convnet = dropout(convnet, 0.35) -- Currently disabled (can be included if generalization is necessary)
convnet = fully_connected(convnet, 3, activation='softmax')
convnet = regression(convnet,
                     optimizer='adam',
                     learning_rate=opts.lr,
                     loss='categorical_crossentropy')
model = tflearn.DNN(convnet,
                    tensorboard_verbose=3,
                    tensorboard_dir='/modelState/Tensordboard/')
'''model.fit(X, Y, n_epoch=opts.epoch, validation_set=0.2, show_metric=True, batch_size=opts.bs, snapshot_step=100, 
    snapshot_epoch=False, run_id='Digital Mammography ConCaDNet')'''

model.load('/modelState/model.tflearn')
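Once the weights are restored, the model would typically be evaluated or used for inference; a minimal sketch, assuming the preloaded X and Y match the network's input shape:

# Evaluate the restored weights on the preloaded data.
print(model.evaluate(X, Y, batch_size=opts.bs))

# Or predict class probabilities for a few samples.
print(model.predict(X[:10]))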