def TCN(input_layer, output_size, num_channels, sequence_length, kernel_size, dropout):
    # tcn has shape (batch_size, seq_len, num_channels[-1]);
    # the last entry of num_channels is usually the hidden size
    tcn = TemporalConvNet(input_layer=input_layer,
                          num_channels=num_channels,
                          sequence_length=sequence_length,
                          kernel_size=kernel_size,
                          dropout=dropout)
    linear = tf.contrib.layers.fully_connected(tcn[:, -1, :], output_size,
                                               activation_fn=None)
    return linear
def TCN(input_layer, output_size, num_channels, embedding_size, kernel_size,
        dropout, bn_switch=None, init=False):
    """
    shapes:
        input_layer: (b_s, L), contains the integer token IDs
        output_size should be vocab_size
    """
    initrange = 0.1
    keep_prob_emb = 1.0 - dropout
    sequence_length = input_layer.get_shape()[-1]
    embeddings = tf.get_variable('embedding',
                                 shape=[output_size, embedding_size],
                                 dtype=tf.float32,
                                 initializer=tf.initializers.random_uniform(
                                     -initrange, initrange))
    embedded_input = tf.nn.embedding_lookup(embeddings, input_layer)
    drop = tf.nn.dropout(embedded_input, keep_prob_emb)
    tcn = TemporalConvNet(input_layer=drop,
                          num_channels=num_channels,
                          sequence_length=sequence_length,
                          kernel_size=kernel_size,
                          dropout=dropout,
                          init=init)
    decoder_b = tf.get_variable('b_h', shape=[output_size], dtype=tf.float32,
                                initializer=tf.zeros_initializer(),
                                trainable=True)
    # decoder weights are tied to the embedding matrix: a width-1 convolution
    # with the transposed embeddings projects each time step back onto the vocabulary
    decoder = tf.nn.bias_add(
        tf.nn.convolution(tcn,
                          tf.expand_dims(tf.transpose(embeddings), axis=0),
                          'SAME'),
        decoder_b)
    return decoder
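# The tied decoder above is equivalent to a per-timestep matmul against the
# transposed embedding matrix. A minimal sketch checking that equivalence
# (TF 1.x; the batch/length/embedding/vocab sizes below are made up for the
# check, not taken from the code above):
import numpy as np
import tensorflow as tf

batch, length, emb, vocab = 2, 5, 8, 11
tcn_out = tf.constant(np.random.randn(batch, length, emb), tf.float32)
emb_mat = tf.constant(np.random.randn(vocab, emb), tf.float32)

# width-1 convolution with the transposed embeddings, as in the decoder above
kernel = tf.expand_dims(tf.transpose(emb_mat), axis=0)     # (1, emb, vocab)
conv_logits = tf.nn.convolution(tcn_out, kernel, 'SAME')   # (batch, length, vocab)

# the same projection written as an explicit per-timestep matmul
matmul_logits = tf.einsum('ble,ve->blv', tcn_out, emb_mat)

with tf.Session() as sess:
    a, b = sess.run([conv_logits, matmul_logits])
    print(np.allclose(a, b, atol=1e-5))  # True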
def __init__(self, tcn_params, cnp_params, class_imb, L2_penalty):
    # unpack TCN parameters
    n_channels = tcn_params['n_channels']
    levels = tcn_params['levels']
    kernel_size = tcn_params['kernel_size']
    dropout = tcn_params['dropout']
    self.n_classes = tcn_params['n_classes']

    # unpack CNP parameters: every encoder/decoder layer has the same width,
    # except the final decoder layer, which outputs 2 values
    self.n_data_channels = cnp_params['num_channels']
    encoder_output_sizes = [cnp_params['encoder_output_size']
                            for _ in range(cnp_params['encoder_levels'])]
    decoder_output_sizes = [cnp_params['decoder_output_size']
                            for _ in range(cnp_params['decoder_levels'] - 1)]
    decoder_output_sizes.append(2)

    self.cnp = DeterministicModel(encoder_output_sizes, decoder_output_sizes,
                                  self.n_data_channels, compute_loss=True)
    self.tcn = TemporalConvNet([n_channels] * levels, kernel_size, dropout)
    self.class_imb = class_imb
    self.L2_penalty = L2_penalty
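# A hypothetical instantiation of the constructor above. The class name
# (Model) and every value here are assumptions for illustration; only the
# dictionary keys are the ones actually unpacked in __init__:
tcn_params = {'n_channels': 32,    # channels per temporal block
              'levels': 4,         # number of stacked temporal blocks
              'kernel_size': 3,
              'dropout': 0.2,
              'n_classes': 2}
cnp_params = {'num_channels': 16,  # data channels seen by the CNP
              'encoder_levels': 3,
              'encoder_output_size': 128,
              'decoder_levels': 3,
              'decoder_output_size': 128}
model = Model(tcn_params, cnp_params, class_imb=0.1, L2_penalty=1e-4)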
def TCN(input_layer, output_size, num_channels, embedding_size, kernel_size,
        dropout, init=False):
    """
    shapes:
        input_layer: (b_s, L), contains the integer token IDs
        output_size should be vocab_size
    """
    initrange = 0.1
    keep_prob_emb = 1.0  # embedding dropout is disabled in this variant
    sequence_length = input_layer.get_shape()[-1]
    embeddings = tf.get_variable('embedding',
                                 shape=[output_size, embedding_size],
                                 dtype=tf.float32,
                                 initializer=tf.initializers.random_uniform(
                                     -initrange, initrange))
    embedded_input = tf.nn.embedding_lookup(embeddings, input_layer)
    drop = tf.nn.dropout(embedded_input, keep_prob_emb)
    tcn = TemporalConvNet(input_layer=drop,
                          num_channels=num_channels,
                          sequence_length=sequence_length,
                          kernel_size=kernel_size,
                          dropout=dropout,
                          init=init)
    # untied decoder: a fresh fully connected projection instead of the
    # transposed embedding matrix
    decoder = tf.contrib.layers.fully_connected(
        tcn, output_size, activation_fn=None,
        weights_initializer=tf.initializers.random_uniform(-initrange, initrange))
    return decoder
}, {
    'filter_count': 128,
    'kernel_size': 15,
    'dilation_rate': 3,
    'activation_func': tf.nn.elu,
    'res_activation_func': tf.nn.elu,
    'dropout_rate': 0
}]

# data constants
TOTAL_DAYS = 746
DPS_PER_DAY = 270

# build model
print('Building model...')
net = TemporalConvNet('temp-convnet', BLOCK_CONFIG, NUM_CLASSES)
net.build((None, TIMESTEPS, INPUT_CHANNELS))
net.summary()

# compile model
print('\nCompiling model...')
net.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
            loss=MulticlassBinaryCrossEntropy(),
            metrics=[tf.keras.metrics.Precision(),
                     tf.keras.metrics.Recall()])

# get data sequence
print('\nFetching data sequence...')
seq = ProcessedSequence(50, 0.1, 0.1)
validation_set = seq.get_validation_set()
test_set = seq.get_test_set()
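# A minimal training/evaluation sketch for the compiled model above, assuming
# ProcessedSequence is a tf.keras.utils.Sequence yielding (x, y) batches and
# that validation_set/test_set are (x, y) tuples; EPOCHS is a placeholder:
print('\nTraining model...')
net.fit(seq, validation_data=validation_set, epochs=EPOCHS)

print('\nEvaluating model...')
net.evaluate(test_set[0], test_set[1])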
            data.append(val)
        except StopIteration:
            raw_data.append(data)
            break
    return raw_data


# fetch data
x = preload_data('../data/{}.nosync/{}m-x.csv'.format(TICKER, MULT))
y = preload_data('../data/{}.nosync/{}m-y.csv'.format(TICKER, MULT))

# build model
print('Building model...')
net = TemporalConvNet('temp-convnet', BLOCK_CONFIG, NUM_CLASSES)
net.build((None, WINDOW_BARS_SIZE, INPUT_CHANNELS))
net.summary()

# compile model
print('\nCompiling model...')
net.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
            loss=tf.keras.losses.BinaryCrossentropy(),
            metrics=[tf.keras.metrics.Accuracy(),
                     tf.keras.metrics.Precision(),
                     tf.keras.metrics.Recall()])

# train model
for epoch in range(EPOCHS):
def TCN(input_layer, output_size, num_channels, sequence_length, kernel_size,
        dropout, init=False):
    tcn = TemporalConvNet(input_layer=input_layer,
                          num_channels=num_channels,
                          sequence_length=sequence_length,
                          kernel_size=kernel_size,
                          dropout=dropout,
                          init=init)
    # project the last time step onto the output size
    linear = tf.contrib.layers.fully_connected(
        tcn[:, -1, :], output_size, activation_fn=None,
        weights_initializer=tf.initializers.random_normal(0, 0.01))
    return linear
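# A minimal usage sketch for the classification head above (TF 1.x); the
# placeholder shapes and all hyperparameter values here are assumptions for
# illustration, not values taken from the repo:
import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 100, 1])  # (batch, seq_len, features)
labels = tf.placeholder(tf.int32, [None])

logits = TCN(inputs, output_size=10, num_channels=[32] * 4,
             sequence_length=100, kernel_size=7, dropout=0.1)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)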