def RMDN_train(hidden_states, n_mixtures, input_shape, summary=True):
    """
    Builds and returns the RMDN model used for training. The recurrent layer
    has return_sequences=True and is stateless.

    :param hidden_states: dimension of the LSTM state.
    :param n_mixtures: number of output mixture densities.
    :param input_shape: the input shape like (time_steps, c3d_encoding).
    :param summary: optional, whether or not to print summary.
    :return: a Keras model.
    """
    time_steps, _ = input_shape

    sequence_in = Input(shape=input_shape, name='input')

    state = LSTM(output_dim=hidden_states,
                 return_sequences=True,
                 name='recurrent_module')(sequence_in)
    state = Dropout(0.5)(state)

    # Mixture Density Inference
    # mixture components weights
    weight = TimeDistributedDense(output_dim=n_mixtures * 1,
                                  name='output_weight')(state)
    weight = Reshape(target_shape=(time_steps, n_mixtures))(weight)
    weight = Activation('softmax')(weight)
    weight = Reshape(target_shape=(time_steps, n_mixtures, 1))(weight)

    # gaussian mean
    mu = TimeDistributedDense(output_dim=n_mixtures * 2,
                              name='output_mean')(state)
    mu = Reshape(target_shape=(time_steps, n_mixtures, 2))(mu)
    mu = Activation('relu')(mu)  # NOTE: should be a linear activation so the means can take negative values

    # variance
    sigma = TimeDistributedDense(output_dim=n_mixtures * 2,
                                 name='output_var')(state)
    sigma = Reshape(target_shape=(time_steps, n_mixtures, 2))(sigma)
    sigma = Lambda(lambda x: K.exp(x) + 1,
                   output_shape=(time_steps, n_mixtures, 2))(sigma)

    # correlation
    ro = TimeDistributedDense(output_dim=n_mixtures, name='output_corr')(state)
    ro = Reshape(target_shape=(time_steps, n_mixtures, 1))(ro)
    ro = Activation('tanh')(ro)

    md = merge([weight, mu, sigma, ro], mode='concat', concat_axis=-1)

    model = Model(input=sequence_in, output=md)

    if summary:
        model.summary()

    return model
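A minimal sketch of how the packed mixture-density output could be unpacked downstream. The hyper-parameter values and the random stand-in for model.predict are assumptions for illustration, not part of the original code:

import numpy as np

time_steps, n_mixtures = 16, 20          # illustrative values only
# Stand-in for model.predict(batch); the real call returns the same shape.
pred = np.random.rand(1, time_steps, n_mixtures, 6)

# The concat above packs, per mixture: [weight, mu_x, mu_y, sigma_x, sigma_y, rho].
weight = pred[..., 0]      # (1, time_steps, n_mixtures)
mu = pred[..., 1:3]        # (1, time_steps, n_mixtures, 2)
sigma = pred[..., 3:5]     # (1, time_steps, n_mixtures, 2)
rho = pred[..., 5]         # (1, time_steps, n_mixtures)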
Example #2
def create_temporal_sequential_model():
    model = Sequential()
    model.add(
        GRU(32, input_shape=(timesteps, input_dim), return_sequences=True))
    model.add(TimeDistributedDense(nb_classes))
    model.add(Activation('softmax'))
    return model
def model_define():
    model = Sequential()
    model.add(
        TimeDistributed(Convolution2D(nb_filter, 3, 3, border_mode="same"),
                        input_shape=[lstm_output_size, 1, 172, 224]))
    model.add(TimeDistributed(Activation("relu")))
    model.add(TimeDistributed(MaxPooling2D(pool_size=pool_size)))
    print(model.output_shape)

    #
    model.add(TimeDistributed(Convolution2D(1, 3, 3, border_mode="same")))
    model.add(TimeDistributed(Activation("relu")))
    model.add(TimeDistributed(MaxPooling2D(pool_size=pool_size)))
    print(model.output_shape)
    #

    model.add(TimeDistributed(Flatten()))
    print(model.output_shape)

    #    model.add(LSTM(256, return_sequences=True,
    #                   input_shape=(lstm_output_size, 172*224)))
    model.add(
        LSTM(256,
             return_sequences=True,
             input_shape=(lstm_output_size, (172 / 4) * (224 / 4))))
    print(model.output_shape)
    model.add(LSTM(128, return_sequences=True))
    print(model.output_shape)
    model.add(LSTM(32, return_sequences=True))
    print(model.output_shape)
    model.add(TimeDistributedDense(2))
    model.add(Activation('linear'))
    print(model.output_shape)

    return model
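A usage sketch for the model above. nb_filter, pool_size and lstm_output_size are module-level globals the function relies on, so the values below, together with the Theano-style channels-first frame layout, are assumptions for illustration only:

import numpy as np

nb_filter, pool_size, lstm_output_size = 16, (2, 2), 8   # illustrative values

model = model_define()
model.compile(loss='mse', optimizer='rmsprop')

# A batch of 4 clips, each with lstm_output_size frames of shape 1x172x224.
clips = np.random.random((4, lstm_output_size, 1, 172, 224)).astype('float32')
coords = model.predict(clips)   # expected shape: (4, lstm_output_size, 2)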
Example #4
    def __init__(self, input_dim, maxlen, lstm_size, output_dim):
        Model.__init__(self)
        self.classifier = None
        self.maxlen = maxlen
        model = Sequential()
        model.add(Masking(input_shape=(None, input_dim)))
        #   model.add(Embedding(max_features, 128, input_length=maxlen))
        model.add(
            LSTM(lstm_size,
                 input_dim=input_dim))  # try using a GRU instead, for fun
        #model.add(Dense(lstm_size, output_dim))
        model.add(Dense(lstm_size, input_dim=lstm_size))
        model.add(Activation('relu'))
        model.add(RepeatVector(maxlen))
        model.add(
            LSTM(lstm_size, input_dim=lstm_size,
                 return_sequences=True))  # try using a GRU instead, for fun

        model.add(
            TimeDistributedDense(output_dim=output_dim, input_dim=lstm_size))
        model.add(Activation('softmax'))

        # try using different optimizers and different optimizer configs
        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      sample_weight_mode="temporal")
        self.classifier = model
Example #5
    def __init__(self, input_dim=None,maxlen=None, lstm_size=None, output_dim=None, load_from_file = None, weight_file = None, temp="."):
        Model.__init__(self)
        if load_from_file is None:
            self.classifier = None
            self.maxlen=maxlen
            model = Sequential()
            model.add(Masking(input_shape=(None,input_dim)))
         #   model.add(Embedding(max_features, 128, input_length=maxlen))


            lstm = LSTM(input_dim=input_dim, output_dim=lstm_size//2,return_sequences=True)
            gru = GRU(input_dim=input_dim, output_dim=lstm_size//2, go_backwards=True, return_sequences=True)  # the original example used 128; we halve it because the forward and backward outputs are concatenated
            brnn = Bidirectional(forward=lstm, backward=gru, return_sequences=True)

            model.add(brnn)  # try using another Bidirectional RNN inside the Bidirectional RNN. Inception meets callback hell.


            lstm2 = LSTM(input_dim=lstm_size, output_dim=lstm_size,return_sequences=True)
            gru2 = GRU(input_dim=lstm_size, output_dim=lstm_size, go_backwards=True, return_sequences=True)
            brnn2 = Bidirectional(forward=lstm2, backward=gru2, return_sequences=True)

           # model.add(LSTM(lstm_size, input_dim=input_dim, return_sequences=True))  # try using a GRU instead, for fun
            #model.add(Dense(lstm_size, output_dim))
            model.add(brnn2)
            model.add(TimeDistributedDense(output_dim=output_dim, input_dim=lstm_size*2))
            model.add(Activation('softmax'))

            # try using different optimizers and different optimizer configs
            model.compile(loss='categorical_crossentropy', optimizer='rmsprop',sample_weight_mode="temporal")
            self.classifier=model
        else:
            self.classifier = model_from_json(load_from_file)
            self.classifier.load_weights(temp + "/" + 'my_model_weights.h5')
    def build(self):
        dim_data = self.size_of_input_data_dim
        nb_time_step = self.size_of_input_timesteps
        financial_time_series_input = Input(shape=(nb_time_step, dim_data))

        lstm_layer_1 = LSTM(output_dim=nb_hidden_units,
                            dropout_U=dropout,
                            dropout_W=dropout,
                            inner_activation='sigmoid',
                            W_regularizer=l2(l2_norm_alpha),
                            b_regularizer=l2(l2_norm_alpha),
                            activation='tanh',
                            return_sequences=True)
        lstm_layer_2 = LSTM(output_dim=nb_hidden_units,
                            dropout_U=dropout,
                            dropout_W=dropout,
                            inner_activation='sigmoid',
                            W_regularizer=l2(l2_norm_alpha),
                            b_regularizer=l2(l2_norm_alpha),
                            activation='tanh',
                            return_sequences=True)

        h1 = lstm_layer_1(financial_time_series_input)
        h2 = lstm_layer_2(h1)
        time_series_predictions = TimeDistributedDense(1)(h2)
        self.model = Model(
            financial_time_series_input,
            time_series_predictions,
            name="deep rnn for financial time series forecasting")
Example #7
def test_sequence_to_sequence():
    '''
    Apply the same Dense layer to each element of the input's time dimension
    and predict the elements of the output sequence.
    This does not make use of the temporal structure of the sequence
    (see TimeDistributedDense for more details).
    '''
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=500,
                                                         nb_test=200,
                                                         input_shape=(3, 5),
                                                         output_shape=(3, 5),
                                                         classification=False)

    model = Sequential()
    model.add(
        TimeDistributedDense(y_train.shape[-1],
                             input_shape=(X_train.shape[1], X_train.shape[2])))
    model.compile(loss='hinge', optimizer='rmsprop')
    history = model.fit(X_train,
                        y_train,
                        nb_epoch=20,
                        batch_size=16,
                        validation_data=(X_test, y_test),
                        verbose=0)
    assert (history.history['val_loss'][-1] < 0.8)
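For reference, the same per-timestep Dense mapping can be written with the TimeDistributed wrapper, which later Keras versions use in place of TimeDistributedDense; a sketch assuming the same toy shapes as above:

from keras.models import Sequential
from keras.layers import Dense, TimeDistributed

model = Sequential()
# Apply Dense(5) independently to each of the 3 timesteps of a 5-dim input.
model.add(TimeDistributed(Dense(5), input_shape=(3, 5)))
model.compile(loss='hinge', optimizer='rmsprop')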
Example #8
def create_temporal_graph_model():
    model = Graph()
    model.add_input(name='input', input_shape=(timesteps, input_dim))
    model.add_node(GRU(32, return_sequences=True), name='d1', input='input')
    model.add_node(TimeDistributedDense(nb_classes, activation='softmax'),
                   name='d2',
                   input='d1')
    model.add_output(name='output', input='d2')
    return model
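A minimal usage sketch for the Graph model above, following the legacy compile/fit pattern used elsewhere on this page; the data and epoch count are illustrative assumptions (timesteps, input_dim and nb_classes are assumed defined as in the snippet):

import numpy as np

graph = create_temporal_graph_model()
graph.compile('rmsprop', {'output': 'categorical_crossentropy'})

X = np.random.random((32, timesteps, input_dim))
Y = np.random.random((32, timesteps, nb_classes))   # stand-in targets
graph.fit({'input': X, 'output': Y}, nb_epoch=1)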
Example #9
def get_language_model():
	''' returns language model'''

	language_model = Sequential()

	language_model.add(Embedding(vocab_size, 4096, input_length =  max_caption_length))
	language_model.add(GRU(output_dim=4096, return_sequences=True))
	language_model.add(TimeDistributedDense(4096))

	return language_model
Example #10
def test_masked_temporal():
    '''
    Confirm that even with masking on both inputs and outputs, cross-entropies are
    of the expected scale.

    In this task, there are variable-length inputs of integers from 1-9, and a random
    subset of unmasked outputs. Each of these outputs has a 50% probability of being
    the input number unchanged, and a 50% probability of being 2*input % 10.

    The ground-truth best cross-entropy loss should then be -log(0.5) ≈ 0.69.

    '''
    np.random.seed(55318)
    model = Sequential()
    model.add(Embedding(10, 20, mask_zero=True, input_length=20))
    model.add(TimeDistributedDense(10))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  sample_weight_mode='temporal')

    X = np.random.random_integers(1, 9, (50000, 20))
    for rowi in range(X.shape[0]):
        padding = np.random.random_integers(X.shape[1] / 2)
        X[rowi, :padding] = 0

    # 50% of the time the correct output is the input.
    # The other 50% of the time it's 2 * input % 10
    y = (X * np.random.random_integers(1, 2, X.shape)) % 10
    Y = np.zeros((y.size, 10), dtype='int32')
    for i, target in enumerate(y.flat):
        Y[i, target] = 1
    Y = Y.reshape(y.shape + (10, ))

    # Mask 50% of the outputs via sample weights
    sample_weight = np.random.random_integers(0, 1, y.shape)
    print('X shape:', X.shape)
    print('Y shape:', Y.shape)
    print('sample_weight shape:', sample_weight.shape)

    history = model.fit(X,
                        Y,
                        validation_split=0.05,
                        sample_weight=sample_weight,
                        verbose=1,
                        nb_epoch=2)
    ground_truth = -np.log(0.5)
    assert (np.abs(history.history['val_loss'][-1] - ground_truth) < 0.06)
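A one-line check of the figure quoted in the docstring; with two equally likely targets per unmasked position, the best achievable per-position cross-entropy is:

import numpy as np

print(-np.log(0.5))   # ~0.693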
Example #11
def build_sampling_model():
    print('Build model...')
    model = Sequential()
    model.add(
        Embedding(vocab_size,
                  vocab_size,
                  mask_zero=True,
                  init='identity',
                  batch_input_shape=(1, 1)))
    model.add(LSTM(512, return_sequences=True, stateful=True))
    model.add(Dropout(0.2))
    model.add(LSTM(512, return_sequences=True, stateful=True))
    model.add(Dropout(0.2))
    model.add(TimeDistributedDense(vocab_size, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    return model
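A sampling-loop sketch for the stateful model above; the weights file name, seed index and step count are assumptions for illustration (vocab_size is assumed defined as in the snippet):

import numpy as np

sampler = build_sampling_model()
# sampler.load_weights('char_rnn_weights.h5')   # hypothetical trained weights

sampler.reset_states()
token = np.array([[1]])                # seed token index, shape (1, 1)
generated = []
for _ in range(100):
    probs = sampler.predict(token, verbose=0)[0, 0]   # (vocab_size,)
    next_idx = np.random.choice(len(probs), p=probs / probs.sum())
    generated.append(next_idx)
    token = np.array([[next_idx]])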
Example #12
def model_define():
    model = Sequential()

    model.add(
        LSTM(256,
             return_sequences=True,
             input_shape=(lstm_output_size, 172 * 224)))
    print(model.output_shape)
    model.add(LSTM(128, return_sequences=True))
    print(model.output_shape)
    model.add(LSTM(32, return_sequences=True))
    print(model.output_shape)
    model.add(TimeDistributedDense(2))
    model.add(Activation('linear'))
    print(model.output_shape)

    return model
Example #13
	def getModel(self, params, weight=None  ):
		
		lstm_cell_size = params['lstm_cell_size']
		print "params['embeddings_dim'] = ", params['embeddings_dim']
		print "lstm_cell_size= ", lstm_cell_size
		inp = Input(shape=(params['inp_length'],), dtype='int32', name="inp")
		embedding = Embedding(input_dim = params['vocab_size']+1, output_dim = params['embeddings_dim'],
			input_length = params['inp_length'],
			dropout=0.2,
			mask_zero=True,
			trainable=True) (inp)
		lstm_out = LSTM(lstm_cell_size, return_sequences=True)(embedding)
		out = TimeDistributedDense(params['vocab_size'], activation='softmax')(lstm_out)
		model = Model(input=[inp], output=[out])
		model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'] )
		print model.summary()

		return model
def train_breaker(datafilename, sentence_num=1000, puncs=u',,.。!!??', \
			RNN=recurrent.GRU, HIDDEN_SIZE=128, EPOCH_SIZE=10, validate=True):
	wordtable = WordTable()
	wordtable.parse(datafilename, sentence_num)

	X, Y = [], []
	for line in open(datafilename).readlines()[:sentence_num]:
		line = line.strip().decode('utf-8')
		line = re.sub(ur'(^[{0}]+)|([{0}]+$)'.format(puncs),'',line)
		words = wordtable.encode(re.sub(ur'[{0}]'.format(puncs),'',line))
		breaks = re.sub(ur'0[{0}]+'.format(puncs),'1',re.sub(ur'[^{0}]'.format(puncs),'0',line))
		if len(words) >= 30 and len(words) <= 50 and breaks.count('1') >= 4:
			x = np.zeros((len(words), wordtable.capacity), dtype=np.bool)
			y = np.zeros((len(breaks), 2), dtype=np.bool)
			for idx in xrange(len(words)):
				x[idx][words[idx]] = True
				y[idx][int(breaks[idx])] = True
			X.append(x)
			Y.append(y)
	print 'total sentence: ', len(X)

	if validate:
		# Set apart 10% for validation
		split_at = len(X) - len(X)/10
		X_train, X_val = X[:split_at], X[split_at:]
		y_train, y_val = Y[:split_at], Y[split_at:]
	else:
		X_train, y_train = X, Y

	model = Graph()
	model.add_input(name='input', input_shape=(None, wordtable.capacity))
	model.add_node(RNN(HIDDEN_SIZE, return_sequences=True), name='forward', input='input')
	model.add_node(TimeDistributedDense(2, activation='softmax'), name='softmax', input='forward')
	model.add_output(name='output', input='softmax')
	model.compile('adam', {'output': 'categorical_crossentropy'})

	for epoch in xrange(EPOCH_SIZE):
		print "epoch: ", epoch
		for idx, (seq, label) in enumerate(zip(X_train, y_train)):
			loss, accuracy = model.train_on_batch({'input':np.array([seq]), 'output':np.array([label])}, accuracy=True)
			if idx % 20 == 0:
				print "\tidx={0}, loss={1}, accuracy={2}".format(idx, loss, accuracy)

	if validate:
		_Y, _P = [], []
		for (seq, label) in zip(X_val, y_val):
			y = label.argmax(axis=-1)
			p = model.predict({'input':np.array([seq])})['output'][0].argmax(axis=-1)
			_Y.extend(list(y))
			_P.extend(list(p))
		_Y, _P = np.array(_Y), np.array(_P)
		print "should break right: ", ((_P == 1)*(_Y == 1)).sum()
		print "should break wrong: ", ((_P == 0)*(_Y == 1)).sum()
		print "should not break right: ", ((_P == 0)*(_Y == 0)).sum()
		print "should not break wrong: ", ((_P == 1)*(_Y == 0)).sum()

	with open('wordtable_json.txt','w') as wordtable_file:
		wordtable_file.write(wordtable.to_json())
	with open('model_json.txt','w') as model_file:
		model_file.write(model.to_json())
	model.save_weights('model_weights.h5', overwrite=True)
Example #15
                      border_mode='same',
                      W_regularizer=l2(l2value))(input)
pool1 = AveragePooling1D(pool_length=5, stride=1, border_mode='same')(conv1)
conv2 = Convolution1D(120,
                      5,
                      activation='tanh',
                      border_mode='same',
                      W_regularizer=l2(l2value))(pool1)
pool2 = AveragePooling1D(pool_length=5, stride=1, border_mode='same')(conv2)
conv3 = Convolution1D(160,
                      4,
                      activation='tanh',
                      border_mode='same',
                      W_regularizer=l2(l2value))(pool2)
output = TimeDistributedDense(8,
                              activation='softmax',
                              name='output',
                              W_regularizer=l2(l2value))(conv3)
model = Model(input=input, output=output)
adam = Adam(lr=lr)
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['weighted_accuracy'])
model.summary()
best_model_file = './onehotcnn' + str(lr) + str(nep) + str(
    l2value) + '.h5'  #+str(hsize)+'tanh.h5'
best_model = ModelCheckpoint(best_model_file,
                             monitor='val_weighted_accuracy',
                             verbose=2,
                             save_best_only=True)
# and trained it via:
model.fit(traindata,
Example #16
features_size = 100
output_size = nchar
batch_len = 128
N_SAMPLE_PER_EPOCH = 10000000
NUMBER_OF_EPOCH = 10  # number of training passes over a single data set

model = Sequential()

rnn_layer = LSTM(input_dim=input_size,
                 output_dim=features_size,
                 activation='tanh',
                 return_sequences=True)

model.add(rnn_layer)

model.add(TimeDistributedDense(output_dim=output_size))
model.add(Activation('softmax'))
#sgd = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)

print('Compiling the model...')
#model.compile(loss='categorical_crossentropy', optimizer=sgd)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

print('Start training...')

total_session_count = 0
total_sample_count = 0

# accumulate sequences of different lengths in separate lists
len2list_of_seq = {}
Example #17
if not config.rnn_bidirectional:
    model.add_node(rnn_layer(config.rnn_size, config), input=prev, name='rnn')
    model.add_node(Dropout(config.after_rnn_dropout), input='rnn',
                   name='dropout')
else:
    # BRNN following https://github.com/fchollet/keras/blob/master/examples/imdb_bidirectional_lstm.py. Alternative approach: https://github.com/EderSantana/seya/blob/master/seya/layers/recurrent.py#L17-L48
    model.add_node(rnn_layer(config.rnn_size/2, config),
                   input=prev, name='forward')
    model.add_node(rnn_layer(config.rnn_size/2, config, go_backwards=True),
                   input=prev, name='backward')
    model.add_node(Dropout(config.after_rnn_dropout),
                   inputs=['forward', 'backward'], name='dropout')

# Output
model.add_node(TimeDistributedDense(data.output_size), input='dropout',
               name='tddense')
model.add_node(Activation('softmax'), input='tddense', name='softmax')
model.add_output(input='softmax', name='output')

model.compile(optimizer=optimizer, loss={ 'output': config.loss })

def make_sequences(a, size=config.num_steps):
    """Reshape (d1, d2, ...) array into (d1/size, size, d2, ...).

    Pad with zeros if d1 % size is non-zero.
    """
    if a.shape[0] % size != 0:
        pad_size = size - (a.shape[0] % size)
        warn('make_sequences: padding with %d zeros' % pad_size)
        pad = np.zeros((pad_size,) + a.shape[1:], dtype=a.dtype)
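The snippet above is cut off mid-function; as a self-contained illustration of the pad-and-fold idea described in its docstring (not the original implementation), one could write:

import numpy as np

def pad_and_fold(a, size):
    # Pad the first dimension up to a multiple of `size`, then fold it
    # into (d1/size, size, d2, ...).
    if a.shape[0] % size != 0:
        pad_size = size - (a.shape[0] % size)
        a = np.concatenate([a, np.zeros((pad_size,) + a.shape[1:], dtype=a.dtype)])
    return a.reshape((a.shape[0] // size, size) + a.shape[1:])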
Example #18
    return model
print "VGG loading"
image_model = VGG_16('vgg16_weights.h5')
image_model.trainable = False
print "VGG loaded"
# let's load the weights from a save file.
# image_model.load_weights('weight_file.h5')

# next, let's define a RNN model that encodes sequences of words
# into sequences of 128-dimensional word vectors.
print "Text model loading"
language_model = Sequential()
language_model.add(Embedding(vocab_size, 256, input_length=max_caption_len))
language_model.add(GRU(output_dim=128, return_sequences=True))
language_model.add(TimeDistributedDense(128))
print "Text model loaded"
# let's repeat the image vector to turn it into a sequence.
print "Repeat model loading"
image_model.add(RepeatVector(max_caption_len))
print "Repeat model loaded"
# the output of both models will be tensors of shape (samples, max_caption_len, 128).
# let's concatenate these 2 vector sequences.
print "Merging"
model = Sequential()
model.add(Merge([image_model, language_model], mode='concat', concat_axis=-1))
# let's encode this vector sequence into a single vector
model.add(GRU(256, return_sequences=False))
# which will be used to compute a probability
# distribution over what the next word in the caption should be!
model.add(Dense(vocab_size))
Example #19
def build_model():
    # design the deepaclstm model
    main_input = Input(shape=(700, ), dtype='float32', name='main_input')
    #main_input = Masking(mask_value=23)(main_input)
    x = Embedding(output_dim=21, input_dim=21, input_length=700)(main_input)
    auxiliary_input = Input(shape=(700, 21), name='aux_input')  #24
    #auxiliary_input = Masking(mask_value=0)(auxiliary_input)
    print main_input.get_shape()
    print auxiliary_input.get_shape()
    concat = merge([x, auxiliary_input], mode='concat', concat_axis=-1)

    conv1_features = Convolution1D(42,
                                   1,
                                   activation='relu',
                                   border_mode='same',
                                   W_regularizer=l2(0.001))(concat)
    # print 'conv1_features shape', conv1_features.get_shape()
    conv1_features = Reshape((700, 42, 1))(conv1_features)

    conv2_features = Convolution2D(42,
                                   3,
                                   1,
                                   activation='relu',
                                   border_mode='same',
                                   W_regularizer=l2(0.001))(conv1_features)
    # print 'conv2_features.get_shape()', conv2_features.get_shape()

    conv2_features = Reshape((700, 42 * 42))(conv2_features)
    conv2_features = Dropout(0.5)(conv2_features)
    conv2_features = Dense(400, activation='relu')(conv2_features)

    #, activation='tanh', inner_activation='sigmoid',dropout_W=0.5,dropout_U=0.5
    lstm_f1 = LSTM(output_dim=300,
                   return_sequences=True,
                   inner_activation='sigmoid',
                   dropout_W=0.5,
                   dropout_U=0.5)(conv2_features)
    lstm_b1 = LSTM(output_dim=300,
                   return_sequences=True,
                   go_backwards=True,
                   inner_activation='sigmoid',
                   dropout_W=0.5,
                   dropout_U=0.5)(conv2_features)

    lstm_f2 = LSTM(output_dim=300,
                   return_sequences=True,
                   inner_activation='sigmoid',
                   dropout_W=0.5,
                   dropout_U=0.5)(lstm_f1)
    lstm_b2 = LSTM(output_dim=300,
                   return_sequences=True,
                   go_backwards=True,
                   inner_activation='sigmoid',
                   dropout_W=0.5,
                   dropout_U=0.5)(lstm_b1)

    concat_features = merge([lstm_f2, lstm_b2, conv2_features],
                            mode='concat',
                            concat_axis=-1)

    concat_features = Dropout(0.4)(concat_features)
    protein_features = Dense(600, activation='relu')(concat_features)
    # protein_features = TimeDistributedDense(600,activation='relu')(concat_features)
    # protein_features = TimeDistributedDense(100,activation='relu', W_regularizer=l2(0.001))(protein_features)

    main_output = TimeDistributedDense(8,
                                       activation='softmax',
                                       name='main_output')(protein_features)

    model = Model(input=[main_input, auxiliary_input], output=[main_output])
    adam = Adam(lr=0.003)
    model.compile(optimizer=adam,
                  loss={'main_output': 'categorical_crossentropy'},
                  metrics=['weighted_accuracy'])
    model.summary()
    return model
Example #20
            if s[12] > 0:
                ns[3] = s[12]
            elif s[14] > 0:
                ns[3] = s[14]
            else:
                ns[3] = 0
            nt.append(ns)
    tdata.append(nt)
tdata = np.array(tdata)
x_train, x_test, _, _ = train_test_split(tdata, tdata, test_size=0.22, random_state=0)
# model
input_dim = 4

inputs = Input(shape=(x_train.shape[1], input_dim))
encoded = GRU(16, activation='relu', return_sequences=True)(inputs)
encoded = TimeDistributedDense(16)(encoded)
encoded = TimeDistributedDense(2)(encoded)
encoded = Flatten()(encoded)
decoded = Dense(128)(encoded)
decoded = Reshape((32,4))(decoded)
decoded = GRU(input_dim, return_sequences=True, activation='hard_sigmoid')(decoded)
m = Model(inputs, decoded)
e = Model(inputs, encoded)

m.compile(optimizer='adadelta', loss='binary_crossentropy')
m.summary()
m.fit(x_train, x_train, nb_epoch=500, batch_size=256, shuffle=True, validation_data=(x_test, x_test))

tt = x_test[0]
rr = np.around(m.predict(np.array([x_test[0]])))[0]
print(x_test[0])
Example #21
def main(argv):

    batch_size = 128
    nb_epochs = 10

    n_hidden = 128
    patience = 100
    train_data_size = 10000
    test_data_size = 5000
    T = 1000  #Delay length
    input_len = 10  #Input length
    category_size = 8  #Category size

    learning_rate = 0.001
    learning_rate_natGrad = 0.00001  #None
    clipnorm = 1.0

    savefile = "testing.txt"
    model = "uRNN_keras"
    out_every_t = True
    unitary_impl = "full"  # "full" or "ASB2016"
    unitary_init = 'ASB2016'  # 'ASB2016' (or possibly 'svd')

    histfile = 'exp/long_run'

    nb_classes = category_size + 2
    # --- Create data --------------------

    data_set, data_param = copyingProblemData(train_data_size, test_data_size,
                                              T, input_len, category_size)
    print("Done constructing data....")
    train_x = np.array(data_set['train']['Input'])
    train_y = np.array(data_set['train']['Output'])

    test_x = np.array(data_set['test']['Input'])
    test_y = np.array(data_set['test']['Output'])

    s_train_x = theano.shared(train_x)
    s_train_y = theano.shared(train_y)

    s_test_x = theano.shared(test_x)
    s_test_y = theano.shared(test_y)
    print(train_x.shape)
    print("Classes:", nb_classes)
    # --- Create theano graph and compute gradients ----------------------

    if (model == 'uRNN_keras'):
        epsilon = 1e-5
        model = Sequential()
        model.add(
            uRNN(output_dim=n_hidden,
                 inner_init=unitary_init,
                 unitary_impl=unitary_impl,
                 input_shape=train_x.shape[1:],
                 consume_less='gpu',
                 epsilon=epsilon,
                 return_sequences=True))
        model.add(TimeDistributedDense(nb_classes))
        model.add(Activation('softmax'))

    if (model == 'complex_RNN'):
        # NOTE: this branch is currently still broken
        model = Sequential()
        model.add(
            complex_RNN_wrapper(output_dim=nb_classes,
                                hidden_dim=n_hidden,
                                unitary_impl=unitary_impl,
                                input_shape=train_x.shape[1:]))
        model.add(Activation('softmax'))

    if (model == 'LSTM'):
        model = Sequential()
        model.add(
            LSTM(n_hidden,
                 return_sequences=True,
                 input_shape=train_x.shape[1:]))
        model.add(TimeDistributedDense(nb_classes))
        model.add(Activation('softmax'))

    #Setting up the model
    rmsprop = RMSprop_and_natGrad(lr=learning_rate,
                                  clipnorm=clipnorm,
                                  lr_natGrad=learning_rate_natGrad)
    model.compile(loss='categorical_crossentropy',
                  optimizer=rmsprop,
                  metrics=['accuracy'])
    history = LossHistory(histfile)
    checkpointer = keras.callbacks.ModelCheckpoint(filepath=savefile,
                                                   verbose=1,
                                                   save_best_only=True)
    earlystopping = keras.callbacks.EarlyStopping(monitor='val_loss',
                                                  patience=patience,
                                                  verbose=1,
                                                  mode='auto')

    #make sure the experiment directory to hold results exists
    if not os.path.exists('exp'):
        os.makedirs('exp')

    print(model.summary())

    #Now for the actual methods.
    print("X:", train_x.shape)
    print("Y:", train_y.shape)
    model.fit(train_x,
              train_y,
              nb_epoch=nb_epochs,
              verbose=1,
              batch_size=batch_size,
              validation_data=(test_x, test_y),
              callbacks=[history, checkpointer, earlystopping])
    print("Done fitting!")
    scores = model.evaluate(test_x, test_y, verbose=0)

    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])

    # add test scores to history
    history_load = cPickle.load(open(histfile, 'rb'))
    history_load.update({'test_loss': scores[0], 'test_acc': scores[1]})
    cPickle.dump(history_load, open(histfile, 'wb'))
Example #22
x_train, x_test, y_train, y_test = train_test_split(set_x,
                                                    set_y,
                                                    test_size=0.25,
                                                    random_state=0)
# create sample weights from y_train
nonbin_y_train = lb.inverse_transform(y_train)
count = dict(zip(*np.unique(nonbin_y_train, return_counts=True)))
tt = nonbin_y_train.shape[0]
samples_weights = []
for lbl in nonbin_y_train:
    samples_weights.append(count[lbl] / float(tt))
samples_weights = 1 - np.array(samples_weights)
# keras GRU
m = Sequential()
m.add(GRU(15, input_shape=(128, 15), activation='relu', return_sequences=True))
m.add(TimeDistributedDense(15, activation='relu'))
m.add(Flatten())
m.add(Dense(8, activation='hard_sigmoid'))
m.compile(optimizer='adadelta', loss='binary_crossentropy')
m.summary()
#
# GRU Learning
m.fit(x_train,
      y_train,
      sample_weight=samples_weights,
      nb_epoch=100,
      batch_size=128,
      shuffle=True,
      validation_data=(x_test, y_test))
#
gru_predictions = m.predict(c0_themes)
Example #23
def flow_prediction(method=""):
    record = [[[[0] * ranget for j in xrange(rangey)] for i in xrange(rangex)]
              for d in xrange(8)]
    for d, filename in enumerate(sorted(
            glob.glob(r"../data/pos_hour_user#/*"))):
        print filename
        for line in fileinput.input(filename):
            part = line.strip().split(" ")
            px, py, t, c = int(part[0].split(",")[0]), int(
                part[0].split(",")[1]), int(part[1]), int(part[2])
            record[d][px][py][t] = c
        fileinput.close()
    record = filter(lambda x: sum(x) >= 10 * ranget, [
        record[d][i][j] for d in xrange(8) for i in xrange(rangex)
        for j in xrange(rangey)
    ])

    if method == "RNN":
        from keras.models import Graph
        from keras.layers import recurrent, Dropout, TimeDistributedDense
        normal = [[1. * record[r][t] / max(record[r]) for t in xrange(ranget)]
                  for r in xrange(len(record))]
        X_train = np.array([[[p] for p in r][:-1] for r in normal])
        y_train = np.array([[[p] for p in r][1:] for r in normal])
        EPOCH_SIZE = 5
        HIDDEN_SIZE = 256
        RNN = recurrent.GRU  # Replace with SimpleRNN, LSTM, GRU
        model = Graph()
        model.add_input(name='input', input_shape=(ranget - 1, 1))
        model.add_node(RNN(HIDDEN_SIZE, return_sequences=True),
                       name='forward_l1',
                       input='input')
        model.add_node(TimeDistributedDense(1),
                       name='dense',
                       input='forward_l1')
        model.add_output(name='output', input='dense')
        model.compile('adam', {'output': 'mean_squared_error'})
        model.fit({
            'input': X_train,
            'output': y_train
        },
                  nb_epoch=EPOCH_SIZE,
                  show_accuracy=True)
        y_pred = model.predict({'input': X_train})['output']
        y_error_total, y_real_total = np.zeros((23, )), np.zeros((23, ))
        for r in xrange(len(record)):
            y_error_total += abs(
                np.reshape(max(record[r]) * y_pred[r], (23, )) -
                np.array(record[r][1:]))
            y_real_total += np.array(record[r][1:])
        print 1. * y_error_total / y_real_total

    if method == "ARIMA":
        from statsmodels.tsa.arima_model import ARIMA
        # NOTE: `normal` is only defined in the RNN branch above
        normal = np.array(normal)[np.random.choice(len(record), 100)]
        y_error_total, y_real_total = np.zeros((23, )), np.zeros((23, ))
        for x in normal:
            try:
                model = ARIMA(np.array(x), order=(2, 0, 1)).fit(disp=False)
                y_error_total += abs(model.predict(1, 23) - np.array(x[1:]))
                y_real_total += np.array(x[1:])
            except:
                continue
        print 1. * y_error_total / y_real_total
Example #24
seq.add(
    Convolution1D(2,
                  5,
                  activation='tanh',
                  border_mode='same',
                  W_regularizer=l2(0.001)))  # 80
seq.add(AveragePooling1D(pool_length=5, stride=1, border_mode='same'))
seq.add(
    Convolution1D(2,
                  4,
                  activation='tanh',
                  border_mode='same',
                  W_regularizer=l2(0.001)))  # 80
seq.add(
    TimeDistributedDense(8,
                         activation='softmax',
                         name='output',
                         W_regularizer=l2(0.001)))
adam = Adam(lr=lr)
seq.compile(optimizer=adam,
            loss='categorical_crossentropy',
            metrics=['accuracy'])
seq.summary()
#best_model_file = './onehotcnn'+str(lr)+str(nep)+'.h5'
#best_model = ModelCheckpoint(best_model_file, monitor='val_output_acc', verbose=1, save_best_only=True)
# and trained it via:
seq.fit(traindata,
        trainlabel,
        nb_epoch=nep,
        batch_size=2,
        validation_data=(testdata, testlabel))
Example #25
def build_model():

    main_input = Input(shape=(700, 21), name='main_input')

    concat = main_input

    conv1_features = Convolution1D(42,
                                   1,
                                   activation='relu',
                                   border_mode='same',
                                   W_regularizer=l2(0.001))(concat)
    # print 'conv1_features shape', conv1_features.get_shape()
    conv1_features = Reshape((700, 42, 1))(conv1_features)

    conv2_features = Convolution2D(42,
                                   3,
                                   1,
                                   activation='relu',
                                   border_mode='same',
                                   W_regularizer=l2(0.001))(conv1_features)
    # print 'conv2_features.get_shape()', conv2_features.get_shape()

    conv2_features = Reshape((700, 42 * 42))(conv2_features)
    conv2_features = Dropout(0.5)(conv2_features)
    conv2_features = Dense(400, activation='relu')(conv2_features)

    #, activation='tanh', inner_activation='sigmoid',dropout_W=0.5,dropout_U=0.5
    lstm_f1 = LSTM(output_dim=300,
                   return_sequences=True,
                   inner_activation='sigmoid',
                   dropout_W=0.5,
                   dropout_U=0.5)(conv2_features)
    lstm_b1 = LSTM(output_dim=300,
                   return_sequences=True,
                   go_backwards=True,
                   inner_activation='sigmoid',
                   dropout_W=0.5,
                   dropout_U=0.5)(conv2_features)

    lstm_f2 = LSTM(output_dim=300,
                   return_sequences=True,
                   inner_activation='sigmoid',
                   dropout_W=0.5,
                   dropout_U=0.5)(lstm_f1)
    lstm_b2 = LSTM(output_dim=300,
                   return_sequences=True,
                   go_backwards=True,
                   inner_activation='sigmoid',
                   dropout_W=0.5,
                   dropout_U=0.5)(lstm_b1)

    concat_features = merge([lstm_f2, lstm_b2, conv2_features],
                            mode='concat',
                            concat_axis=-1)

    concat_features = Dropout(0.4)(concat_features)
    protein_features = Dense(600, activation='relu')(concat_features)
    # protein_features = TimeDistributedDense(600,activation='relu')(concat_features)
    # protein_features = TimeDistributedDense(100,activation='relu', W_regularizer=l2(0.001))(protein_features)
    main_output = TimeDistributedDense(8,
                                       activation='softmax',
                                       name='main_output')(protein_features)

    deepaclstm = Model(input=[main_input], output=[main_output])
    adam = Adam(lr=0.003)
    deepaclstm.compile(optimizer=adam,
                       loss={'main_output': 'categorical_crossentropy'},
                       metrics=['weighted_accuracy'])
    deepaclstm.summary()
    return deepaclstm
Example #26
         activation='tanh',
         inner_activation='sigmoid',
         dropout_W=0.5,
         dropout_U=0.5)(f1)
f4 = GRU(output_dim=200,
         return_sequences=True,
         activation='tanh',
         inner_activation='sigmoid',
         go_backwards=True,
         dropout_W=0.5,
         dropout_U=0.5)(f2)
cf_feature = merge([f3, f4, d_output], mode='concat', concat_axis=2)
cf_feature = Dropout(0.4)(cf_feature)
f_input = Dense(600, activation='relu')(cf_feature)

main_output = TimeDistributedDense(8, activation='softmax',
                                   name='main_output')(f_input)

model = Model(input=[main_input, auxiliary_input], output=[main_output])
adam = Adam(lr=0.003)
model.compile(
    optimizer=adam,
    loss={'main_output': 'categorical_crossentropy'},
    # loss_weights={'main_output': 1},
    metrics=['weighted_accuracy'])
model.summary()

print "####### look at data's shape#########"
print train_hot.shape, trainpssm.shape, trainlabel.shape, test_hot.shape, testpssm.shape, testlabel.shape, val_hot.shape, valpssm.shape, vallabel.shape

earlyStopping = EarlyStopping(monitor='val_weighted_accuracy',
                              patience=5,
Example #27
        i += 1
    sample_weight.append(temp)
sample_weight = np.asarray(sample_weight)
X_sample_weight, X_sample_weight_rest = train_test_split(sample_weight,
                                                         test_size=0.1,
                                                         random_state=47)
sample_weight_val, sample_weight_test = train_test_split(X_sample_weight_rest,
                                                         test_size=0.5,
                                                         random_state=47)
#X_sample_weight=np.concatenate((X_sample_weight,sample_weight_val))
#model.add(Reshape((100,100),input_shape=(10000,)))
model.add(LSTM(60, return_sequences=True, input_shape=(timesteps, data_dim)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(TimeDistributedDense(28, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              sample_weight_mode="temporal")
totaltrain = np.asarray(totaltrain)
totallabel = np.asarray(totallabel)
print len(x_val)
x_train = np.asarray(x_train)
x_test = np.asarray(x_test)
x_val = np.asarray(x_val)
y_train = np.asarray(y_train)
y_val = np.asarray(y_val)
y_test = np.asarray(y_test)
print y_train.shape
x_train = x_train[:, :100, :]
y_train = y_train[:, :100, :]
Example #28
print(y_train.shape)

print('Build model...')
model = Sequential()
# "Encode" the input sequence using an RNN, producing an output of HIDDEN_SIZE
# note: in a situation where your input sequences have a variable length,
# use input_shape=(None, nb_feature).
model.add(RNN(HIDDEN_SIZE, input_shape=(MAXLEN, len(chars))))
# For the decoder's input, we repeat the encoded input for each time step
model.add(RepeatVector(DIGITS + 1))
# The decoder RNN could be multiple layers stacked or a single layer
for _ in range(LAYERS):
    model.add(RNN(HIDDEN_SIZE, return_sequences=True))

# For each of step of the output sequence, decide which character should be chosen
model.add(TimeDistributedDense(len(chars)))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Train the model each generation and show predictions against the validation dataset
for iteration in range(1, 200):
    print()
    print('-' * 50)
    print('Iteration', iteration)
    model.fit(X_train, y_train, batch_size=BATCH_SIZE, nb_epoch=1,
              validation_data=(X_val, y_val))
    ###
    # Select 10 samples from the validation set at random so we can visualize errors
print 'Loaded Inception model'

# Turn off training on base model layers
for layer in image_model.layers:
    layer.trainable = False

# Add on a dense layer to non-linearize features; Feature size is 2048
x = Dense(2048, activation='relu')(image_model.get_layer('flatten').output)
x = Dropout(0.5)(x)

# Build the label LSTM model
print "Loading label model"
label_model = Sequential()
label_model.add(Embedding(NUM_LABELS, 256, input_length=MAX_LABELS))
label_model.add(LSTM(output_dim=128, return_sequences=True))
label_model.add(TimeDistributedDense(128))
print "Label model loaded"

# Repeat image feature vector to turn it into a sequence
print "Repeat model loading"
x = RepeatVector(MAX_LABELS)(x)
# image_model.add(RepeatVector(MAX_LABELS))
print "Repeat model loaded"

img_model = Model(input=image_model.input, output=x)

# Merge image features and label features by concatenation
print "Merging image and label features"
model = Sequential()
model.add(Merge([img_model, label_model], mode='concat', concat_axis=-1))
Example #30
        go_backwards=True,
        dropout_W=0.5)(f)
f = merge([d, e], mode='concat')
d = GRU(output_dim=300,
        return_sequences=True,
        activation='tanh',
        inner_activation='sigmoid',
        dropout_W=0.5)(f)
e = GRU(output_dim=300,
        return_sequences=True,
        activation='tanh',
        inner_activation='sigmoid',
        go_backwards=True,
        dropout_W=0.5)(f)
f = merge([d, e, x], mode='concat')
f = TimeDistributedDense(200, activation='relu', W_regularizer=l2(0.001))(f)
f = TimeDistributedDense(200, activation='relu', W_regularizer=l2(0.001))(f)
main_output = TimeDistributedDense(8, activation='softmax',
                                   name='main_output')(f)
auxiliary_output = TimeDistributedDense(4,
                                        activation='softmax',
                                        name='aux_output')(f)
model = Model(input=[main_input, auxiliary_input],
              output=[main_output, auxiliary_output])
adam = Adam(lr=0.0003)
model.compile(optimizer=adam,
              loss={
                  'main_output': 'categorical_crossentropy',
                  'aux_output': 'categorical_crossentropy'
              },
              loss_weights={