def train():

    print('Build model...')
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen, dropout=0.2))
    model.add(LSTM(128, dropout_W=0.2, dropout_U=0.2))  # try using a GRU instead, for fun
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    # try using different optimizers and different optimizer configs
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    print(X_train.shape)
    print(y_train.shape)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test,
                                batch_size=batch_size)
    print('Test score:', score)
    print('Test accuracy:', acc)

    with open("save_weight_lstm.pickle", mode="wb") as f:
        pickle.dump(model.get_weights(),f)
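For symmetry, the pickled weights can be restored later by rebuilding the same architecture and calling set_weights(). A minimal sketch under that assumption (same hyperparameters and file name as in train() above; the function name is made up here):

import pickle

def load_trained_model():
    # Rebuild the exact architecture used in train(); set_weights() only
    # transfers raw arrays, so every layer shape must match.
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen, dropout=0.2))
    model.add(LSTM(128, dropout_W=0.2, dropout_U=0.2))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

    with open("save_weight_lstm.pickle", mode="rb") as f:
        model.set_weights(pickle.load(f))
    return model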
Example #2
def test_saving_overwrite_option_gcs():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    org_weights = model.get_weights()
    new_weights = [np.random.random(w.shape) for w in org_weights]

    with tf_file_io_proxy('keras.engine.saving.tf_file_io') as file_io_proxy:
        gcs_filepath = file_io_proxy.get_filepath(
            filename='test_saving_overwrite_option_gcs.h5')
        # do not reuse the same filename across tests, so they can run in parallel
        save_model(model, gcs_filepath)
        model.set_weights(new_weights)

        with patch('keras.engine.saving.ask_to_proceed_with_overwrite') as ask:
            ask.return_value = False
            save_model(model, gcs_filepath, overwrite=False)
            ask.assert_called_once()
            new_model = load_model(gcs_filepath)
            for w, org_w in zip(new_model.get_weights(), org_weights):
                assert_allclose(w, org_w)

            ask.return_value = True
            save_model(model, gcs_filepath, overwrite=False)
            assert ask.call_count == 2
            new_model = load_model(gcs_filepath)
            for w, new_w in zip(new_model.get_weights(), new_weights):
                assert_allclose(w, new_w)

        file_io_proxy.delete_file(gcs_filepath)  # cleanup
Example #3
def test_saving_overwrite_option():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    org_weights = model.get_weights()
    new_weights = [np.random.random(w.shape) for w in org_weights]

    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)
    model.set_weights(new_weights)

    with patch('keras.engine.saving.ask_to_proceed_with_overwrite') as ask:
        ask.return_value = False
        save_model(model, fname, overwrite=False)
        ask.assert_called_once()
        new_model = load_model(fname)
        for w, org_w in zip(new_model.get_weights(), org_weights):
            assert_allclose(w, org_w)

        ask.return_value = True
        save_model(model, fname, overwrite=False)
        assert ask.call_count == 2
        new_model = load_model(fname)
        for w, new_w in zip(new_model.get_weights(), new_weights):
            assert_allclose(w, new_w)

    os.remove(fname)
Example #4
def test_preprocess_weights_for_loading_for_model(layer):
    model = Sequential([layer])
    weights1 = model.get_weights()
    weights2 = topology.preprocess_weights_for_loading(
        model, convert_weights(layer, weights1),
        original_keras_version='1')
    assert all([np.allclose(x, y, 1e-5)
                for (x, y) in zip(weights1, weights2)])
Example #5
class brain:
    def __init__(self, model):
        if model is None:
            self.model = Sequential()
            self.model.add(
                Dense(8, activation="tanh", input_dim=6,
                      kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=None)))
            self.model.add(
                Dense(3, activation="tanh",
                      kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=None)))
            self.model.compile(loss='mean_squared_error', optimizer='adam')
        else:
            self.model = model

    def getOutputs(self, inputs):
        inputs.append(1)  # append a constant bias input (input_dim=6: five features plus bias)
        return self.model.predict(np.asarray([inputs]))

    def mutate(self, brain1, brain2):
        newBrain = []
        for i in range(0, len(self.model.get_weights()), 2):
            newWeights = []
            # brain1/brain2 are brain instances, so read their weights through
            # .model (even indices are kernel matrices, odd indices the biases)
            b1weights = brain1.model.get_weights()[i]
            b2weights = brain2.model.get_weights()[i]
            for n in range(len(b1weights)):
                w = []
                for m in range(len(b1weights[0])):
                    r = random()
                    k = 0
                    if random() < 0.1:
                        k = randint(-100, 100) / 100

                    if (r < 0.4):
                        w.append(b1weights[n][m] + k)
                    elif r > 0.6:
                        w.append(b2weights[n][m] + k)
                    else:
                        w.append((b1weights[n][m] + b2weights[n][m]) / 2 + k)

                newWeights.append(w)
            newBrain.append(newWeights)
            newBrain.append(self.model.get_weights()[i + 1])
        self.model.set_weights(newBrain)
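A usage sketch for the class above, inferred from the method signatures rather than taken from the original source: getOutputs() appends a constant bias, so the caller supplies five values for the six-input network.

# Hypothetical usage: two parents plus a child whose weights are mixed
# from both parents by mutate().
parent_a = brain(None)
parent_b = brain(None)
child = brain(None)
child.mutate(parent_a, parent_b)
outputs = child.getOutputs([0.1, 0.2, 0.3, 0.4, 0.5])  # 5 features + appended bias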
Example #6
def test_save_load_weights_gcs():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    org_weights = model.get_weights()

    with tf_file_io_proxy('keras.engine.saving.tf_file_io') as file_io_proxy:
        gcs_filepath = file_io_proxy.get_filepath(
            filename='test_save_load_weights_gcs.h5')
        # do not reuse the same filename across tests, so they can run in parallel
        model.save_weights(gcs_filepath)
        model.set_weights([np.random.random(w.shape) for w in org_weights])
        for w, org_w in zip(model.get_weights(), org_weights):
            assert not (w == org_w).all()
        model.load_weights(gcs_filepath)
        for w, org_w in zip(model.get_weights(), org_weights):
            assert_allclose(w, org_w)

        file_io_proxy.delete_file(gcs_filepath)  # cleanup
Example #7
def init_neural_networks(self):
    print("init start")
    model = Sequential()
    model.add(Dense(input_dim=self.inputSize, output_dim=20, init="he_normal", activation="tanh"))
    model.add(Dense(input_dim=20, output_dim=1, init="he_normal", activation="tanh"))
    model.add(Dense(input_dim=1, output_dim=1, init="he_normal", activation="linear"))
    model.compile(loss='mean_squared_error', optimizer='rmsprop')
    weights = model.get_weights()
    self.learner = model
    print("init end")
Example #8
 def compare_newapi(self, klayer, blayer, input_data, weight_converter=None,
                    is_training=False, rtol=1e-6, atol=1e-6):
     from keras.models import Sequential as KSequential
     from bigdl.nn.keras.topology import Sequential as BSequential
     bmodel = BSequential()
     bmodel.add(blayer)
     kmodel = KSequential()
     kmodel.add(klayer)
     koutput = kmodel.predict(input_data)
     from bigdl.nn.keras.layer import BatchNormalization
     if isinstance(blayer, BatchNormalization):
         k_running_mean = K.eval(klayer.running_mean)
         k_running_std = K.eval(klayer.running_std)
         blayer.set_running_mean(k_running_mean)
         blayer.set_running_std(k_running_std)
     if kmodel.get_weights():
         bmodel.set_weights(weight_converter(klayer, kmodel.get_weights()))
     bmodel.training(is_training)
     boutput = bmodel.forward(input_data)
     self.assert_allclose(boutput, koutput, rtol=rtol, atol=atol)
Example #9
class Brain:
    def __init__(self, model):
        if model is None:
            self.model = Sequential()
            self.model.add(Dense(12, input_dim=6, activation="tanh",
                                 kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=None)))

            # self.model.add(Dense(20, activation="tanh",
            #                      kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=None)))
            # self.model.add(Dense(20, activation="tanh",
            #                      kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=None)))
            # self.model.add(Dense(20, activation="tanh",
            #                      kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=None)))
            self.model.add(Dense(3, activation="tanh",
                                 kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=None)))
            self.model.compile(optimizer='sgd', loss='mean_squared_error')
        else:
            self.model = model

    def getOutputs(self, inputs):
        return self.model.predict(np.asarray([inputs]))

    def breed(self, brain1, brain2):
        newBrain = []
        for i in range(0, len(self.model.get_weights()), 2):
            newWeights = []
            b1weights = brain1.model.get_weights()[i]
            b2weights = brain2.model.get_weights()[i]
            for j in range(len(b1weights)):
                w = []
                for k in range(len(b1weights[0])):
                    r = random()
                    if r > 0.8:
                        genome = choice([b1weights[j][k], b2weights[j][k]])
                        w.append(genome + randint(-200, 200)/1000)
                    else:
                        w.append(choice([b1weights[j][k], b2weights[j][k]]))
                newWeights.append(w)
            newBrain.append(newWeights)
            newBrain.append(self.model.get_weights()[i + 1])
        self.model.set_weights(newBrain)
Example #10
def test1():
    model = Sequential()
    model.add(Embedding(100, 50, input_length=10, mask_zero=True))
    model.add(Sum(50, ave=True))
    model.compile(optimizer='sgd', loss='mse')
    a = model.predict(np.array([range(10)]))
    w = model.get_weights()[0]
    # index 0 is masked out, so the averaged embedding covers rows 1..9
    b = w[1:10, :].mean(0)
    if abs((a - b).sum()) < 1e-8:
        print("Behaves as expected")
    else:
        print("Something wrong")
Example #11
 def compare_layer(self, klayer, zlayer, input_data, weight_converter=None,
                   is_training=False, rtol=1e-6, atol=1e-6):
     """
     Compare forward results for Keras layer against Zoo Keras API layer.
     """
     from keras.models import Sequential as KSequential
     from zoo.pipeline.api.keras.models import Sequential as ZSequential
     zmodel = ZSequential()
     zmodel.add(zlayer)
     kmodel = KSequential()
     kmodel.add(klayer)
     koutput = kmodel.predict(input_data)
     from zoo.pipeline.api.keras.layers import BatchNormalization
     if isinstance(zlayer, BatchNormalization):
         k_running_mean = K.eval(klayer.running_mean)
         k_running_std = K.eval(klayer.running_std)
         zlayer.set_running_mean(k_running_mean)
         zlayer.set_running_std(k_running_std)
     if kmodel.get_weights():
         zmodel.set_weights(weight_converter(klayer, kmodel.get_weights()))
     zmodel.training(is_training)
     zoutput = zmodel.forward(input_data)
     self.assert_allclose(zoutput, koutput, rtol=rtol, atol=atol)
Example #12
def train():

    model = Sequential()
    model.add(Dense(output_dim=100, input_dim=28*28))
    model.add(Activation("relu"))
    model.add(Dense(output_dim=10))
    model.add(Activation("softmax"))

    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])

    model.fit(X_train, y_train)

    with open("save_weight.pickle", mode="wb") as f:
        pickle.dump(model.get_weights(), f)
Example #13
def test_EarlyStopping_reuse():
    patience = 3
    data = np.random.random((100, 1))
    labels = np.where(data > 0.5, 1, 0)
    model = Sequential((
        Dense(1, input_dim=1, activation='relu'),
        Dense(1, activation='sigmoid'),
    ))
    model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
    stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)
    weights = model.get_weights()

    hist = model.fit(data, labels, callbacks=[stopper])
    assert len(hist.epoch) >= patience

    # This should allow training to go for at least `patience` epochs
    model.set_weights(weights)
    hist = model.fit(data, labels, callbacks=[stopper])
    assert len(hist.epoch) >= patience
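The pattern this test relies on, snapshotting weights with get_weights() before training and restoring them with set_weights() afterwards, is handy outside of tests too. A minimal sketch (the helper name is invented):

def run_from_fresh_weights(model, fit_fn):
    # Snapshot the current weights, run one training experiment, then restore
    # the snapshot so the next experiment starts from identical conditions.
    snapshot = [w.copy() for w in model.get_weights()]
    history = fit_fn(model)
    model.set_weights(snapshot)
    return history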
Example #14
def train_model(feature_layers, classification_layers, image_list, nb_epoch, nb_classes, img_rows, img_cols, weights=None):
    # Create testset data for cross-val
    num_images = len(image_list)
    test_size = int(0.2 * num_images)
    print("Train size: ", num_images-test_size)
    print("Test size: ", test_size)

    model = Sequential()
    for l in feature_layers + classification_layers:
        model.add(l)

    if weights is not None:
        model.set_weights(weights)

    # let's train the model using SGD + momentum (how original).
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)
    
    print('Using real time data augmentation')
    for e in range(nb_epoch):
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print('Training...')
        # batch train with realtime data augmentation
        progbar = generic_utils.Progbar(num_images-test_size)
        for X_batch, Y_batch in flow(image_list[0:-test_size]):
            X_batch = X_batch.reshape(X_batch.shape[0], 3, img_rows, img_cols)
            Y_batch = np_utils.to_categorical(Y_batch, nb_classes)
            loss = model.train_on_batch(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[('train loss', loss)])

        print('Testing...')
        # test time!
        progbar = generic_utils.Progbar(test_size)
        for X_batch, Y_batch in flow(image_list[-test_size:]):
            X_batch = X_batch.reshape(X_batch.shape[0], 3, img_rows, img_cols)
            Y_batch = np_utils.to_categorical(Y_batch, nb_classes)
            score = model.test_on_batch(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[('test loss', score)])
    return model, model.get_weights()
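Because train_model() both accepts and returns a weights list, calls can be chained for a warm start. A sketch of that calling pattern (the two image lists are placeholders):

# Hypothetical two-stage run: weights learned on the first dataset seed
# training on the second via the `weights` argument.
model, learned = train_model(feature_layers, classification_layers,
                             image_list_a, nb_epoch, nb_classes, img_rows, img_cols)
model, learned = train_model(feature_layers, classification_layers,
                             image_list_b, nb_epoch, nb_classes, img_rows, img_cols,
                             weights=learned)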
Example #15
def _test_equivalence(channel_order=None):

    from kfs.layers.convolutional import Convolution2DEnergy_TemporalBasis
    # Convolution2DEnergy_TemporalCorrelation (used below) is assumed to be
    # importable from the same kfs module or defined in the enclosing file.
    from keras.models import Sequential
    #from keras.layers import Flatten, Dense
    input_shape = (12, 3, 64, 64)
    if channel_order is None:
        channel_order = K.image_data_format()
    if channel_order == 'channels_last':
        input_shape = (12, 64, 64, 3)


    nn = Sequential()
    nn.add(Convolution2DEnergy_TemporalBasis(8, 16, 4, (5, 5), 7,
                                            padding='same',
                                            input_shape=input_shape,
                                            data_format=channel_order))

    rng = np.random.RandomState(42)
    datums = rng.randn(6, 12, 3, 64, 64).astype('float32')
    if channel_order == 'channels_last':
        datums = datums.transpose(0, 1, 3, 4, 2)


    nn.compile(loss='mse', optimizer='sgd')

    nn2 = Sequential()
    nn2.add(Convolution2DEnergy_TemporalCorrelation(8, 16, 4, (5, 5), 7,
                                            padding='same',
                                            input_shape=input_shape,
                                            data_format=channel_order))
    nn2.compile(loss='mse', optimizer='sgd')
    nn2.set_weights(nn.get_weights())

    pred1 = nn.predict(datums)
    pred2 = nn2.predict(datums)
    assert ((pred1 - pred2) == 0.).all()

    return nn, nn.predict(datums), nn2, nn2.predict(datums)
Example #16
def define_autoencoder():
    # encoder
    autoencoder = Sequential()
    autoencoder.add(Conv2D(16, (3, 3), strides=1, activation="relu",
                           padding="same", input_shape=(28, 28, 1)))
    autoencoder.add(MaxPooling2D((2, 2), padding="same"))
    autoencoder.add(Conv2D(8, (3, 3), strides=1,
                           activation="relu", padding="same"))
    autoencoder.add(MaxPooling2D((2, 2), padding="same"))
    # decoder
    autoencoder.add(Conv2D(8, (3, 3), strides=1,
                           activation="relu", padding="same"))
    autoencoder.add(UpSampling2D((2, 2)))
    autoencoder.add(Conv2D(16, (3, 3), strides=1,
                           activation="relu", padding="same"))
    autoencoder.add(UpSampling2D((2, 2)))
    autoencoder.add(Conv2D(1, (3, 3), strides=1,
                           activation="sigmoid", padding="same"))

    autoencoder.compile(optimizer="adam", loss="binary_crossentropy")
    initial_weights = autoencoder.get_weights()
    return initial_weights, autoencoder
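Returning the freshly initialized weights alongside the compiled model lets a caller rewind the autoencoder between experiments. A sketch of that use (the training arrays are placeholders):

initial_weights, autoencoder = define_autoencoder()
autoencoder.fit(x_noisy, x_clean, epochs=5)  # hypothetical denoising data
autoencoder.set_weights(initial_weights)     # reset to the untrained state for the next run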
Example #17
class KerasWrapper(object):
    def __init__(self, parameters=Parameters()):
        self.params = parameters
        self.data_cleaner = DataCleaner()
        self.pre_processor = PreProcessor()
        self.load_data()
        self.model_constructed = False
        self.train_completed = False

    def clean_data(self):
        self.data_cleaner.load_data()
        self.data_cleaner.clean()
        self.data_cleaner.save_cleaned()

    def load_data(self):
        self.data = pd.read_csv(self.params.file_path)
        self.raw_data = self.data.copy(deep=True)
        self.data_cleaner.load_data()

    def __prepare_data(self):

        self.data = self.pre_processor.convert_objects_to_categorical(self.data, self.params.converting_columns)
        self.data = self.pre_processor.normalize_data(self.data, self.params.converting_columns)
        self.data.fillna(-1, inplace=True)

        self.inputs = self.data[self.params.input_params]
        self.outputs = self.data[self.params.output_params]

        excluded_input_data = self.inputs.drop(self.inputs.index[self.params.excluded_rows])
        excluded_output_data = self.outputs.drop(self.outputs.index[self.params.excluded_rows])
        X = excluded_input_data.values
        y = excluded_output_data.values
        y = np_utils.to_categorical(y)

        return X, y

    def create_model(self, summary=True):

        X, y = self.__prepare_data()

        if self.model_constructed:
            self.model.set_weights(self.network_weights)
            return X, y

        dimof_input = X.shape[1]
        dimof_output = y.shape[1]  # y is already one-hot encoded, so its width is the class count

        if summary:
            print('dimof_input: ', dimof_input)
            print('dimof_output: ', dimof_output)
            print('batch_size: ', self.params.batch_size)
            print('dimof_middle: ', self.params.nodes)
            print('dropout: ', self.params.dropout)
            print('countof_epoch: ', self.params.epochs)
            print('verbose: ', self.params.verbose)
            print()

        self.model = Sequential()
        self.model.add(
            Dense(self.params.nodes, input_dim=dimof_input, init='uniform', activation=self.params.activation))
        self.model.add(BatchNormalization(beta_init='uniform'))
        self.model.add(
            Dense(self.params.nodes * 2, input_dim=dimof_input, init='uniform', activation=self.params.activation))
        self.model.add(Dropout(self.params.dropout))
        self.model.add(Dense(dimof_output, input_dim=dimof_input, init='uniform', activation='softmax'))
        self.model.compile(loss='mse', optimizer='sgd', metrics=['accuracy'])

        # keep an independent copy of the fresh weights so the model can be
        # reset on later create_model() calls
        self.network_weights = [w.copy() for w in self.model.get_weights()]

        if summary:
            self.model.summary()

        self.model_constructed = True

        return X, y

    def start_train(self, input_data, output_data):
        callbacks = []
        if self.params.early:
            callbacks.append(
                EarlyStopping(patience=self.params.patience, verbose=self.params.verbose, monitor='val_loss'))
        fit = self.model.fit(input_data, output_data, validation_split=0.2,
                             batch_size=self.params.batch_size, nb_epoch=self.params.epochs,
                             verbose=self.params.verbose, shuffle=True, callbacks=callbacks)
        self.train_completed = True
        return fit

    def evaluate(self, input, output):
        loss, accuracy = self.model.evaluate(input, output, verbose=self.params.verbose)
        return loss, accuracy

    def run_for_all_characters(self):
        self.raw_data['death'] = np.nan
        self.raw_data['live'] = np.nan
        self.raw_data.sort_values('popularity', ascending=False, inplace=True)
        index = self.raw_data.head(100).index.tolist()
        self.params.excluded_rows = []

        # index = self.raw_data.index.tolist()

        for i in index:
            self.params.excluded_rows.append(i)
            self.start_whole_process()
            self.prediction()
            self.params.excluded_rows = []
        self.params.excluded_rows = []

    def prediction(self):

        predictions = []
        for i in self.params.excluded_rows:
            chosen_class = self.model.predict_classes(
                self.inputs.iloc[i].values.reshape((1, len(self.params.input_params))),
                verbose=0)
            probability = self.model.predict_proba(
                self.inputs.iloc[i].values.reshape((1, len(self.params.input_params))),
                verbose=0)
            character = str(self.raw_data['name'][i])

            # rounding on 2 decimals
            death = int((probability[0][0] * 100) + 0.5) / 100.0
            life = int((probability[0][1] * 100) + 0.5) / 100.0

            self.raw_data.set_value(i, 'death', 0)
            self.raw_data.set_value(i, 'live', 0)

            data = (i, character, str(death), str(life))
            predictions.append(data)
            self._prediction_summary(chosen_class, probability, character)

        return predictions

    def start_whole_process(self):
        X, y = self.create_model()
        self.start_train(X, y)
        return self.evaluate(X, y)

    def _prediction_summary(self, chosen_class, probability, character):
        print('Name: ' + character)
        print('Dead: ' + str(probability[0][0]) + ' %')
        print('Alive: ' + str(probability[0][1]) + ' %')
        print('Chosen class: ' + str(chosen_class))
        print(30 * '-')
Example #18
from keras.optimizers import SGD, Adam, RMSprop
import sklearn.metrics as metrics

model = Sequential()
model.add(Dense(4,input_shape=(nrows*ncols,)))
model.add(Activation('sigmoid'))
model.add(Dense(5))
model.add(Activation('softmax'))

sgd = SGD()
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

h = model.fit(train_flat, train_labs, batch_size = 32, nb_epoch=4, validation_data = (test_flat,test_labs), verbose=1)

#W1,b1 = model.get_weights()
W1, b1, W2, b2 = model.get_weights()
num_param = 1024*4 + 4 + 4*5 + 5  # weights + biases of the 1024->4->5 network (assumes nrows*ncols == 1024)

sx, sy = (4,1)
f, con = plt.subplots(sx,sy, sharex='col', sharey='row')
con = con.reshape(sx,sy)
for xx in range(sx):
    for yy in range(sy):
        con[xx,yy].pcolormesh(W1[:,sy*xx+yy].reshape(nrows,ncols), cmap=plt.cm.hot) 

preds = np.argmax(model.predict(test_flat),axis=1)
labs = np.argmax(test_labs,axis=1)
conf = metrics.confusion_matrix(labs,preds)
predsp = model.predict_proba(test_flat)
# AIC = 2k - 2*ln(L); sklearn's log_loss returns the mean negative log-likelihood,
# so multiply by the sample count to recover -ln(L)
aic = 2 * num_param + 2 * len(labs) * metrics.log_loss(labs, predsp)
Example #19
    print(ite, loss)


# In[16]:

### Create the output file
f = open("vectors.txt", "w")

### Write the vocabulary size and the feature-vector dimensionality
f.write(" ".join([str(V-1), str(dim)]))
f.write("\n")


# In[17]:

vectors = cbow.get_weights()[0]

### Write the word feature vectors learned during training
for word, i in tokenizer.word_index.items():
    f.write(word)
    f.write(" ")
    f.write(" ".join(map(str, list(vectors[i,:]))))
    f.write("\n")
f.close()


# In[18]:

w2v = Word2Vec.load_word2vec_format('./vectors.txt', binary=False)

Example #20
def model(X_train, Y_train, X_test, Y_test):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.optimizers import RMSprop

    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=["accuracy"])

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model.to_yaml(), 'weights': pickle.dumps(model.get_weights())}
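The returned dict serializes the architecture as YAML and the weights as a pickle byte string, so the best trial can be rebuilt after the hyperopt search. A minimal sketch, assuming the dict above is available as best_run_result:

import pickle
from keras.models import model_from_yaml

# Hypothetical reconstruction of the winning trial from the returned dict.
best_model = model_from_yaml(best_run_result['model'])
best_model.set_weights(pickle.loads(best_run_result['weights']))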
Example #21
def run():
    # params
    numbats = 1 # 100
    epochs = 5000 #20
    lr = 2./numbats #0.0001 # for SGD
    lr2 = 0.01
    evalinter = 1


    dims = 5#100
    wreg = 0.0# 0.00001

    datafileprefix = "../../data/"
    #datafileprefix = "../../data/nycfilms/"
    tensorfile = "toy.ssd"
    #tensorfile = "tripletensor.ssd"

    # get the data and split
    start = datetime.now()
    data = loaddata(datafileprefix+tensorfile)
    data.threshold(0.5)
    maxentid = max(data.maxid(1), data.maxid(2))
    #data.shiftids(0, maxentid+1)

    vocabsize = data.maxid(0)+1
    data = data.keys.lok

    trainX = data[:, [1, 0]]
    labels = data[:, 2]
    trainY = np.zeros((labels.shape[0], vocabsize)).astype("float32")
    trainY[np.arange(labels.shape[0]), labels] = 1
    batsize=int(math.ceil(data.shape[0]*1.0/numbats))

    print "source data loaded in %f seconds" % (datetime.now() - start).total_seconds()

    # train model
    print "training model"
    start = datetime.now()
    model = Sequential()
    model.add(Embedding(vocabsize, dims, W_regularizer=l2(wreg)))
    model.add(GRU(dims, activation="tanh", ))
    model.add(Dense(vocabsize, W_regularizer=l2(wreg), activation="softmax"))
    opt = SGD(lr=lr2, decay=1e-6, momentum=0.9, nesterov=True)
    opt = Adadelta()
    model.compile(optimizer=opt, loss="categorical_crossentropy")
    w = model.get_weights()
    print "model %s defined in %f" % (model.__class__.__name__, (datetime.now() - start).total_seconds())

    start = datetime.now()
    losses = LossHistory()
    model.fit(trainX, trainY, nb_epoch=epochs, batch_size=batsize, verbose=1, callbacks=[losses])
    print "model trained in %f" % (datetime.now() - start).total_seconds()

    print(model.predict(np.asarray([[0, 10]]).astype("int32")))

    #print losses.losses
    plt.plot(losses.losses, "r")
    plt.show(block=False)

    save(model)

    embed()
Example #22
model.add(Dense(input_dim=cols, output_dim=1, init='uniform'))


''' compile '''
sgd = SGD(lr=1e-3, decay=1e-4, momentum=0)
model.compile(loss='mean_squared_error', optimizer=sgd)


''' fit (train) '''
n_epochs = 100000
model.fit( X_test, Y_test, nb_epoch=n_epochs, verbose=0 )

#save the model
json_string = model.to_json()
open('lin_reg_model.json', 'w').write(json_string)
model.save_weights('lin_reg_weights.h5')
print "Saved model and weights..."


''' evaluate (test) '''
#score = model.evaluate( X_test, Y_true, verbose=0 )
pred = model.predict(X_test)
print("Prediction \t Y_true \t Pred. Error")
print(np.hstack([pred, Y, np.abs(pred - Y)]))
print("Model weights = ", model.get_weights())


''' LS solution '''
#theta = np.dot( np.linalg.inv( np.dot(np.transpose(X_test), X_test) ), np.dot(np.transpose(X_test),Y_test) )
#print theta
Example #23
In [47]: regr.intercept_
Out[47]: 152.91886182616167
"""

### Linear Regression with Keras
# Initialize Network
model = Sequential()
model.add(Dense(1, input_dim=1, init='uniform'))
model.add(Activation('linear'))
model.compile(loss='mse', optimizer='sgd')

model.fit(diabetes_X_train, diabetes_Y_train, nb_epoch=20000, batch_size=64, verbose=False)

"""
Took about 100 seconds on my chromebook without using multiple CPUs
In [68]: model.get_weights()
Out[68]: 
[array([[ 936.47363281]], dtype=float32),
 array([ 152.80149841], dtype=float32)]
"""

#Make lines and plot for both
w1,w0 = model.get_weights()
tt = np.linspace(np.min(diabetes_X[:, 0]), np.max(diabetes_X[:, 0]), 10)
nn_line = w0+w1*tt
lreg_line = regr.intercept_+regr.coef_*tt 

plt.plot(diabetes_X[:,0],diabetes['target'],'kx',tt,lreg_line,'r-',tt,nn_line[0],'b--')
plt.show()
Example #24
X_test /= 255
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)

X_wholeset = np.vstack([X_train, X_test])
X_wholeset_tmp = X_wholeset

all_params = []
print('PRETRAINING')
for i in range(len(layer_sizes)-1):
    temp_ae_model = Sequential()
    temp_ae_model.add(DAE(layer_sizes[i], layer_sizes[i+1], activation='sigmoid', corruption_level=0.3))
    temp_ae_model.compile(loss='mean_squared_error', optimizer='adam')
    temp_ae_model.fit(X_wholeset_tmp, X_wholeset_tmp, nb_epoch=nb_pretrain_epochs[i], batch_size=batch_sizes[i])
    X_wholeset_tmp = temp_ae_model.predict(X_wholeset_tmp)
    W, b, bT = temp_ae_model.get_weights()
    all_params.append((W, b, bT))
# create model for fine tuning
final_ae_model = Sequential()
for i in range(len(layer_sizes)-1):
    dense_layer = Dense(layer_sizes[i], layer_sizes[i+1], activation='sigmoid')
    final_ae_model.add(dense_layer)
final_ae_model.add(Dense(layer_sizes[-1], nb_classes, activation='sigmoid'))
final_ae_model.add(Activation('softmax'))
final_ae_model.compile(loss='categorical_crossentropy', optimizer='adam')
# initialize weights
for i in range(len(layer_sizes)-1):
    W, b, bT = all_params[i]
    final_ae_model.layers[i].set_weights([W, b])
# finetune
print('FINETUNING')
Example #25
class SupervisedModel(Model):

    """
    Class representing an abstract Supervised Model
    """

    def __init__(self, layers=None, activation='relu', out_activation='linear', dropout=0,
                 l1_reg=0, l2_reg=0, **kwargs):

        """
        :param layers: 
        :param activation: 
        :param out_activation: 
        :param dropout: 
        :param l1_reg: 
        :param l2_reg: 
        :param kwargs: Model's parameters
        """

        self.layers = layers if layers is not None else []  # avoid sharing a mutable default list
        self.activation = expand_arg(self.layers, activation)
        self.out_activation = out_activation
        self.dropout = expand_arg(self.layers, dropout)
        self.l1_reg = expand_arg(self.layers, l1_reg)
        self.l2_reg = expand_arg(self.layers, l2_reg)
        super().__init__(**kwargs)

    def validate_params(self):
        super().validate_params()
        assert self.layers and len(self.layers) > 0, 'Model must have at least one hidden layer'
        assert all([0 <= d <= 1 for d in self.dropout]), 'Invalid dropout value'
        assert all([f in valid_act_functions for f in self.activation]), 'Invalid activation function'
        assert self.out_activation in valid_act_functions, 'Invalid output activation function'
        assert all([x >= 0 for x in self.l1_reg]), 'Invalid l1_reg value'
        assert all([x >= 0 for x in self.l2_reg]), 'Invalid l2_reg value'

    def build_model(self, input_shape, n_output=1, metrics=None):

        """ Creates the computational graph for the Supervised Model.
        :param input_shape:
        :param n_output: number of output values.
        :param metrics:
        :return: self
        """

        self._model = Sequential(name=self.name)

        self._create_layers(input_shape, n_output)

        self._model.compile(optimizer=self.get_optimizer(), loss=self.loss_func, metrics=metrics)

    def _create_layers(self, input_shape, n_output):
        pass

    def fit(self, x_train, y_train, x_valid=None, y_valid=None, valid_split=0.):

        """ Fit the model to the data.
        :param x_train: Training data. shape(n_samples, n_features)
        :param y_train: Training labels. shape(n_samples, n_classes)
        :param x_valid:
        :param y_valid:
        :param valid_split:
        :return: self
        """

        x_train = self._check_x_shape(x_train)
        y_train = self._check_y_shape(y_train)

        self.build_model(x_train.shape, y_train.shape[-1])

        if x_valid is not None and y_valid is not None:
            x_valid = self._check_x_shape(x_valid)
            y_valid = self._check_y_shape(y_valid)
            valid_data = (x_valid, y_valid)
        else:
            valid_data = None

            # by default, hold out 10% of the training data for validation
            if self.early_stopping and valid_split == 0.:
                valid_split = 0.1

        self._train_step(x_train, y_train, valid_data, valid_split)

    def _train_step(self, x_train, y_train, valid_data=None, valid_split=0.):
        self._model.fit(x=x_train,
                        y=y_train,
                        batch_size=self.batch_size,
                        epochs=self.nb_epochs,
                        shuffle=False,
                        validation_data=valid_data,
                        validation_split=valid_split,
                        callbacks=self._callbacks,
                        verbose=self.verbose)

    def predict(self, x):

        """ Predict the labels for the test set.
        :param x: Testing data. shape(n_test_samples, n_features)
        :return: labels
        """

        x = self._check_x_shape(x)

        if self.loss_func in ('binary_crossentropy', 'categorical_crossentropy'):
            return self._model.predict_classes(x, batch_size=self.batch_size, verbose=self.verbose)

        return self._model.predict(x=x, batch_size=self.batch_size, verbose=self.verbose)

    def predict_proba(self, x):

        """ Predict classes probabilities.
        :param x: Testing data. shape(n_test_samples, n_features)
        :return: probabilities
        """

        if self.loss_func not in ('binary_crossentropy', 'categorical_crossentropy'):
            raise TypeError('Model is not configured to predict class probabilities. Please use '
                            '"binary_crossentropy" or "categorical_crossentropy" as the loss function!')
        
        x = self._check_x_shape(x)

        probs = self._model.predict_proba(x, batch_size=self.batch_size, verbose=self.verbose)

        # check if binary classification
        if probs.shape[1] == 1:
            # first column is probability of class 0 and second is of class 1
            probs = np.hstack([1 - probs, probs])
        return probs

    def score(self, x, y):

        """ Evaluate the model on (x, y).
        :param x: Input data
        :param y: Target values
        :return:
        """

        x = self._check_x_shape(x)
        y = self._check_y_shape(y)

        loss = self._model.evaluate(x=x, y=y, batch_size=self.batch_size, verbose=self.verbose)

        if isinstance(loss, list):
            return loss[0]
        return loss

    def get_model_parameters(self):

        """ Return the model parameters in the form of numpy arrays.
        :return: model parameters
        """

        return self._model.get_weights()

    def get_config(self):
        conf = super().get_config()
        layers = []
        for l in self.layers:
            if isinstance(l, int):
                layers.append(l)
            else:
                layers.append(l.to_json()['model'])
        conf['layers'] = layers
        return conf

    @classmethod
    def from_config(cls, config):
        layers = []
        for l in config['layers']:
            if isinstance(l, dict):
                layers.append(model_from_config(l))
            else:
                layers.append(l)
        config['layers'] = layers
        return cls(**config)

    def _check_x_shape(self, x):
        return x

    def _check_y_shape(self, y):
        y = np.array(y)

        if len(y.shape) == 1:
            if self.loss_func == 'categorical_crossentropy':
                return to_categorical(y)

            return np.reshape(y, (y.shape[0], 1))

        return y
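A concrete subclass only has to fill in _create_layers(); compiling, fitting, and get_model_parameters() come from the base class above. A minimal sketch under those assumptions (the class name and layer choices are invented):

class MLP(SupervisedModel):
    # Hypothetical minimal subclass: a stack of Dense layers sized by
    # self.layers, with per-layer activation and dropout as expanded
    # in SupervisedModel.__init__.
    def _create_layers(self, input_shape, n_output):
        from keras.layers import Dense, Dropout
        for i, (units, act, drop) in enumerate(zip(self.layers, self.activation, self.dropout)):
            kwargs = {'input_shape': (input_shape[-1],)} if i == 0 else {}
            self._model.add(Dense(units, activation=act, **kwargs))
            if drop > 0:
                self._model.add(Dropout(drop))
        self._model.add(Dense(n_output, activation=self.out_activation))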
Example #26
recog1.add(Merge([recog_left, recog_right], mode='ave'))
recog1.add(Dense(784))

#### HERE***
recog11 = Sequential()
layer = Dense(64, init='glorot_uniform', input_shape=(784,))
layer.trainable = False
recog11.add(layer)
layer2 = Dense(784, activation='sigmoid', init='glorot_uniform')
layer2.trainable = False
recog11.add(layer2)
recog11.layers[0].W.set_value(np.ones((784, 64)).astype(np.float32))

recog11.compile(loss='mean_squared_error', optimizer=sgd, metrics=['mae'])

recog11.get_weights()[0].shape

gan_input = Input(batch_shape=(1,784))

gan_level2 = recog11(recog1(gan_input))

GAN = Model(gan_input, gan_level2)
GAN.compile(loss='mean_squared_error', optimizer='adam', metrics=['mae'])

GAN.fit(x_train_orig[0].reshape(1,784), x_train_orig[0].reshape((1,784)), 
        batch_size=30, nb_epoch=100,verbose=1)

### UNIQUE BLUEPRINT
a=GAN.predict(x_train[0].reshape(1,784),verbose=1)

plt.figure(figsize=(10, 10))
Example #27
model.add(Dense(6, input_dim=10, W_regularizer=l2(0.005)))
model.add(Activation('relu'))
model.add(Dense(6, input_dim=6, W_regularizer=l2(0.005)))
model.add(Activation('relu'))

model.add(Dense(6, input_dim=6, W_regularizer=l2(0.005)))
model.add(Activation('relu'))
model.add(Dense(1, input_dim=6, W_regularizer=l2(0.005)))
model.add(Activation('linear'))

model.compile(loss="mean_squared_error", optimizer="rmsprop")
train_x_set, train_y_set, test_x_set, test_y_set = load_data()

model.fit(train_x_set, train_y_set, batch_size=3500, nb_epoch=5000, validation_split=0.05)
print(model.get_weights())
predicted = model.predict(test_x_set)
rmse = np.sqrt(((predicted - test_y_set) ** 2).mean())


print("预测值:")
print(predicted.T)
print("实际:")
print(test_y_set.T)
print(rmse)

#num = 0



#for i in range(len(test_y_set)):
Example #28
                  np.array([-0.5])]
'''
output_weights = None

model = Sequential()

model.add(SimpleRNN(input_dim=1, output_dim=2, init='normal',
                    inner_init='orthogonal', activation=steeper_sigmoid,
                    weights=RNNlayer_weights,
                    return_sequences=True))

model.add(Dense(2, 1, init='normal', activation=steeper_sigmoid, weights=output_weights))

model.compile(loss='binary_crossentropy', optimizer='Adagrad')

initialWeights = model.get_weights()

history = model.fit(X_train3D, Y_train3D, batch_size=batchsize, nb_epoch=1000, show_accuracy=True)

score = model.evaluate(X_test3D, Y_test3D, show_accuracy=True)

print("score (loss, accuracy):")
print(score)

print("predicted output:")
print(model.predict(X_test3D, verbose=1))
print("actual output:")
print(Y_test)
print("actual input:")
print(X_test)
print('initial weights:')
Example #29
model.add(Dense(input_dim=X_train.shape[1], output_dim=50, init='uniform', activation='tanh', bias=True))
#%%
model.add(Dense(input_dim=50, output_dim=50, init='uniform', activation='tanh', bias=True))
#%%
#model.add(Dense(input_dim=50, output_dim=25, init='uniform', activation='tanh', bias=True))
#%%
model.add(Dense(input_dim=50, output_dim=y_train.shape[1], init='uniform', activation='softmax'))
sgd = SGD(lr=0.001, decay=1e-5, momentum=.9)
model.compile(loss='categorical_crossentropy', optimizer=sgd)

#%%
layer = model.layers

print(layer)
#%%
weight = model.get_weights()
print(weight)
#%%
model.fit(X_train, y_train, nb_epoch=50,  batch_size=300, verbose=0, validation_split=0.1, 
          show_accuracy=True)
          
#%%
print (theano.config.floatX)
print (theano.config.device)
#%%
from sklearn.metrics import accuracy_score, confusion_matrix

y_train_pred = model.predict_classes(X_train, verbose=0)
accS = accuracy_score(Y_train, y_train_pred)
conf = confusion_matrix(Y_train, y_train_pred)
#%%
Example #30
import sys
import json

from keras.models import Sequential
from keras.layers.core import Dense, Activation

def save_str(file, data):
	with open(file, 'w') as f:
		f.write(data)

model = Sequential()

model.add(Dense(input_dim = 4, output_dim = 6))
model.add(Activation('relu'))

model.add(Dense(output_dim = 3))
model.add(Activation('softmax'))

wg = [x.tolist() for x in model.get_weights()]

save_str(sys.argv[1], model.to_json())
save_str(sys.argv[2], json.dumps(wg))
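Round-tripping Example #30: the JSON architecture and the JSON-encoded weight lists can be reloaded and converted back to numpy arrays before set_weights(). A sketch, assuming the two files written above:

import json
import numpy as np
from keras.models import model_from_json

# Hypothetical loader matching the save_str() calls above.
with open(sys.argv[1]) as f:
	restored = model_from_json(f.read())
with open(sys.argv[2]) as f:
	restored.set_weights([np.array(w) for w in json.load(f)])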