Example #1
# Assumed imports for this snippet; `resume`, `Sin`, `D`, and `S` are globals defined elsewhere.
import keras
from keras import backend as K
from keras.models import Model, load_model
from keras.layers import Input, Conv2D, Dense, Flatten, Lambda

def model_d2nn():
	if resume:
		model = load_model('model.h5')
	else:

		input_layer = Input(shape=(Sin, D, D))
		conv1 = Conv2D(32, 4, strides=(1, 1), activation='relu')(input_layer)
		conv2 = Conv2D(64, 3, strides=(1, 1), activation='relu')(conv1)
		conv3 = Conv2D(64, 1, strides=(1, 1), activation='relu')(conv2)
		# pool = MaxPooling2D(pool_size=(2, 2))(conv3)

		flat = Flatten()(conv3)
		# fc1 = Dense(400, activation='relu')(flat)

		a_fc = Dense(100, activation='relu')(flat)
		advantage = Dense(S, activation='linear', name="advantage")(a_fc)

		v_fc = Dense(100, activation='relu')(flat)
		value = Dense(1, activation='linear', name="value")(v_fc)

		# dueling head: Q = V + A - mean_a(A), with the mean taken per sample over actions
		policy = Lambda(lambda x: x[1] + x[0] - K.mean(x[0], axis=1, keepdims=True))([advantage, value])

		model = Model(inputs=[input_layer], outputs=[policy])
		# model_a = Model(input=[input_layer], output=[advantage])
		# model_v = Model(input=[input_layer], output=[value])

	model_a = Model(inputs=model.input, outputs=model.get_layer("advantage").output)
	model_v = Model(inputs=model.input, outputs=model.get_layer("value").output)
	model.compile(loss=keras.losses.mean_squared_error,
			   optimizer=keras.optimizers.Adadelta(),
			   metrics=['accuracy'])
	return model, model_a, model_v
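
The Lambda above implements a dueling Q-head, Q(s, a) = V(s) + A(s, a) - mean_a A(s, a). A minimal self-contained sketch of just that head, with toy shapes assumed for illustration:

# Minimal dueling-head sketch (toy shapes, not the author's full pipeline).
import numpy as np
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Dense, Lambda

n_actions = 4                                   # hypothetical action count
state = Input(shape=(16,))
h = Dense(32, activation='relu')(state)
advantage = Dense(n_actions, name='advantage')(h)
value = Dense(1, name='value')(h)
# Q(s, a) = V(s) + A(s, a) - mean_a A(s, a)
q = Lambda(lambda t: t[1] + t[0] - K.mean(t[0], axis=1, keepdims=True))([advantage, value])
m = Model(inputs=state, outputs=q)
print(m.predict(np.zeros((1, 16))).shape)       # -> (1, 4)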
 def test_Embedding_layer(self):
     model = Sequential()
     model.add(Embedding(1000, 64, input_length=10))
     self.assertEqual(model.get_layer(index=0).input_dim, 1000)
     self.assertEqual(model.get_layer(index=0).output_dim, 64)
     self.assertEqual(model.get_layer(index=0).embeddings.shape, (1000, 64))
     self.assertEqual(model.get_layer(index=0).input_shape, (None, 10))
     self.assertEqual(model.get_layer(index=0).output_shape, (None, 10, 64))
def test_rebuild_model():
    model = Sequential()
    model.add(Dense(128, input_shape=(784,)))
    model.add(Dense(64))
    assert(model.get_layer(index=-1).output_shape == (None, 64))

    model.add(Dense(32))
    assert(model.get_layer(index=-1).output_shape == (None, 32))
 def test_Dense_layer(self):
     model = Sequential()
     model.add(Dense(2, activation='relu', input_shape=(4, )))
     model.add(Dense(1, activation='sigmoid'))
     self.assertEqual(model.get_layer(index=0).input_shape, (None, 4))
     self.assertEqual(model.get_layer(index=0).output_shape, (None, 2))
     self.assertEqual(model.get_layer(index=0).activation.__name__, 'relu')
     self.assertEqual(model.get_layer(index=1).input_shape, (None, 2))
     self.assertEqual(model.get_layer(index=1).output_shape, (None, 1))
     self.assertEqual(
         model.get_layer(index=1).activation.__name__, 'sigmoid')
 def test_GRU_layer(self):
     model = Sequential()
     model.add(Embedding(1000, 64, input_length=10))
     model.add(GRU(128))
     self.assertEqual(model.get_layer(index=1).units, 128)
     self.assertEqual(model.get_layer(index=1).input_shape, (None, 10, 64))
     self.assertEqual(model.get_layer(index=1).output_shape, (None, 128))
     model.pop()
     model.add(GRU(128, return_sequences=True))
     self.assertEqual(
         model.get_layer(index=1).output_shape, (None, 10, 128))
 def test_Bidirectional_wrapper(self):
     model = Sequential()
     model.add(Embedding(1000, 64, input_length=10))
     model.add(Bidirectional(LSTM(128)))
     self.assertEqual(model.get_layer(index=1).forward_layer.units, 128)
     self.assertEqual(model.get_layer(index=1).backward_layer.units, 128)
     self.assertEqual(model.get_layer(index=1).input_shape, (None, 10, 64))
     self.assertEqual(model.get_layer(index=1).output_shape, (None, 256))
     model.pop()
     model.add(Bidirectional(LSTM(128, return_sequences=True)))
     self.assertEqual(
         model.get_layer(index=1).output_shape, (None, 10, 256))
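
All of the tests above rely on the same lookup: get_layer accepts either a layer name or an index, and negative indices count from the end. A quick sketch:

from keras.models import Sequential
from keras.layers import Dense

m = Sequential()
m.add(Dense(8, input_shape=(4,), name='hidden'))
m.add(Dense(1, name='out'))
assert m.get_layer('hidden') is m.get_layer(index=0)
assert m.get_layer('out') is m.get_layer(index=-1)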
Example #7
    def two_layer_net_split(self):
        model = Sequential()

        model.add(Dense(128, input_dim=32 * 32 * 3, name="hidden1"))
        model.add(Activation("relu"))

        model.add(Dense(output_dim=43, name="output"))
        model.add(Activation("softmax"))

        # STOP: Do not change the tests below. Your implementation should pass these tests.
        assert (model.get_layer(name="hidden1").input_shape == (
            None, 32 * 32 * 3)), "The input shape is: %s" % model.get_layer(
                name="hidden1").input_shape
        assert (model.get_layer(name="output").output_shape == (
            None, 43)), "The output shape is: %s" % model.get_layer(
                name="output").output_shape

        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=1e-3,
                                     beta_1=0.9,
                                     beta_2=0.999,
                                     epsilon=1e-08,
                                     decay=0.0),
                      metrics=['accuracy'])

        self.X_train, self.X_val, self.y_train, self.y_val = train_test_split(
            self.X_train, self.y_train, test_size=0.25, random_state=42)

        self.encoder = OneHotEncoder(sparse=False,
                                     n_values=43).fit(self.y_train)

        y_train_encoded = self.encoder.transform(self.y_train)
        y_val_encoded = self.encoder.transform(self.y_val)

        history = model.fit(self.X_train.reshape(-1, 32 * 32 * 3),
                            y_train_encoded,
                            nb_epoch=2,
                            batch_size=32,
                            verbose=2,
                            validation_data=(self.X_val.reshape(
                                -1, 32 * 32 * 3), y_val_encoded))

        # STOP: Do not change the tests below. Your implementation should pass these tests.
        assert (
            round(self.X_train.shape[0] / float(self.X_val.shape[0])) == 3
        ), "The training set is %.3f times larger than the validation set." % (
            self.X_train.shape[0] / float(self.X_val.shape[0]))
        assert (
            history.history['val_acc'][0] > 0.6
        ), "The validation accuracy is: %.3f" % history.history['val_acc'][0]
        return
 def test_Conv2D_layer(self):
     model = Sequential()
     model.add(
         Conv2D(32,
                kernel_size=3,
                activation='relu',
                input_shape=(28, 28, 1)))
     self.assertEqual(model.get_layer(index=0).filters, 32)
     self.assertEqual(model.get_layer(index=0).kernel_size, (3, 3))
     self.assertEqual(model.get_layer(index=0).activation.__name__, 'relu')
     self.assertEqual(
         model.get_layer(index=0).input_shape, (None, 28, 28, 1))
     self.assertEqual(
         model.get_layer(index=0).output_shape, (None, 26, 26, 32))
Example #9
def define_supernet(members, new_training_set):
    model = Sequential([
        Dense(nb_classes, activation='softmax', input_shape=(new_training_set.shape[1],))
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer="rmsprop",
                  metrics=['accuracy'])
    all_weights = [m.get_layer(index=-1).get_weights() for m in members]

    weights = np.concatenate([w for w, _ in all_weights], axis=0)
    biases = np.mean([b for _, b in all_weights], axis=0)

    model.get_layer(index=-1).set_weights([weights, biases])
    return model
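
The stitching works because the super-model's input is the horizontal concatenation of the members' features, so stacking the member kernels along axis 0 makes the merged Dense layer compute the sum of the member logits. A toy numpy check of that identity (averaging the biases is the example's own convention):

import numpy as np

w_a, w_b = np.random.randn(3, 2), np.random.randn(3, 2)  # (features, classes)
b_a, b_b = np.random.randn(2), np.random.randn(2)
f_a, f_b = np.random.randn(1, 3), np.random.randn(1, 3)  # per-member features

merged_w = np.concatenate([w_a, w_b], axis=0)            # (6, 2), as in define_supernet
merged_b = (b_a + b_b) / 2.0
lhs = np.concatenate([f_a, f_b], axis=1) @ merged_w + merged_b
rhs = f_a @ w_a + f_b @ w_b + merged_b
assert np.allclose(lhs, rhs)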
Example #10
def test_find_activation_layer():
    conv1_filters = 1
    conv2_filters = 1
    dense_units = 1
    model = Sequential()
    model.add(
        Conv2D(conv1_filters, [3, 3],
               input_shape=(28, 28, 1),
               data_format="channels_last",
               name='conv_1'))
    model.add(Activation('relu', name='act_1'))
    model.add(MaxPool2D((2, 2), name='pool_1'))
    model.add(
        Conv2D(conv2_filters, [3, 3],
               data_format="channels_last",
               name='conv_2'))
    model.add(Activation('relu', name='act_2'))
    model.add(MaxPool2D((2, 2), name='pool_2'))
    model.add(Flatten(name='flat_1'))
    model.add(Dense(dense_units, name='dense_1'))
    model.add(Activation('relu', name='act_3'))
    model.add(Dense(10, name='dense_2'))
    model.add(Activation('softmax', name='act_4'))
    assert find_activation_layer(model.get_layer('conv_1'),
                                 0) == (model.get_layer('act_1'), 0)
    assert find_activation_layer(model.get_layer('conv_2'),
                                 0) == (model.get_layer('act_2'), 0)
    assert find_activation_layer(model.get_layer('dense_1'),
                                 0) == (model.get_layer('act_3'), 0)
    assert find_activation_layer(model.get_layer('dense_2'),
                                 0) == (model.get_layer('act_4'), 0)
def get_conv_output(out_channels, kh, kw, sh, sw, ih, iw, ic, padding='VALID', framework='KERAS'):
    if (framework == 'KERAS'):
        model = Sequential()
        model.add(Conv2D(out_channels, kernel_size=(kh,kw), strides=(sh,sw), input_shape=(ih, iw, ic), padding=padding, name='conv'))
        
        out_h = model.get_layer('conv').output_shape[1]
        out_w = model.get_layer('conv').output_shape[2]
        out_c = model.get_layer('conv').output_shape[3]
        print(out_h, out_w, out_c)
        return out_h, out_w, out_c
    elif framework == 'TORCH':
        if padding == 'VALID':
            ph, pw = 0, 0
        elif padding == 'SAME':
            # symmetric padding reproduces TF's 'SAME' exactly only for odd kernels
            ph, pw = kh // 2, kw // 2

        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv1 = nn.Conv2d(ic, out_channels, kernel_size=(kh,kw), stride=(sh,sw), padding=(ph,pw), bias=False)

            def forward(self, x):
                return self.conv1(x)

        device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # PyTorch v0.4.0
        model = Net().to(device)

        summary(model, (ic, ih, iw))

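
Both branches should agree with the standard closed form out = floor((in - k + 2p) / s) + 1; a quick check of that formula against shapes used elsewhere in these examples:

import math

def conv_out(i, k, s, p):
    # out = floor((i - k + 2p) / s) + 1
    return math.floor((i - k + 2 * p) / s) + 1

assert conv_out(28, 3, 1, 0) == 26   # 'VALID': the 28x28 -> 26x26 cases above
assert conv_out(28, 3, 1, 1) == 28   # 'SAME' with stride 1 and a 3x3 kernel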
Example #12
def make_wider_student_model(teacher_model,
                             train_data,
                             validation_data,
                             init,
                             epochs=3):
    '''Train a wider student model based on teacher_model,
       with either 'random-pad' (baseline) or 'net2wider'
    '''
    new_conv1_width = 128
    new_fc1_width = 128

    model = Sequential()
    # a wider conv1 compared to teacher_model
    model.add(
        Conv2D(new_conv1_width,
               3,
               padding='same',
               input_shape=input_shape,
               name='conv1'))
    model.add(MaxPooling2D(name='pool1'))
    model.add(Conv2D(64, 3, padding='same', name='conv2'))
    model.add(MaxPooling2D(name='pool2'))
    model.add(Flatten(name='flatten'))
    # a wider fc1 compared to teacher model
    model.add(Dense(new_fc1_width, activation='relu', name='fc1'))
    model.add(Dense(nb_class, activation='softmax', name='fc2'))

    # The weights for other layers need to be copied from teacher_model
    # to student_model, except for widened layers
    # and their immediate downstreams, which will be initialized separately.
    # For this example there are no other layers that need to be copied.

    w_conv1, b_conv1 = teacher_model.get_layer('conv1').get_weights()
    w_conv2, b_conv2 = teacher_model.get_layer('conv2').get_weights()
    new_w_conv1, new_b_conv1, new_w_conv2 = wider2net_conv2d(
        w_conv1, b_conv1, w_conv2, new_conv1_width, init)
    model.get_layer('conv1').set_weights([new_w_conv1, new_b_conv1])
    model.get_layer('conv2').set_weights([new_w_conv2, b_conv2])

    w_fc1, b_fc1 = teacher_model.get_layer('fc1').get_weights()
    w_fc2, b_fc2 = teacher_model.get_layer('fc2').get_weights()
    new_w_fc1, new_b_fc1, new_w_fc2 = wider2net_fc(w_fc1, b_fc1, w_fc2,
                                                   new_fc1_width, init)
    model.get_layer('fc1').set_weights([new_w_fc1, new_b_fc1])
    model.get_layer('fc2').set_weights([new_w_fc2, b_fc2])

    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.001, momentum=0.9),
                  metrics=['accuracy'])

    train_x, train_y = train_data
    history = model.fit(train_x,
                        train_y,
                        epochs=epochs,
                        validation_data=validation_data)
    return model, history
Example #13
class DeepLearningModel:
    def __init__(self):
        self.model = None
        self.x_dim = 0

    def build(self, training_data, batch_size=10, epochs=150):
        x_tdm, y_narr = training_data
        x_narr = numpy.array(x_tdm.toarray())
        self.x_dim = x_narr.shape[1]
        # neurons in hidden layer: a rule of thumb is 2/3 * (input + output)
        neurons_hidden_layer = int(round((self.x_dim + 1) * 0.66))
        # fix random seed for reproducibility
        seed = 7
        numpy.random.seed(seed)
        # create model
        self.model = Sequential()
        self.model.add(
            Dense(12,
                  input_dim=self.x_dim,
                  kernel_initializer='uniform',
                  activation='relu',
                  name='primary_input'))
        self.model.add(
            Dense(neurons_hidden_layer,
                  kernel_initializer='uniform',
                  activation='relu'))
        self.model.add(
            Dense(1, kernel_initializer='uniform', activation='sigmoid'))
        # Compile model
        self.model.compile(loss='binary_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        # Fit the model
        verbosity = 1
        self.model.fit(x_narr, y_narr, batch_size=batch_size, epochs=epochs, verbose=verbosity)
        # evaluate the model
        scores = self.model.evaluate(x_narr, y_narr)
        # print( self.model.metrics_names )
        # print( "%s: %.2f%%" % ( self.model.metrics_names[1], scores[1]*100 ) )
        accuracy = scores[1] * 100
        return accuracy

    def predict(self, narr):
        if self.model is not None:
            if self.x_dim == narr.shape[1]:
                # this ignores words not seen before! (Given a large enough corpus this
                # shouldn't be a big problem.)
                predictions = self.model.predict(narr)
                return predictions
            else:
                raise TestingDimensionError
        else:
            raise NoDeepLearningModelError

    def save(self, name):
        self.model.save(
            os.path.join(sfsf_config.get_data_dir(), '{n}.h5'.format(n=name)))

    def load(self, name):
        del self.model
        self.model = load_model(
            os.path.join(sfsf_config.get_data_dir(), '{n}.h5'.format(n=name)))
        self.x_dim = self.model.get_layer('primary_input').input_shape[1]
Example #14
def train_stacked_autoencoder(dataset):
    # Layer parameters
    input_dim = dataset.shape[1]
    hidden_dim = 10
    activation_function = 'sigmoid'
    train_epochs = 100  # TODO

    # Model
    sae = Sequential()
    sae.add(
        Dense(hidden_dim,
              activation=activation_function,
              input_shape=(input_dim, )))
    sae.add(Dense(input_dim, activation=activation_function))
    sae.compile(optimizer='adam', loss='mse')
    sae.fit(dataset, dataset, epochs=train_epochs, verbose=0)

    # Greedy layer-wise stacking: freeze what is trained, then grow and retrain.
    for _ in range(3):
        sae.pop()
        previous_layer = sae.get_layer(index=-1)
        previous_layer.trainable = False
        sae.add(Dense(hidden_dim, activation=activation_function))
        sae.add(Dense(input_dim, activation=activation_function))
        sae.compile(optimizer='adam', loss='mse')
        sae.fit(dataset, dataset, epochs=train_epochs, verbose=0)

    return sae
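
Once trained, the encoder half can be cut out with get_layer; a small sketch, assuming the alternating Dense(hidden_dim)/Dense(input_dim) layout built above:

from keras.models import Model

def extract_encoder(sae):
    bottleneck = sae.get_layer(index=-2)  # the layer just before the reconstruction layer
    return Model(inputs=sae.input, outputs=bottleneck.output)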
Example #15
def autoencoder(x_train, y_train):
    x_train = x_train.reshape(-1, 784) / 255.0

    model = Sequential()
    model.add(Dense(512, activation='relu', input_shape=(784, )))
    model.add(Dense(256, activation='relu'))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(2, activation='linear', name="inner_layer"))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(256, activation='relu'))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(784, activation='sigmoid'))

    model.compile(loss='mean_squared_error', optimizer=Adam(), metrics=['mse'])
    plot_model(model, to_file='autoencoder_model.png')

    model.fit(x_train, x_train, epochs=1, batch_size=128, verbose=1)

    encoder = Model(model.input, model.get_layer('inner_layer').output)
    Zenc = encoder.predict(x_train)  # bottleneck representation
    plt.title('Autoencoder')
    plt.scatter(Zenc[:5000, 0],
                Zenc[:5000, 1],
                c=y_train[:5000],
                cmap=plt.get_cmap('jet', 10),
                s=5)
    plt.colorbar()
    plt.gca().get_xaxis().set_ticklabels([])
    plt.gca().get_yaxis().set_ticklabels([])
    plt.show()
Example #16
def siamese_model(params):
    input_shape = (params['ts_len'], params['n_feats'], 1)
    left_input = Input(input_shape)
    right_input = Input(input_shape)

    model = Sequential()
    activ = 'relu'
    model.add(Conv2D(16, kernel_size=(4, 1), activation=activ, name='conv1'))
    model.add(MaxPooling2D(pool_size=(2, 1), name='pool1'))
    model.add(Conv2D(16, kernel_size=(4, 1), activation=activ, name='conv2'))
    model.add(MaxPooling2D(pool_size=(2, 1), name='pool2'))
    model.add(Flatten())
    model.add(Dense(100, activation=activ, name='hidden'))

    encoded_l = model(left_input)
    encoded_r = model(right_input)

    L1_layer = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))
    L1_distance = L1_layer([encoded_l, encoded_r])

    prediction = Dense(2, activation='sigmoid')(L1_distance)
    siamese_net = Model(inputs=[left_input, right_input], outputs=prediction)

    optimizer = keras.optimizers.Adam(lr=0.00006)
    siamese_net.compile(loss="binary_crossentropy", optimizer=optimizer)
    extractor = Model(inputs=model.get_input_at(0),
                      outputs=model.get_layer("hidden").output)
    extractor.summary()
    return siamese_net, extractor
Example #17
def CAE(input_shape=(28, 28, 1), filters=[32, 64, 128, 10]):
    model = Sequential()
    if input_shape[0] % 8 == 0:
        pad3 = 'same'
    else:
        pad3 = 'valid'
    init = VarianceScaling(scale=1. / 3., mode='fan_in', distribution='uniform')
    model.add(InputLayer(input_shape))
    model.add(Conv2D(filters[0], 5, strides=2, padding='same', activation='relu', name='conv1'))

    model.add(Conv2D(filters[1], 5, strides=2, padding='same', activation='relu', name='conv2'))

    model.add(Conv2D(filters[2], 3, strides=2, padding=pad3, activation='relu', name='conv3'))

    model.add(Flatten())
    model.add(Dense(units=filters[3], name='embedding'))
    model.add(Dense(units=filters[2]*int(input_shape[0]/8)*int(input_shape[0]/8), activation='relu'))

    model.add(Reshape((int(input_shape[0]/8), int(input_shape[0]/8), filters[2])))
    model.add(Conv2DTranspose(filters[1], 3, strides=2, padding=pad3, activation='relu', name='deconv3'))

    model.add(Conv2DTranspose(filters[0], 5, strides=2, padding='same', activation='relu', name='deconv2'))

    model.add(Conv2DTranspose(input_shape[2], 5, strides=2, padding='same', name='deconv1'))
    encoder = Model(inputs=model.input, outputs=model.get_layer('embedding').output)
    return model, encoder
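
A hypothetical usage sketch with stand-in data (real images would be scaled to [0, 1]):

import numpy as np

cae, enc = CAE(input_shape=(28, 28, 1))
cae.compile(optimizer='adam', loss='mse')
x = np.random.rand(32, 28, 28, 1).astype('float32')
cae.fit(x, x, batch_size=16, epochs=1, verbose=0)
print(enc.predict(x).shape)   # -> (32, 10), the 'embedding' layer's output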
Example #18
def main(train_x, train_y, test_x):
    dim = train_x.shape[1]
    x = int(dim ** 0.5) + 1  # pad the feature count up to the next perfect square x*x
    y_train = np.argmax(train_y, axis=-1)
    classes = len(np.unique(y_train))
    model = Sequential()
    model.add(
        Dense(((x * x) - dim),
              name='feature',
              activation='relu',
              input_shape=(dim, )))
    model.add(Dense(classes, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    # here, inputs and labels are same
    model.fit(train_x, train_y, epochs=100, batch_size=512, verbose=0)
    extract = Model(model.inputs, model.get_layer('feature').output)

    # predict whole inputs through it
    x1 = extract.predict(train_x)
    x2 = extract.predict(test_x)

    # concatenate on horizontal axis
    train_x = np.concatenate((train_x, x1), axis=1)
    test_x = np.concatenate((test_x, x2), axis=1)

    train_x = train_x.reshape(train_x.shape[0], x, x)
    test_x = test_x.reshape(test_x.shape[0], x, x)

    return conimg(train_x), conimg(test_x)
Example #19
def transform_resnet50_to_sliding_window_conv(resnet_model, dropout=None):

    conv_preds = resnet_model.get_layer('conv_preds')

    assert conv_preds is not None, "Expecting a layer named conv_preds"

    # get the weight from the last conv1x1 layer 'conv_preds', we need this later.
    conv_preds_weights = conv_preds.get_weights()

    input_layer = resnet_model.layers[0].layers[0]
    layer_before_avgpool = resnet_model.layers[0].layers[-2]

    model_before_avgpool = Model(inputs=input_layer.input,
                                 outputs=layer_before_avgpool.output)

    sliding_window_model = Sequential()

    sliding_window_model.add(model_before_avgpool)
    sliding_window_model.add(AveragePooling2D(
        pool_size=(7, 7), strides=(1, 1)))  # swap in (1,1) stride

    if dropout is not None:
        sliding_window_model.add(Dropout(dropout))

    sliding_window_model.add(
        Conv2D(7, (1, 1), activation='softmax', name='conv_preds'))

    # load back in the weights for last conv1x1 layer 'conv_preds'

    new_conv_preds = sliding_window_model.get_layer('conv_preds')

    new_conv_preds.set_weights(conv_preds_weights)

    return sliding_window_model
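
The conversion hinges on a 1x1 Conv2D being a Dense layer applied at every spatial position, so a trained Dense head's kernel reshapes directly into the conv kernel. A sketch with hypothetical names:

import numpy as np

def dense_to_conv1x1_weights(dense_layer):
    w, b = dense_layer.get_weights()       # w: (channels, classes)
    return [w.reshape(1, 1, *w.shape), b]  # conv kernel: (1, 1, channels, classes)

# e.g.: new_conv_preds.set_weights(dense_to_conv1x1_weights(old_dense_head))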
Example #20
def getCodes(X):
    num_classes = 10
    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=(28, 28, 1)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(2, activation='sigmoid', name='hidden'))
    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])

    # model.fit(x_train, y_train,
    #           batch_size=batch_size,
    #           epochs=epochs,
    #           verbose=1,
    #           validation_data=(x_test, y_test))
    # model.save_weights('mnist.hd5')
    model.load_weights('mnist_2d.hd5')

    intermediate_layer_model = Model(inputs=model.input,
                                     outputs=model.get_layer('hidden').output)
    codes = intermediate_layer_model.predict(X)

    return codes
Example #21
def embedding_feature(input_file_path, output_file_path=EMBEDDED_WEIGHT_FILE, embedding_size=2):
    """
    embedding input feature into vector space
    :param input_file_path: (Str) relative path to csv file contains input feature
    :param embedding_size: (Int) number of dimensions that our embedding vector will have
    :param output_file_path: (Str) relative path to save file after embedding data, if None, save it to input file
    :return: None
    """
    df = pd.read_csv(input_file_path, sep=',', index_col=0)
    df.CandleType = (df.CandleType - 1).astype(int)

    model = Sequential()
    model.add(Embedding(input_dim=7, output_dim=embedding_size, input_length=1, name="embedding"))
    model.add(Flatten())
    model.add(Dense(25, activation="relu"))
    # model.add(Dense(15, activation="relu"))
    model.add(Dense(1))
    model.compile(loss="mse", optimizer="adam", metrics=["accuracy"])
    model.fit(x=df.CandleType.values, y=(df.HeikinClose - df.HeikinOpen).values * 10000, epochs=2, batch_size=8)
    model.summary()

    layer = model.get_layer('embedding')
    output_embeddings = layer.get_weights()
    print("output embedding weights: ", output_embeddings)
    with open(output_file_path, 'wb') as file:
        pickle.dump(output_embeddings[0], file, protocol=3)
    print("dumped embedding weights to: ", EMBEDDED_WEIGHT_FILE)
Example #22
    def model_init(self, channels, observations, num_classes):
        """
        Model is essentially two 1D convolutions
        followed by one 2d Convolution
        for extracting embeddings
        """
        model = Sequential()
        model.add(BatchNormalization(input_shape=(channels, observations, 1)))
        model.add(Conv2D(32, (1, 4), activation='relu'))
        model.add(Conv2D(25, (channels, 1), activation='relu'))
        model.add(MaxPooling2D((1, 3)))
        model.add(
            Conv2D(50, (4, 25),
                   activation='relu',
                   data_format='channels_first'))
        model.add(MaxPooling2D((1, 3)))
        model.add(Conv2D(100, (50, 2), activation='relu'))
        model.add(BatchNormalization())
        model.add(Flatten())
        reconstruction_shape = model.layers[-1].output_shape
        # 128-dimensional embedding
        model.add(Dense(128, activation='relu', name="embedding_output"))
        # Feature extraction ends here; the upsampling (decoder) half begins now.

        model.add(Dense(reconstruction_shape[1], activation='relu'))
        model.add(Dense(channels * observations, activation='relu'))
        model.add(Reshape((channels, observations, 1)))
        self.model = model
        self.encoder = Model(
            inputs=model.input,
            outputs=model.get_layer('embedding_output').output)
Example #23
def nn_feature(train, test=None):
    # Chinese column names: '性别' = sex, '血糖' = blood glucose (the target), '体检日期' = exam date.
    if is_object_dtype(train['性别']):
        train['性别'] = train['性别'].map({'男': 0, '女': 1})
    
    predictor = [column for column in train.columns if column not in ['id', '体检日期', '血糖']]

    if test is None:
        XALL = train.loc[:, predictor]
        yALL = train.loc[:, '血糖']
        sc = StandardScaler()
        XALL = sc.fit_transform(XALL)
        XALL = pd.DataFrame(XALL, columns=predictor)
    else:
        if is_object_dtype(test['性别']):
            test['性别'] = test['性别'].map({'男':0, '女':1})
        test['血糖'] = -1
        train = pd.concat([train, test])
        sc = StandardScaler()
        scaled_data = sc.fit_transform(train[predictor])
        train[predictor] = scaled_data

        test = train.loc[train['血糖'] < 0.0, predictor]
        XALL = train.loc[train['血糖'] >= 0.0, predictor]
        yALL = train.loc[train['血糖'] >= 0.0, '血糖']

    # Neural Network
    nn = Sequential()
    nn.add(Dense(units=400, kernel_initializer='normal', input_dim=XALL.shape[1]))
    nn.add(PReLU())
    nn.add(Dropout(.4))
    nn.add(Dense(units=160, kernel_initializer='normal'))
    nn.add(PReLU())
    nn.add(BatchNormalization())
    nn.add(Dropout(.6))
    nn.add(Dense(units=64, kernel_initializer='normal'))
    nn.add(PReLU())
    nn.add(BatchNormalization())
    nn.add(Dropout(.5))
    nn.add(Dense(units=26, kernel_initializer='normal'))
    nn.add(PReLU())
    nn.add(BatchNormalization(name='nn_feature'))
    nn.add(Dropout(.6))
    nn.add(Dense(1, kernel_initializer='normal'))
    nn.compile(loss='mae', optimizer=Adam(lr=4e-3, decay=1e-4))

    nn_feature = Model(inputs=nn.input,
                    outputs=nn.get_layer('nn_feature').output)

    nn.fit(XALL, yALL, batch_size=32, epochs=70, verbose=1)
    if test is None:
        nn_f = np.zeros((XALL.shape[0], 26))
        nn_f += nn_feature.predict(XALL)
    else:
        nn_f = np.zeros((test.shape[0], 26))
        nn_f += nn_feature.predict(test)
        
    # nn_f /= 5
    nn_f = pd.DataFrame(nn_f, columns=['nn_%d' % idx for idx in range(26)])

    return nn_f
Example #24
def AE_fitting(training_x, reduced_dens):

    model = Sequential()
    if reduced_dens > 700:
        second_layer = 2400
    elif reduced_dens < 600:
        second_layer = 1800
    else:
        second_layer = 2000

    model.add(
        Dense(units=second_layer,
              activation='tanh',
              name='en1',
              input_shape=[3169]))
    model.add(Dense(units=reduced_dens, activation='tanh', name='en2'))
    model.add(Dense(units=second_layer, activation='tanh', name='de1'))
    model.add(Dense(units=3169, name='de2'))

    model.summary()

    # extract compressed feature
    model.compile(optimizer='adam', loss='mae')

    model.fit(training_x, training_x, batch_size=2000, epochs=50)
    feature_model = Model(inputs=model.input,
                          outputs=model.get_layer(name='en2').output)

    return feature_model
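
A hypothetical usage sketch with stand-in data (3169-wide rows, as the model above expects):

import numpy as np

x = np.random.rand(4000, 3169).astype('float32')
feature_model = AE_fitting(x, 650)   # trains the autoencoder, returns the 'en2' extractor
codes = feature_model.predict(x)     # -> (4000, 650) compressed features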
Example #25
def VGG_16(weights_path=None):
    model = Sequential()
    model.add(ZeroPadding2D(
        (1, 1), input_shape=(224, 224, 3)))  #What to do about this input size
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation='relu')
              )  #This is the feature representation we want to look at
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)

    #plot_model(model, to_file='VGG-16_model.png')

    layer_name = 'conv2d_4'
    intermediate_layer_model = Model(
        inputs=model.input, outputs=model.get_layer(layer_name).output)
    # `data` (a batch of preprocessed images) is assumed to be defined elsewhere.
    intermediate_output = intermediate_layer_model.predict(data)

    return model
def build_model(fea_len, data, labels, char_dic):
    # Build the model
    model = Sequential()
    # Feature embeddings: single characters plus n-grams
    model.add(Embedding(fea_len, 200, input_length=300))
    # GlobalAveragePooling1D averages the embeddings of all tokens in the document
    model.add(GlobalAveragePooling1D())
    # Project onto a two-unit softmax output layer
    model.add(Dense(2, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer="adam",
                  metrics=['accuracy'])
    model.summary()

    # Train the model
    model.fit(data, labels, batch_size=30, epochs=3)

    # Pull out the embedding layer's weight matrix
    embedding_layer = model.get_layer("embedding_1")
    emb_weight = embedding_layer.get_weights()[0]

    def word2fea(word, char_dic):
        wordtuple = tuple(char_dic.get(i) for i in word)
        return wordtuple

    mother = word2fea("妈妈", char_dic)  # "妈妈" = "mom"
    index = fea_dict.get(mother)  # fea_dict: feature-to-index map defined elsewhere
    mama = emb_weight[index]  # the embedding vector for that feature
    print(mama)
Example #27
def AE_fitting(x_train, reduced_dimensions):

    x_train_new = x_train.reshape(x_train.shape[0] * x_train.shape[1],
                                  x_train.shape[2])

    model = Sequential()
    model.add(
        Dense(units=reduced_dimensions + 30,
              activation='tanh',
              name='en1',
              input_shape=[x_train.shape[2]]))
    model.add(Dense(units=reduced_dimensions, activation='tanh', name='en2'))
    model.add(
        Dense(units=reduced_dimensions + 30, activation='tanh', name='de1'))
    model.add(Dense(units=x_train.shape[2], name='de2'))

    model.summary()

    # extract compressed feature
    model.compile(optimizer='adam', loss='mae')

    model.fit(x_train_new, x_train_new, batch_size=1024, epochs=25)
    AE_model = Model(inputs=model.input,
                     outputs=model.get_layer(name='en2').output)

    return AE_model
class EmbeddingModel:

	def __init__(self, model_name, vocab, n):
		self.vocabulary = vocab
		self.model_name = model_name
		self.dimensions = n

	def init_model(self):
		self.model = Sequential()
		self.model.add(Dense(self.dimensions, input_shape=(self.vocabulary.count,), name="embedding"))
		self.model.add(Dense(self.vocabulary.count))
		self.model.add(Activation("softmax"))
		self.model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
		self.embedding_model = Model(inputs=self.model.input, outputs=self.model.get_layer("embedding").output)
		print("Initialized embedding model")

	def train(self, X_train, y_train, epochs, save_model=True):
		self.model.fit(X_train, y_train, epochs=epochs)
		if save_model:
			self.save_model()

	def train_generator(self, generator_func, step_count, epochs, save_model=True):
		self.model.fit_generator(generator=generator_func, steps_per_epoch=step_count, epochs=epochs)
		if save_model:
			self.save_model()

	def get_embedding(self, word):
		embedding = None
		if self.vocabulary.contains(word):
			onehot = self.vocabulary.word2onehot(word)  # avoid shadowing the built-in input()
			embedding = self.embedding_model.predict(onehot.reshape((1, -1)))
		return embedding

	def get_model_path(self):
		cwd = os.getcwd()
		model_path = os.path.join(cwd, 'models', self.model_name, 'embedding_model_' + str(FLAGS.dimensions) + '.h5')
		return model_path

	def load_model(self):
		self.model = load_model(self.get_model_path())
		if FLAGS.debug: print(self.model.summary())
		self.embedding_model = Model(inputs=self.model.input, outputs=self.model.get_layer("embedding").output)
		print("Loaded existing embedding model")

	def save_model(self):
		self.model.save(self.get_model_path())
		print("Saved embedding model")
Example #29
 def __dict_to_discr(self, discrDict, appendage=None, pruneLastLayer=True):
     """
 Transform dictionaries of networks into discriminators.
 """
     nodes = discrDict['nodes']
     weights = discrDict['weights']
     bias = discrDict['bias']
     if coreConf() is TuningToolCores.keras:
         from keras.models import Sequential
         from keras.layers.core import Dense, Dropout, Activation
         model = Sequential()
         names = ['dense_1', 'dense_2', 'dense_3']
         if appendage:
             for i in range(
                     len(names) - 1 if pruneLastLayer else len(names)):
                 names[i] = '%s_%s' % (appendage, names[i])
         model.add(
             Dense(nodes[0],
                   input_dim=nodes[0],
                   kernel_initializer='identity',
                   trainable=False,
                   name=names[0]))
         model.add(Activation('linear'))
         model.add(
             Dense(nodes[1],
                   input_dim=nodes[0],
                   trainable=False,
                   kernel_initializer='uniform',
                   name=names[1]))
         model.add(Activation('tanh'))
         w1 = weights[0:(nodes[0] * nodes[1])]
         w1 = w1.reshape((nodes[0], nodes[1]), order='F')
         b1 = bias[0:nodes[1]]
         model.get_layer(name=names[1]).set_weights((w1, b1))
         if not pruneLastLayer:
             model.add(
                 Dense(nodes[2],
                       kernel_initializer='uniform',
                       trainable=False,
                       name=names[2]))
             model.add(Activation('tanh'))
             w2 = weights[(nodes[0] * nodes[1]):(nodes[0] * nodes[1] +
                                                 nodes[1] * nodes[2])]
             w2 = w2.reshape((nodes[1], nodes[2]), order='F')
             b2 = bias[nodes[1]:nodes[1] + nodes[2]]
             model.layers[-2].set_weights((w2, b2))
         return model
Example #30
def rowColCNN():
    vanilla_cnn = Sequential()
    vanilla_cnn.add(Conv2D(32, (11, 11), input_shape=(256, 256, 3)))
    vanilla_cnn.add(BatchNormalization())
    vanilla_cnn.add(Activation('relu'))
    vanilla_cnn.add(MaxPooling2D(pool_size=(2, 2), strides=2))
    vanilla_cnn.add(Conv2D(32, (7, 7)))
    vanilla_cnn.add(BatchNormalization())
    vanilla_cnn.add(Activation('relu', name='vanilla_cnn_output'))

    row_cnn = Conv2D(64, (3, 64))(
        vanilla_cnn.get_layer('vanilla_cnn_output').output)
    row_cnn = BatchNormalization()(row_cnn)
    row_cnn = Activation('relu')(row_cnn)
    row_cnn = Conv2D(64, (3, 36))(row_cnn)
    row_cnn = BatchNormalization()(row_cnn)
    row_cnn = Activation('relu')(row_cnn)
    row_cnn = Conv2D(64, (3, 19))(row_cnn)
    row_cnn = BatchNormalization()(row_cnn)
    row_cnn = Activation('relu')(row_cnn)
    row_cnn = Flatten()(row_cnn)
    row_cnn = Dense(4096, name='row_cnn_output')(row_cnn)

    col_cnn = Conv2D(64, (64, 3))(
        vanilla_cnn.get_layer('vanilla_cnn_output').output)
    col_cnn = BatchNormalization()(col_cnn)
    col_cnn = Activation('relu')(col_cnn)
    col_cnn = Conv2D(64, (32, 3))(col_cnn)
    col_cnn = BatchNormalization()(col_cnn)
    col_cnn = Activation('relu')(col_cnn)
    col_cnn = Conv2D(64, (19, 3))(col_cnn)
    col_cnn = BatchNormalization()(col_cnn)
    col_cnn = Activation('relu')(col_cnn)
    col_cnn = Flatten()(col_cnn)
    col_cnn = Dense(4096, name='col_cnn_output')(col_cnn)

    row_col_features = Add()([row_cnn, col_cnn])
    row_col_features = Activation('tanh')(row_col_features)
    row_col_features = Dense(512)(row_col_features)
    row_col_features = Activation('tanh')(row_col_features)
    row_col_features = Dense(256)(row_col_features)
    row_col_features = Activation('hard_sigmoid')(row_col_features)
    motion_features = Dense(30)(row_col_features)

    model = Model(inputs=vanilla_cnn.input, outputs=motion_features)

    return model
Example #31
def make_wider_student_model(teacher_model, train_data,
                             validation_data, init, epochs=3):
    '''Train a wider student model based on teacher_model,
       with either 'random-pad' (baseline) or 'net2wider'
    '''
    new_conv1_width = 128
    new_fc1_width = 128

    model = Sequential()
    # a wider conv1 compared to teacher_model
    model.add(Conv2D(new_conv1_width, 3, input_shape=input_shape,
                     padding='same', name='conv1'))
    model.add(MaxPooling2D(2, name='pool1'))
    model.add(Conv2D(64, 3, padding='same', name='conv2'))
    model.add(MaxPooling2D(2, name='pool2'))
    model.add(Flatten(name='flatten'))
    # a wider fc1 compared to teacher model
    model.add(Dense(new_fc1_width, activation='relu', name='fc1'))
    model.add(Dense(num_class, activation='softmax', name='fc2'))

    # The weights for other layers need to be copied from teacher_model
    # to student_model, except for widened layers
    # and their immediate downstreams, which will be initialized separately.
    # For this example there are no other layers that need to be copied.

    w_conv1, b_conv1 = teacher_model.get_layer('conv1').get_weights()
    w_conv2, b_conv2 = teacher_model.get_layer('conv2').get_weights()
    new_w_conv1, new_b_conv1, new_w_conv2 = wider2net_conv2d(
        w_conv1, b_conv1, w_conv2, new_conv1_width, init)
    model.get_layer('conv1').set_weights([new_w_conv1, new_b_conv1])
    model.get_layer('conv2').set_weights([new_w_conv2, b_conv2])

    w_fc1, b_fc1 = teacher_model.get_layer('fc1').get_weights()
    w_fc2, b_fc2 = teacher_model.get_layer('fc2').get_weights()
    new_w_fc1, new_b_fc1, new_w_fc2 = wider2net_fc(
        w_fc1, b_fc1, w_fc2, new_fc1_width, init)
    model.get_layer('fc1').set_weights([new_w_fc1, new_b_fc1])
    model.get_layer('fc2').set_weights([new_w_fc2, b_fc2])

    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.001, momentum=0.9),
                  metrics=['accuracy'])

    train_x, train_y = train_data
    history = model.fit(train_x, train_y,
                        epochs=epochs,
                        validation_data=validation_data)
    return model, history
Example #32
def test_get_layer():
    model = Sequential()
    model.add(Dense(1, input_dim=2))
    with pytest.raises(ValueError):
        model.get_layer(index=5)
    with pytest.raises(ValueError):
        model.get_layer(index=None)
    with pytest.raises(ValueError):
        model.get_layer(name='conv')
Example #33
def make_deeper_student_model(teacher_model, train_data,
                              validation_data, init, epochs=3):
    '''Train a deeper student model based on teacher_model,
       with either 'random-init' (baseline) or 'net2deeper'
    '''
    model = Sequential()
    model.add(Conv2D(64, 3, input_shape=input_shape,
                     padding='same', name='conv1'))
    model.add(MaxPooling2D(2, name='pool1'))
    model.add(Conv2D(64, 3, padding='same', name='conv2'))
    # add another conv2d layer to make original conv2 deeper
    if init == 'net2deeper':
        prev_w, _ = model.get_layer('conv2').get_weights()
        new_weights = deeper2net_conv2d(prev_w)
        model.add(Conv2D(64, 3, padding='same',
                         name='conv2-deeper', weights=new_weights))
    elif init == 'random-init':
        model.add(Conv2D(64, 3, padding='same', name='conv2-deeper'))
    else:
        raise ValueError('Unsupported weight initializer: %s' % init)
    model.add(MaxPooling2D(2, name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    # add another fc layer to make original fc1 deeper
    if init == 'net2deeper':
        # net2deeper for fc layer with relu, is just an identity initializer
        model.add(Dense(64, kernel_initializer='identity',
                        activation='relu', name='fc1-deeper'))
    elif init == 'random-init':
        model.add(Dense(64, activation='relu', name='fc1-deeper'))
    else:
        raise ValueError('Unsupported weight initializer: %s' % init)
    model.add(Dense(num_class, activation='softmax', name='fc2'))

    # copy weights for other layers
    copy_weights(teacher_model, model, layer_names=[
                 'conv1', 'conv2', 'fc1', 'fc2'])

    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.001, momentum=0.9),
                  metrics=['accuracy'])

    train_x, train_y = train_data
    history = model.fit(train_x, train_y,
                        epochs=epochs,
                        validation_data=validation_data)
    return model, history
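
For reference, a minimal sketch of what a wider2net_fc helper could look like (the actual helper is not shown in these examples): 'net2wider' replicates randomly chosen units and rescales their outgoing weights so the widened network computes the same function, while 'random-pad' simply pads with small random weights.

import numpy as np

def wider2net_fc_sketch(w1, b1, w2, new_width, init='net2wider'):
    # w1: (in, n), b1: (n,), w2: (n, out); widen the first layer to new_width units
    n = w1.shape[1]
    extra = new_width - n
    if init == 'random-pad':
        new_w1 = np.concatenate([w1, np.random.normal(0, 0.1, (w1.shape[0], extra))], axis=1)
        new_b1 = np.concatenate([b1, np.zeros(extra)])
        new_w2 = np.concatenate([w2, np.random.normal(0, 0.1, (extra, w2.shape[1]))], axis=0)
    else:  # 'net2wider'
        idx = np.random.choice(n, extra)               # units to replicate
        factors = np.bincount(idx, minlength=n) + 1.0  # copies of each unit, incl. the original
        new_w1 = np.concatenate([w1, w1[:, idx]], axis=1)
        new_b1 = np.concatenate([b1, b1[idx]])
        new_w2 = np.concatenate([w2 / factors[:, None],
                                 w2[idx] / factors[idx][:, None]], axis=0)
    return new_w1, new_b1, new_w2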