Example #1
    def compile(self, lr = 0.001):
        '''
            Compiles the model for training.

            Args:
                lr (float): The learning rate; must satisfy lr >= 0.

            Returns:
                None
        '''

        # Configure the model for training. We use the Adam optimiser
        # presented in https://arxiv.org/abs/1412.6980v8. For the loss we use
        # categorical cross-entropy, described at
        # https://en.wikipedia.org/wiki/Cross_entropy, and we track plain
        # accuracy to evaluate training.
        self.model.compile(optimizer = Adam(lr = lr),
                           loss = 'categorical_crossentropy',
                           metrics = ['accuracy'])
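
A minimal usage sketch for the method above; the class name MyModel is an assumption for illustration, and only the lr keyword comes from the method's documented contract:

# Hypothetical usage; MyModel stands in for the class that owns compile().
net = MyModel()
net.compile(lr=0.001)  # lr must be >= 0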
Example #2
    def __init__(self, model_path, model_store_path, image_store_path, img_noise, num_channels, img_size):
        '''
        model_path (string): Path to the model saved by previous training; it is loaded here.
        model_store_path (string): Directory in which to store the trained models during training.
        image_store_path (string): Directory in which to store the generated anime faces during training.
        img_noise (int): Side length of the noise image used to generate anime faces (img_noise * img_noise).
        num_channels (int): Number of channels of the anime faces.
        img_size (int): Side length of the output anime faces (img_size * img_size * 3).
        
        '''
        self.model_path = model_path
        self.model_store_path = model_store_path
        self.image_store_path = image_store_path
        self.generator_base = load_model(model_path)
        self.generator_base.trainable = False
        self.noise_single = img_noise
        self.img_channels = num_channels
        self.noise_dim = self.noise_single*self.noise_single*self.img_channels
        self.img_size = img_size
        #self.img_size = img_valid  # when using the valid-padding version of the super-resolution model, as mentioned in the previous section
        #self.img_channels = 3
        self.img_flat = self.img_size * self.img_size
        self.img_shape = (self.img_size, self.img_size)
        self.img_shape_full = (self.img_size, self.img_size, self.img_channels)

        optimizer = Adam(lr = 0.0003, beta_1 = 0.5)

        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss = 'binary_crossentropy', optimizer = optimizer, metrics = ['accuracy'])
        #self.discriminator.summary()

        self.generator = self.build_generator()

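        # Stack noise -> frozen base generator -> new generator ->
        # discriminator to build the combined model used for generator updates.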
        noise = Input(shape = (self.noise_dim, ))
        output_imagepre = self.generator_base(noise)
        output_image = self.generator(output_imagepre)

        self.discriminator.trainable = False

        img_correct = self.discriminator(output_image)

        self.combine = Model(noise, img_correct)
        self.combine.compile(loss = 'binary_crossentropy', optimizer = optimizer, metrics = ['accuracy'])
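
A hedged construction sketch for the class above; the class name AnimeFaceGAN and all argument values are illustrative assumptions:

# Hypothetical instantiation; AnimeFaceGAN stands in for the surrounding class.
gan = AnimeFaceGAN(model_path='base_generator.h5',
                   model_store_path='./models/',
                   image_store_path='./images/',
                   img_noise=8,      # noise vector length is img_noise * img_noise * num_channels
                   num_channels=3,
                   img_size=64)      # output faces are img_size * img_size * 3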
Example #3
def get_model(hyperparameters, predictors, targets):

    # Initialising the RNN
    model = Sequential()
    regularizer = l2(0.01)
    optimizer = Adam(lr=hyperparameters['learning_rate'])

    model.add(
        CuDNNLSTM(units=30,
                  input_shape=(hyperparameters['input_sequence_length'],
                               len(predictors)),
                  return_sequences=True,
                  kernel_regularizer=regularizer))
    model.add(GaussianNoise(1e-4))
    model.add(BatchNormalization())

    model.add(
        CuDNNLSTM(units=20,
                  return_sequences=True,
                  kernel_regularizer=regularizer))
    model.add(GaussianNoise(1e-4))
    model.add(BatchNormalization())

    model.add(
        CuDNNLSTM(units=10,
                  kernel_regularizer=regularizer,
                  return_sequences=False))
    model.add(GaussianNoise(1e-4))
    model.add(BatchNormalization())

    model.add(
        Dense(hyperparameters['output_sequence_length'] * len(targets),
              activation='relu'))

    model.add(
        Reshape((hyperparameters['output_sequence_length'], len(targets))))

    model.compile(optimizer=optimizer, loss='mean_absolute_error')

    #     print(model.summary())

    return model
Example #4
def train(x_train_lr, x_train_hr, epochs, batch_size):

    image_shape = (8, 8, 3)
    downscale_factor = 4
    batch_count = int(x_train_hr.shape[0] / batch_size)
    #shape = (image_shape[0]//downscale_factor, image_shape[1]//downscale_factor, image_shape[2])

    generator = model_CNN.Generator(image_shape).generator()

    adam = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    generator.compile(loss='mean_squared_error', optimizer=adam)
    generator.summary()
    # Note: flow(), x_test_lr and x_test_hr are assumed to come from the
    # enclosing module scope.
    generated_train = generator.fit_generator(flow(x_train_lr,
                                                   x_train_hr,
                                                   batch_size=batch_size),
                                              validation_data=(x_test_lr,
                                                               x_test_hr),
                                              epochs=epochs)
    loss = generated_train.history['loss']
    val_loss = generated_train.history['val_loss']

    generator.save(
        '/home/ed2801/Spring2019-Proj3-spring2019-proj3-grp5/CNN_Py/V3/doc/model.h5'
    )
    generator.save_weights(
        '/home/ed2801/Spring2019-Proj3-spring2019-proj3-grp5/CNN_Py/V3/doc/weights_model.h5'
    )

    with open(
            '/home/ed2801/Spring2019-Proj3-spring2019-proj3-grp5/CNN_Py/V3/doc/loss.csv',
            'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        wr.writerow(loss)

    with open(
            '/home/ed2801/Spring2019-Proj3-spring2019-proj3-grp5/CNN_Py/V3/doc/loss_val.csv',
            'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        wr.writerow(val_loss)

    return generator, generated_train, loss, val_loss
Example #5
def predict_all_single_fold_models():
    ds = pd.read_csv(config.SUBMISSION_FORMAT)
    classes = list(ds.columns)[1:]

    total_weight = 0.0
    result = np.zeros((ds.shape[0], NB_CAT))

    data_dir = '../output/prediction_test_frames/'

    for models in config.ALL_MODELS:
        for model_name, fold in models:
            weights_fn = f"../output/nn1_{model_name}_{fold}_full.pkl"
            print(model_name, fold, weights_fn)

            with utils.timeit_context('load data'):
                X = load_test_data(data_dir, model_name, fold)
                print(X.shape)

            model = model_nn(input_size=X.shape[1])
            model.compile(optimizer=Adam(lr=1e-4),
                          loss='binary_crossentropy',
                          metrics=['accuracy'])
            model.load_weights(weights_fn)

            with utils.timeit_context('predict'):
                prediction = model.predict(X)
                weight = config.MODEL_WEIGHTS[model_name]
                result += prediction * weight
                total_weight += weight

    os.makedirs('../submissions', exist_ok=True)
    result /= total_weight

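    # Squash predictions into [clip, 1 - clip]: the map p * (1 - 2 * clip) + clip
    # sends 0 to clip and 1 to 1 - clip, keeping the log-loss finite at the extremes.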
    for clip10 in [5, 4, 3, 2]:
        clip = 10**(-clip10)
        for col, cls in enumerate(classes):
            ds[cls] = np.clip(result[:, col] * (1 - clip * 2) + clip, clip,
                              1.0 - clip)
        ds.to_csv(
            f'../submissions/submission_single_folds_models_nn_clip_{clip10}.csv',
            index=False,
            float_format='%.8f')
Example #6
def train_model_nn_combined_folds(combined_model_name,
                                  model_with_folds,
                                  load_cache=True):
    X_combined = []
    y_combined = []

    for model_name, fold in model_with_folds:
        with utils.timeit_context('load data'):
            X, y, video_ids = load_train_data(model_name, fold)
            X_combined.append(X)
            y_combined.append(y)

    X = np.row_stack(X_combined)
    y = np.row_stack(y_combined)

    print(X.shape, y.shape)
    model = model_nn(input_size=X.shape[1])
    model.compile(optimizer=Adam(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    batch_size = 64

    # Step-decay learning-rate schedule for the LearningRateScheduler callback.
    def scheduler(epoch):
        if epoch < 32:
            return 1e-3
        if epoch < 48:
            return 4e-4
        if epoch < 80:
            return 1e-4
        return 1e-5

    model.fit(X,
              y,
              batch_size=batch_size,
              epochs=128,
              verbose=1,
              callbacks=[LearningRateScheduler(schedule=scheduler)])

    model.save_weights(
        f"../output/nn_combined_folds_{combined_model_name}.pkl")
Example #7
    def network(self, weights=None):

        num_inp = Input(shape=[self.state_length])
        num_feats = Dense(70, activation='relu')(num_inp)
        num_feats = Dense(40, activation='relu')(num_feats)

        board_inp = Input(shape=[10, 10, 10])

        board_feats = Dropout(rate=0.05)(
            BatchNormalization()(Conv2D(30,
                                        kernel_size=(3, 3),
                                        strides=(1, 1),
                                        activation='relu')(board_inp)))

        board_feats = Dropout(rate=0.05)(
            BatchNormalization()(Conv2D(30,
                                        kernel_size=(3, 3),
                                        strides=(1, 1),
                                        activation='relu')(board_feats)))

        board_feats = (Conv2D(30,
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              activation='relu')(board_feats))

        board_feats = Flatten()(board_feats)
        board_feats = Dropout(rate=0.05)(Dense(150,
                                               activation='relu')(board_feats))
        #board_feats = Dense(50, activation='relu')(board_feats)
        feats = Dropout(rate=0.05)(Concatenate()([num_feats, board_feats]))
        feats = Dropout(rate=0.02)(Dense(150, activation='relu')(feats))
        feats = Dense(60, activation='relu')(feats)
        output = Dense(4)(feats)

        model = Model([num_inp, board_inp], output)
        model.summary()
        opt = Adam(lr=self.learning_rate)
        model.compile(loss='mse', optimizer=opt)

        if weights:
            model.load_weights(weights)
        return model
Example #8
def train():
    #tf.reset_default_graph()
    x_train = loadimages()
    x_train = (x_train.astype(np.float32) - 127.5) / 127.5
    x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], CHANNEL_OF_IMAGE)

    g = generator()
    d = discriminator()

    optimizer = Adam(lr=learning_rate, beta_1=0.5)
    # Keras captures the `trainable` flag at compile time, so the
    # discriminator is compiled while it is still trainable...
    d.trainable = True
    d.compile(loss='binary_crossentropy',
              metrics=['accuracy'],
              optimizer=optimizer)

    # ...and frozen before the stacked model is compiled, so that generator
    # updates leave the discriminator weights untouched.
    d.trainable = False
    dcgan = Sequential([g, d])
    dcgan.compile(loss='binary_crossentropy',
                  metrics=['accuracy'],
                  optimizer=optimizer)

    num_batches = x_train.shape[0] // batch_size
    gen_img = np.array([np.random.uniform(-1, 1, INPUT_DIM_START) for _ in range(49)])
    y_d_true = [1] * batch_size
    y_d_gen = [0] * batch_size
    y_g = [1] * batch_size

    for epoch in range(num_epoch):
        for i in range(num_batches):
            x_d_batch = x_train[i*batch_size:(i+1)*batch_size]
            x_g = np.array([np.random.normal(0, 0.5, INPUT_DIM_START) for _ in range(batch_size)])
            x_d_gen = g.predict(x_g)

            d_loss = d.train_on_batch(x_d_batch, y_d_true)
            d_loss = d.train_on_batch(x_d_gen, y_d_gen)

            g_loss = dcgan.train_on_batch(x_g, y_g)
            show_progress(epoch, i, g_loss[0], d_loss[0], g_loss[1], d_loss[1])

        image = combine_images(g.predict(gen_img))
        image = image * 127.5 + 127.5
        Image.fromarray(image.astype(np.uint8)).save(image_path + "%03d.png" % (epoch))
Example #9
def SRDenseNetKeras(inputs, nblocks=8, nlayers=8):
    logits = Conv2D(filters=16,
                    kernel_size=3,
                    strides=1,
                    padding='same',
                    activation="relu",
                    use_bias=True)(inputs)

    # Keep the first conv's output so each dense block's result can be
    # concatenated with it (a dense skip connection).
    first_features = logits

    for i in range(nblocks):
        logits = SRDenseNetBlock(logits, i, nlayers)
        logits = concatenate([logits, first_features])

    logits = Conv2D(filters=256,
                    kernel_size=1,
                    padding='same',
                    activation="relu",
                    use_bias=True)(logits)

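    # Two stride-2 transposed convolutions upscale the feature maps by 4x.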
    logits = Conv2DTranspose(filters=256,
                             kernel_size=3,
                             strides=2,
                             padding='same',
                             activation="relu",
                             use_bias=True)(logits)
    logits = Conv2DTranspose(filters=256,
                             kernel_size=3,
                             strides=2,
                             padding='same',
                             activation="relu",
                             use_bias=True)(logits)

    logits = Conv2D(filters=1, kernel_size=3, padding='same',
                    use_bias=True)(logits)

    mModel = Model(inputs, logits)
    mModel.compile(optimizer=Adam(lr=0.00001),
                   loss='mean_squared_error',
                   metrics=['mean_squared_error'])

    return mModel
Example #10
def vgg_12_bn(pretrained_weights=None, input_size=(256, 256, 1)):

    new_input = Input(shape=input_size)

    model = VGG16(include_top=False, input_tensor=new_input, weights=None)

    # Optionally freeze the VGG16 layers, since they are already trained:
    #for layer in model.layers:
    #    layer.trainable = False

    # extract first convolutional layer of vgg & apply BN to first two conv layers
    conv1 = model.layers[1].output
    bn1 = BatchNormalization()(conv1)
    conv2 = model.layers[2](bn1)
    bn2 = BatchNormalization()(conv2)
    x = model.layers[3](bn2)

    # append other layers of vgg network
    for layer in model.layers[4:15]:
        x = layer(x)

    # append final dense layers to model
    pool1 = AveragePooling2D(pool_size=(4, 4))(x)
    flat1 = Flatten()(pool1)
    dense1 = Dense(1024, activation="relu")(flat1)
    drop1 = Dropout(0.5)(dense1)
    dense2 = Dense(1, activation="linear")(drop1)

    model = Model(inputs=model.inputs, outputs=dense2)

    # Compile Our Transfer Learning Model
    model.compile(loss='mean_squared_error',
                  optimizer=Adam(lr=1e-4),
                  metrics=['mse', 'mae', 'mape'])

    print(model.summary())

    if (pretrained_weights):
        print('Using pretrained weights:', pretrained_weights)
        model.load_weights(pretrained_weights)

    return model
Example #11
def made_model():
    model = Sequential()

    model.add(InputLayer(input_shape=(4, 50, 600, 800, 3)))
    model.add(
        TimeDistributed(TimeDistributed(MaxPooling2D(pool_size=2, strides=2))))
    model.add(
        TimeDistributed(
            TimeDistributed(
                Conv2D(kernel_size=3,
                       strides=1,
                       filters=5,
                       padding='same',
                       activation='relu',
                       name='layer_conv1'))))
    model.add(
        TimeDistributed(TimeDistributed(MaxPooling2D(pool_size=2, strides=2))))
    model.add(
        TimeDistributed(
            TimeDistributed(
                Conv2D(kernel_size=5,
                       strides=1,
                       filters=20,
                       padding='same',
                       activation='relu',
                       name='layer_conv2'))))
    model.add(
        TimeDistributed(TimeDistributed(MaxPooling2D(pool_size=2, strides=2))))
    model.add(TimeDistributed(TimeDistributed(Flatten())))
    model.add(TimeDistributed(TimeDistributed(Dense(128, activation='relu'))))

    model.add(
        TimeDistributed(SimpleRNN(64, return_sequences=False, stateful=False)))
    model.add(SimpleRNN(64, return_sequences=False, stateful=False))
    model.add(Dense(6, activation='softmax'))

    optimizer = Adam(lr=1e-4)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    return model
Example #12
    def build_model(external_dim, CFN):
        c_conf = (len_closeness, channel, H, W) if len_closeness > 0 else None
        p_conf = (len_period, channel, H, W) if len_period > 0 else None
        t_conf = (len_trend, channel, H, W) if len_trend > 0 else None

        model = stresnet(c_conf=c_conf,
                         p_conf=p_conf,
                         t_conf=t_conf,
                         external_dim=external_dim,
                         nb_residual_unit=R_N,
                         CF=CFN)

        adam = Adam(lr=lr)
        model.compile(loss='mse',
                      optimizer=adam,
                      metrics=[metrics.rmse, metrics.mae])
        model.summary()
        # from tensorflow.python.keras.utils.visualize_util import plot
        # plot(model, to_file='model.png', show_shapes=True)
        return model
Example #13
    def __init__(self,
                 inputs: int,
                 outputs: int,
                 load_from_filepath: str = None,
                 **kwargs):
        from tensorflow.python.keras.layers import Input

        self.action_input: Input = None

        super().__init__(inputs, outputs, load_from_filepath, **kwargs)

        if load_from_filepath:
            from tensorflow.python.keras.optimizers import Adam
            self.model.compile(optimizer=Adam(lr=1e-6), loss='mse')
            self.action_input = self.model.get_layer("action_input").input
            self.observation_input = self.model.get_layer(
                "observation_input").input

        # super().__init__ has called build_model by this point, so self.model is set.
        self.action_input_index = self.model.input.index(self.action_input)
Example #14
    def build_model(self,
                    loss='mean_squared_error',
                    optimizer=Adam(1.0e-4),
                    regularizer=0.01):

        # Setup tensorboard for viewing model development
        time_stamp = time.time()
        path = os.path.dirname(
            os.path.realpath(__file__))  # get python file path
        self.tensorboard = TensorBoard(
            log_dir="{}/logs/{}".format(path, time_stamp))
        print(
            "Run `tensorboard --logdir=\"{}/logs/{}\"` and see `http://localhost:6006` to see training status and graph"
            .format(path, time_stamp) + "\n\n")

        self.model.build_model(loss=loss,
                               optimizer=optimizer,
                               regularizer=regularizer)

        return self.tensorboard
Example #15
def build_model2():
    effnet = efn.EfficientNetB5(weights=None,
                                include_top=False,
                                input_shape=(IMG_SIZE, IMG_SIZE, 3))
    effnet.load_weights(
        '/home/td/桌面/efficientnet-b5_imagenet_1000_notop.h5')
    for i, layer in enumerate(effnet.layers):
        if "batch_normalization" in layer.name:
            # Note: tf.keras BatchNormalization takes no `groups` argument;
            # the original presumably intended a GroupNormalization layer
            # (e.g. from keras-contrib). Note also that assigning into
            # effnet.layers does not rebuild the underlying graph.
            effnet.layers[i] = layers.BatchNormalization(axis=-1,
                                                         epsilon=0.00001)
    model = Sequential()
    model.add(effnet)
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dense(1024, activation="relu"))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(5, activation="softmax"))
    # model.add(layers.Dense(1, activation="linear"))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0005), metrics=['acc'])
    return model
Example #16
    def __init__(self, old_new='old', slt_num=0):
        super(DHSR_model, self).__init__()
        self.mf_fc_unit_nums = new_Para.param.DHSR_layers1
        self.mf_embedding_dim = new_Para.param.mf_embedding_dim
        self.sims_dict = get_sims_dict(False, True)  # similarity object; the parameters could perhaps be made configurable
        self.sim_feature_size=new_Para.param.sim_feature_size
        self.final_MLP_layers = new_Para.param.DHSR_layers2
        if old_new=='old' and slt_num ==0:
            self.simple_name = 'DHSR'
        else:
            self.simple_name = 'DHSR_{}_{}'.format(old_new,slt_num)

        self.model_name = '_mf_fc_unit_nums:{} '.format(self.mf_fc_unit_nums).replace(',', ' ')
        self.model_name += '_mfDim{}_ '.format(self.mf_embedding_dim)
        self.model_name += 'final_MLP_layers:{} '.format(self.final_MLP_layers).replace(',', ' ')
        self.model_name += '_simSize{}_ '.format(self.sim_feature_size)
        self.model_dir = dataset.crt_ds.model_path.format(self.get_simple_name())  # model path

        self.lr = new_Para.param.CI_learning_rate  # learning rate for the content part
        self.optimizer = Adam(lr=self.lr)
Example #17
def get_model(input_shape, lr):
    model = Sequential()

    model.add(Conv2D(16, kernel_size=(3, 3), input_shape=input_shape))
    # model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(32, kernel_size=(3, 3)))
    # model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, kernel_size=(3, 3)))
    # model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # model.add(Flatten())
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.1))

    model.add(Dense(200))
    # model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.1))

    model.add(Dense(100))
    # model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(Dense(20))  # a linear activation could also be tried here
    # model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(Dense(1))

    optimizer = Adam(lr)
    model.compile(loss='mean_squared_error', optimizer=optimizer)

    return model
Example #18
def create_model(learning_rate, num_dense_layers,
                 num_dense_nodes, emb_size):
    """
    Hyper-parameters:
    learning_rate:     Learning-rate for the optimizer.
    num_dense_layers:  Number of dense layers.
    num_dense_nodes:   Number of nodes in each dense layer.
    activation:        Activation function for all layers.
    """
    embedding_size = emb_size
    # Start construction of a Keras Sequential model.
    model = Sequential()

    model.add(Embedding(input_dim=num_words,
                        output_dim=embedding_size,
                        #embeddings_initializer=word2vec
                        input_length=max_tokens,
                        name='layer_embedding'))
    # Add the recurrent layers. The number of layers is a hyper-parameter we
    # want to optimize; Keras assigns each layer a unique name automatically.
    # All but the last layer must return sequences so that the following
    # LSTM receives 3-D input.
    for i in range(num_dense_layers):
        return_seq = i < num_dense_layers - 1
        model.add(Bidirectional(LSTM(units=num_dense_nodes,
                                     return_sequences=return_seq)))
        model.add(Dropout(0.2))

    model.add(Dense(9, activation='softmax'))
    optimizer = Adam(lr=learning_rate)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
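
A hedged call sketch for create_model above; the argument values are illustrative, and num_words / max_tokens must already exist in the enclosing scope:

# Hypothetical call with example hyper-parameter values.
model = create_model(learning_rate=1e-3,
                     num_dense_layers=2,
                     num_dense_nodes=64,
                     emb_size=128)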
Example #19
    def __build_and_compile_model(self):

        model_generator = {
            'resnet34': ModelGenerator(),
            'resnet50': ModelGenerator(detector='resnet50'),
            'kaggle': ModelGeneratorKaggle()
        }

        # Generate a model
        model = self.__model_utils.build_model(
            model_generator=model_generator[self.__model_params['model']],
            input_shape=(self.__model_params['input_width'],
                         self.__model_params['input_height'],
                         self.__model_params['input_channels']),
            mode='detection',
            n_category=1)

        # Restore the saved weights
        if self.__model_params['restore_weights']:
            self.__model_utils.restore_weights(
                model=model,
                init_epoch=self.__model_params['initial_epoch'],
                weights_folder_path=self.__weights_path)

        # Initialize the decay, if defined
        try:
            decay = float(self.__model_params['decay'])
        except ValueError:
            decay = None

        # Compile the model
        model.compile(optimizer=Adam(lr=self.__model_params['learning_rate'],
                                     decay=decay if decay else 0.0),
                      loss=self.__metrics.all_loss,
                      metrics=[
                          self.__metrics.size_loss,
                          self.__metrics.heatmap_loss,
                          self.__metrics.offset_loss
                      ])

        return model
Example #20
    def train(self):
        # Build the model with the high-level Keras API below
        model = self.build_model()
        model.compile(loss='categorical_crossentropy',
              # optimizer=RMSprop(lr=0.001),
              optimizer = Adam(lr=self.learn_rate),
            #   optimizer=tf.train.AdamOptimizer(learning_rate=self.learn_rate),
              metrics=['accuracy'])

        callbacks = [EarlyStopping(
            monitor='val_loss', patience=2)]

        # All images will be rescaled by 1./255
        train_datagen = ImageDataGenerator(rescale=1./255)
        test_datagen = ImageDataGenerator(rescale=1./255)

        # Flow training images in batches of BATCH_SIZE using the train_datagen generator
        train_generator = train_datagen.flow_from_directory(
                self.train_path,  # This is the source directory for training images
                target_size=(DIVIDE_IMAGE_HEIGHT, DIVIDE_IMAGE_WEIGHT),
                color_mode='grayscale',
                batch_size=BATCH_SIZE,
                # Since we use categorical_crossentropy loss, we need categorical labels
                class_mode='categorical')

        # Flow validation images in batches of BATCH_SIZE using the test_datagen generator
        validation_generator = test_datagen.flow_from_directory(
                self.valid_path,
                target_size=(DIVIDE_IMAGE_HEIGHT, DIVIDE_IMAGE_WEIGHT),
                color_mode='grayscale',
                batch_size=BATCH_SIZE,
                class_mode='categorical')

        history = model.fit_generator(
            train_generator,
            epochs=1000,
            callbacks=callbacks,
            validation_data=validation_generator,
            verbose=2)
        self.model = model
        model.save(self.model_path)
Example #21
def fgcnn_xdeepfm_experiment(data_path, dataset_type='critero'):
    opt = Adam(lr=0.001)
    model_params = {
        'dnn_hidden_units': (256, 256),
        'cin_layer_size': (
            128,
            128,
        ),
        'cin_split_half': True,
        'conv_kernel_width': (9, 9, 9, 9),
        'conv_filters': (38, 40, 42, 44),
        'new_maps': (3, 3, 3, 3),
        'pooling_width': (1, 1, 1, 1),
        'l2_reg_linear': 1e-5,
        'l2_reg_embedding': 1e-5,
        'l2_reg_dnn': 0,
        'dnn_dropout': 0,
        'task': 'binary'
    }
    run_base_experiment(data_path, dataset_type, model_params, FGCNN_xDeepFM,
                        opt)
Example #22
def sentiment_analysis(num_words, max_tokens):
    model = Sequential()

    # Embedding layer: outputs the word vector for each word in the sentence
    embedding_size = 8
    model.add(Embedding(input_dim=num_words,
                        output_dim=embedding_size,
                        input_length=max_tokens,
                        name='embedding_layer'))
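    # For input of shape (batch, max_tokens) the embedding outputs
    # (batch, max_tokens, embedding_size), i.e. one 8-dim vector per token.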

    model.add(LSTM(units=16, return_sequences=True))
    model.add(LSTM(units=8, return_sequences=True))
    model.add(LSTM(units=4, return_sequences=False))
    model.add(Dense(1, activation='sigmoid'))

    optimizer = Adam(lr=0.001)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    
    return model
Example #23
    def build_model(self) -> 'Model':
        from tensorflow.python.keras.layers import Dense, Activation
        from tensorflow.python.keras.models import Model
        from tensorflow.python.keras.optimizers import Adam

        inputs, x = self._get_input_layers()

        x = Dense(256)(x)
        x = Activation('relu')(x)
        x = Dense(256)(x)
        x = Activation('relu')(x)
        x = Dense(256)(x)
        x = Activation('relu')(x)

        x = Dense(1, activation='linear')(x)

        model = Model(inputs=inputs, outputs=x)

        model.compile(optimizer=Adam(lr=self.learning_rate), loss='mse')
        # print(model.summary())
        return model
Example #24
    def launch_neural(self):
        vgg16_net = VGG16(weights='imagenet',
                          include_top=False,
                          input_shape=(161, 161, 3))
        vgg16_net.trainable = False

        model = Sequential()
        model.add(vgg16_net)
        # Add a new classifier on top of the model
        model.add(Flatten())
        model.add(Dense(256))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(2))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=1e-5),
                      metrics=['accuracy'])
        model.load_weights("mnist_model.h5")
        return model
Example #25
    def __init__(self, state_dim, action_size, gamma=0.99):
        self.state_dim = state_dim
        self.action_size = action_size
        self.memory = deque(maxlen=2000)
        self.gamma = gamma

        self.batch_size = 64
        self.epsilon = 1.0  # exploration rate
        self.epsilon_min = 0.1
        self.epsilon_decay = 0.995

        self.q_model = self._build_model()
        self.q_model.compile(loss='mse', optimizer=Adam(lr=0.001))
        self.q_model.predict_one = lambda x: self.q_model.predict(np.array([x]))[0]
        self.target_q_model = self._build_model()
        self.target_q_model.predict_one = lambda x: self.target_q_model.predict(np.array([x]))[0]

        self.update_target_q_weights()  # copy the Q-network parameters into the target Q network
Example #26
def define_model(num_tokens, max_tokens):
    '''
    Defines the model definition based on input parameters
    '''
    model = Sequential()
    model.add(
        Embedding(input_dim=num_tokens,
                  output_dim=EMBEDDING_SIZE,
                  input_length=max_tokens,
                  name='layer_embedding'))

    model.add(GRU(units=16, name="gru_1", return_sequences=True))
    model.add(GRU(units=8, name="gru_2", return_sequences=True))
    model.add(GRU(units=4, name="gru_3"))
    model.add(Dense(1, activation='sigmoid', name="dense_1"))
    optimizer = Adam(lr=1e-3)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    print(model.summary())
    return model
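
A hedged call sketch for define_model above; the vocabulary size and sequence length are illustrative assumptions:

# Hypothetical call: a 10000-token vocabulary, sequences padded to 100 tokens.
model = define_model(num_tokens=10000, max_tokens=100)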
Example #27
def load_model():
    from tensorflow.python.keras.models import model_from_json
    from tensorflow.python.keras.optimizers import Adam
    from tensorflow.python.keras.losses import mean_squared_logarithmic_error

    with open(model_path, "r") as json_file:

        model = model_from_json(json_file.read())
        model.load_weights(weights_path)
        model._make_predict_function()

        optimizer = Adam(lr=0.001)

        # Compile the model for using it to classify the song
        model.compile(loss=mean_squared_logarithmic_error,
                      optimizer=optimizer,
                      metrics=['accuracy'])

        print('Model loaded!')

    return model
Example #28
def build_model_1(f_size):
    dim_input = len(f_size)

    input_x = [Input(shape=(1, )) for i in range(dim_input)]
    biases = [get_embed(x, size, 1) for (x, size) in zip(input_x, f_size)]
    factors = [
        get_embed(x, size, k_latent) for (x, size) in zip(input_x, f_size)
    ]

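    # Factorization-machine style interactions: with s = sum_i v_i,
    # <s - v_i, v_i> = sum_{j != i} <v_j, v_i>, so each dot product below
    # collects every pairwise interaction involving factor i.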
    s = Add()(factors)
    diffs = [Subtract()([s, x]) for x in factors]
    dots = [Dot(axes=1)([d, x]) for d, x in zip(diffs, factors)]

    x = Concatenate()(biases + dots)
    x = BatchNormalization()(x)
    output = Dense(1, activation='relu', kernel_regularizer=l2(kernel_reg))(x)
    model = Model(inputs=input_x, outputs=[output])
    model.compile(optimizer=Adam(clipnorm=0.5), loss='mean_squared_error')
    output_f = factors + biases
    model_features = Model(inputs=input_x, outputs=output_f)
    return model, model_features
Example #29
    def create_model(self):
        model = Sequential()

        #model.add(Conv2D(16, (3, 3), input_shape=env.OBSERVATION_SPACE_VALUES))  # OBSERVATION_SPACE_VALUES = (10, 10, 3) a 10x10 RGB image.
        #model.add(Activation('relu'))
        #model.add(MaxPooling2D(pool_size=(2, 2)))
        #model.add(Dropout(0.2))

        #model.add(Conv2D(256, (3, 3)))
        #model.add(Activation('relu'))
        #model.add(MaxPooling2D(pool_size=(2, 2)))
        #model.add(Dropout(0.2))

        model.add(Input(shape=(2,)))
        model.add(Dense(8))

        #model.add(Dense(64))

        model.add(Dense(self.env.action_space.n, activation='linear'))  # ACTION_SPACE_SIZE = how many choices (9)
        model.compile(loss="mse", optimizer=Adam(lr=0.001), metrics=['accuracy'])
        return model
Example #30
def rnn_train():
    tokenizer = load(path + 'tokenizer_ref.pkl')
    X_train_token = load(path + 'X_train_token_project.sav')
    X_dev_token = load(path + 'X_dev_token_project.sav')
    y_train = load(path + 'y_train_project.sav')
    y_dev = load(path + 'y_dev_project.sav')
    paragram_embeddings = load_para(tokenizer.word_index)
    
    model = Sequential()
    optimizer = Adam(lr=1e-3)
    model.add(Embedding(weights=[paragram_embeddings], trainable=False,
                        input_dim=num_words, output_dim=embedding_size,
                        input_length=max_tokens))
    model.add(GRU(units=32, return_sequences=True))
    model.add(GRU(units=16, dropout=0.5, return_sequences=True))
    model.add(GRU(units=8, return_sequences=True))
    model.add(GRU(units=4))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['AUC', 'accuracy'])
    model.summary()
    history = model.fit(np.array(X_train_token), y_train, validation_data=(np.array(X_dev_token),y_dev), epochs=4, batch_size=500)
    save_model(model,path+'rnn_model.h5')
    print('train complete')