Code example #1
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Dropout, Flatten, Dense
from keras.optimizers import Adadelta
from keras.losses import mean_squared_error
import matplotlib.pyplot as plt


def build_model():
    model = Sequential()
    model.add(Conv2D(64, (5, 5), strides=(1, 1), padding="same",
                     activation="relu", input_shape=(306, 408, 3)))
    model.add(MaxPool2D((3, 3), strides=(2, 2), padding="same"))
    model.add(Conv2D(64, (5, 5), strides=(1, 1), padding="same", activation="relu"))
    model.add(MaxPool2D((3, 3), strides=(2, 2), padding="same"))
    model.add(Conv2D(64, (5, 5), padding="same", activation="relu"))
    model.add(MaxPool2D((3, 3), strides=(2, 2), padding="same"))
    model.add(Conv2D(16, (5, 5), padding="same", activation="relu"))
    model.add(MaxPool2D((3, 3), strides=(2, 2), padding="same"))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(8, activation='relu'))
    optimizer = Adadelta()
    model.compile(optimizer, loss=mean_squared_error)
    model.summary()  # summary() prints directly and returns None

    # GET_DATA is an external module in the original project.
    train_X, train_y = GET_DATA.get_batches_data()
    cost_values = []
    for step in range(1000):
        cost = model.train_on_batch(train_X, train_y)
        cost_values.append(cost)
        if step % 10 == 0:
            print("step %d , cost value is %.3f" % (step, cost))
    model.save("./model1.h5")
    plt.plot(cost_values)
    plt.show()
Code example #2
File: base.py Project: tdefa/big-fish
def get_optimizer(optimizer_name="adam", **kwargs):
    """Instantiate the optimizer.

    Parameters
    ----------
    optimizer_name : str
        Name of the optimizer to use.
    **kwargs
        Keyword arguments forwarded to the optimizer constructor.

    Returns
    -------
    optimizer : tf.keras.optimizers.Optimizer
        Optimizer instance used in the model.

    """
    # TODO use tensorflow optimizer
    if optimizer_name == "adam":
        optimizer = Adam(**kwargs)
    elif optimizer_name == "adadelta":
        optimizer = Adadelta(**kwargs)
    elif optimizer_name == "adagrad":
        optimizer = Adagrad(**kwargs)
    elif optimizer_name == "adamax":
        optimizer = Adamax(**kwargs)
    elif optimizer_name == "sgd":
        optimizer = SGD(**kwargs)
    else:
        raise ValueError(
            "Instead of {0}, optimizer must be chosen among "
            "['adam', 'adadelta', 'adagrad', 'adamax', 'sgd'].".format(
                optimizer_name))

    return optimizer
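A hedged usage sketch (assuming the optimizer classes above are already imported and a model exists to compile; keyword arguments are forwarded straight to the constructor):

opt = get_optimizer("adadelta", lr=1.0, rho=0.95)
model.compile(optimizer=opt, loss="mse")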
Code example #3
from keras.models import Sequential
from keras.layers import (Conv1D, MaxPooling1D, GlobalAveragePooling1D,
                          Dropout, Dense)
from keras.optimizers import Adadelta


def _cnn(x_shape):
    """https://github.com/lykaust15/Deep_learning_examples/blob/master/
    8.RBP_prediction_CNN/RBP_binding_site_prediction.ipynb"""
    model = Sequential([
        Conv1D(128, (10, ),
               activation='relu',
               input_shape=(x_shape[1], x_shape[2])),
        Dropout(0.25),
        MaxPooling1D(pool_size=(3, ), strides=(1, )),
        Conv1D(128, (10, ), activation='relu', padding='same'),
        Dropout(0.25),
        MaxPooling1D(pool_size=(3, ), strides=(1, )),
        Conv1D(256, (5, ), activation='relu', padding='same'),
        Dropout(0.25),
        GlobalAveragePooling1D(),
        Dropout(0.25),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(1, activation='sigmoid')
    ])

    model.compile(loss='binary_crossentropy',
                  optimizer=Adadelta(),
                  metrics=['accuracy'])

    return model
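A usage sketch with illustrative shapes (the linked notebook feeds one-hot-encoded RNA sequences; any (batch, length, channels) array works as long as the length survives the un-padded first Conv1D):

import numpy as np

x = np.zeros((32, 101, 4), dtype="float32")   # (batch, sequence length, channels)
y = np.zeros((32, 1), dtype="float32")        # binary labels
model = _cnn(x.shape)
model.fit(x, y, epochs=1, batch_size=32)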
Code example #4
    def build_graph(self):
        self.model = Sequential()

        self.model.add(
            Conv2D(DigitClassifier.layer_1_size,
                   kernel_size=(3, 3),
                   trainable=True,
                   activation='relu',
                   input_shape=self.mnist_data.input_shape))
        self.model.add(
            Conv2D(DigitClassifier.layer_2_size, (3, 3),
                   activation='relu',
                   trainable=True))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Dropout(0.25))
        self.model.add(Flatten())
        self.model.add(Dense(DigitClassifier.layer_3_size, activation='relu'))
        self.model.add(Dropout(0.5))

        # self.model.add(Flatten(input_shape=DigitClassifier.mnist_data.input_shape))

        self.model.add(Dense(self.num_classes, activation='softmax'))

        self.model.compile(loss=categorical_crossentropy,
                           optimizer=Adadelta(),
                           metrics=['accuracy'])
        # TF1-era idioms: pre-build the predict function so the model can be
        # called from another thread, and capture the default graph.
        self.model._make_predict_function()
        self.graph = tf.get_default_graph()
Code example #5
    def __compile(self):
        # Per the original author: gradient clipping is not available in the
        # Keras Adadelta implementation used here.
        optimizer = Adadelta()
        # optimizer = 'adam'
        self.model.compile(loss='mse',
                           optimizer=optimizer,
                           metrics=[pearson_correlation])
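`pearson_correlation` is a custom metric defined elsewhere in the project; a hypothetical Keras-backend sketch of such a metric:

from keras import backend as K

def pearson_correlation(y_true, y_pred):
    # Sample Pearson correlation between predictions and targets.
    xm = y_true - K.mean(y_true)
    ym = y_pred - K.mean(y_pred)
    return K.sum(xm * ym) / (K.sqrt(K.sum(K.square(xm)) * K.sum(K.square(ym))) + K.epsilon())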
Code example #6
    def freeze_first_layers(self):
        self.model.layers[0].trainable = False
        self.model.layers[1].trainable = True
        # Recompile so the trainable flags take effect (TF1-era Keras idioms below).
        self.model.compile(loss=categorical_crossentropy,
                           optimizer=Adadelta(),
                           metrics=['accuracy'])
        self.model._make_predict_function()
        self.graph = tf.get_default_graph()
Code example #7
    def learn(self, style_name):
        img_paths = glob.glob("transfer/ns_model/train_img/*")
        batch_size = 2
        epochs = 5
        input_shape = (224, 224, 3)
        input_size = input_shape[:2]
        style = glob.glob("media/style/*")[0].split("\\")[-1]

        img_sty = load_img(
            'media/style/'+style,
            target_size=input_size
        )
        img_arr_sty = np.expand_dims(img_to_array(img_sty), axis=0)
        self.y_true_sty = self.model_sty.predict(img_arr_sty)
        shutil.rmtree("./media/style")
        os.mkdir("./media/style")

        self.gen = self.train_generator(
            img_paths,
            batch_size,
            self.model_con,
            self.y_true_sty,
            epochs=epochs
        )

        gen_output_layer = self.model_gen.layers[-1]
        tv_loss = self.TVRegularizer(gen_output_layer.output)
        gen_output_layer.add_loss(tv_loss)

        self.model.compile(
            optimizer=Adadelta(),
            loss=[
                self.style_loss,
                self.style_loss,
                self.style_loss,
                self.style_loss,
                self.feature_loss
            ],
            loss_weights=[1.0, 1.0, 1.0, 1.0, 3.0]
        )

        now_epoch = 0
        min_loss = np.inf
        steps_per_epoch = math.ceil(len(img_paths)/batch_size)

        # Training loop
        for i, (X_train, y_train) in enumerate(self.gen):
            if i % steps_per_epoch == 0:
                now_epoch += 1

            loss = self.model.train_on_batch(X_train, y_train)
            # Keep only the weights with the lowest total loss seen so far.
            if loss[0] < min_loss:
                min_loss = loss[0]
                self.model.save("transfer/ns_model/pretrained_model/" + style_name + ".h5")

            print("epoch: {}, iters: {}, loss: {:.3f}".format(now_epoch, i, loss[0]))
Code example #8
def train_evaluate():

    # Generate training and validation data generators 
    def get_image_list(data_dir):
        dataset = []
        for folder in os.listdir(data_dir):
            for image in os.listdir(os.path.join(data_dir, folder)):
                dataset.append((os.path.join(data_dir, folder, image), folder))
        return dataset

    training_data = ImageSequence(get_image_list(os.path.join(FLAGS.data_dir, 'train')), FLAGS.batch_size, True)
    validation_data = ImageSequence(get_image_list(os.path.join(FLAGS.data_dir, 'test')), FLAGS.batch_size, False)

    # Horovod: Initialize Horovod
    hvd.init()

    # Horovod: Pin GPU to be used to process local rank (one GPU per process)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = str(hvd.local_rank())
    tf.keras.backend.set_session(tf.Session(config=config))

    # Create a model
    model = network_model(FLAGS.hidden_units)
    loss = 'categorical_crossentropy'

    # Horovod: Adjust learning rate based on number of GPUs
    optimizer = Adadelta(lr=1.0 * hvd.size())
    # Horovod: add Horovod Distributed Optimizer
    optimizer = hvd.DistributedOptimizer(optimizer)

    metrics = ['acc']
    model.compile(optimizer, loss, metrics)
  
    # Set up callbacks
    callbacks = [
        # Broadcast initial variable states from rank 0 to all other processes
        hvd.callbacks.BroadcastGlobalVariablesCallback(0),
    ]
    
    # Horovod: save logs only on worker 0
    if hvd.rank() == 0:
        callbacks.append(tf.keras.callbacks.TensorBoard(log_dir=FLAGS.log_dir))

    # Start training
    model.fit_generator(generator=training_data,
                        validation_data=validation_data,
                        epochs=FLAGS.epochs,
                        use_multiprocessing=True,
                        workers=4,
                        callbacks=callbacks,
                        verbose=1)

    # Save the model
    model.save(FLAGS.save_model_path)
Code example #9
def compile_model(train_model):
    train_model.compile(optimizer=Adadelta(),
                        loss=[
                            style_images.style_feature_loss,
                            style_images.style_feature_loss,
                            style_images.style_feature_loss,
                            style_images.style_feature_loss,
                            contents_images.contents_feature_loss
                        ],
                        loss_weights=[1.0, 1.0, 1.0, 1.0, 4.0])

    return train_model
Code example #10
def autoencoder_hyperopt(_n_features, _hparams=default_autoencoder_hyperopt):
    """
    Creazione struttura autoencoder Keras parametrica.
    
    :param _n_features: int
        numero input rete neurale. Configurazione tensore principale
    :param _hparams: dict
        parametri per la configurazione della rete
    
    :return: Model
        autoencoder creato. Keras model
    """
    input_layer = Input(shape=(_n_features, ))
    for layer in range(1, int(_hparams['num_layers']) + 1):

        hidden = Dense(
            units=int(_hparams['num_unit_' + str(layer)]),
            activation=_hparams['actv_func'],
            activity_regularizer=regularizers.l1(
                _hparams['l1_reg']))(input_layer if layer == 1 else hidden)
        if _hparams['drop_enabled']:
            hidden = Dropout(rate=_hparams['drop_factor'])(hidden)

    for layer in reversed(range(1, int(_hparams['num_layers']) + 1)):
        hidden = Dense(units=int(_hparams['num_unit_' + str(layer)]),
                       activation=_hparams['actv_func'],
                       activity_regularizer=regularizers.l1(
                           _hparams['l1_reg']))(hidden)
        if _hparams['drop_enabled']:
            hidden = Dropout(rate=_hparams['drop_factor'])(hidden)

    output_layer = Dense(_n_features,
                         activation=_hparams['actv_func_out'])(hidden)

    autoencoder = Model(input_layer, output_layer)

    if _hparams['optimizer'] == 'adadelta':
        opt_net = Adadelta(_hparams['learn_rate_opt'], rho=0.95)
    elif _hparams['optimizer'] == 'adam':
        opt_net = Adam(_hparams['learn_rate_opt'],
                       beta_1=0.9,
                       beta_2=0.999,
                       amsgrad=False)
    else:
        # Any other value falls back to Adam with the same defaults.
        opt_net = Adam(_hparams['learn_rate_opt'],
                       beta_1=0.9,
                       beta_2=0.999,
                       amsgrad=False)

    autoencoder.compile(optimizer=opt_net, loss=_hparams['loss_func'])

    return autoencoder
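A hedged usage sketch; the dictionary keys are exactly the ones the function reads, while the values are illustrative:

hparams = {
    'num_layers': 2, 'num_unit_1': 64, 'num_unit_2': 32,
    'actv_func': 'relu', 'actv_func_out': 'sigmoid',
    'l1_reg': 1e-5, 'drop_enabled': True, 'drop_factor': 0.2,
    'optimizer': 'adadelta', 'learn_rate_opt': 1.0, 'loss_func': 'mse',
}
autoencoder = autoencoder_hyperopt(30, hparams)
autoencoder.summary()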
Code example #11
def get_optimizer(optimizer='sgd', learning_rate=0.1, momentum=0.9, log=True):
    """Create an optimizer and wrap it for Horovod distributed training. Default is SGD."""
    if log:
        print('Creating optimizer on rank ' + str(hvd.rank()))
    opt = None
    if optimizer == 'sgd+nesterov':
        opt = SGD(lr=learning_rate, momentum=momentum, nesterov=True)
    elif optimizer == 'rmsprop':
        opt = RMSprop(lr=learning_rate, rho=0.9)
    elif optimizer == 'adam':
        opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, amsgrad=False)
    elif optimizer == 'adadelta':
        opt = Adadelta(lr=learning_rate, rho=0.95)
    else:
        opt = SGD(lr=learning_rate, momentum=momentum, nesterov=False)
    # Wrap optimizer for data distributed training
    return hvd.DistributedOptimizer(opt)
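A usage sketch; `hvd.init()` must already have been called (Horovod requires it before `hvd.rank()`/`hvd.size()`), and `model` stands in for any Keras model built elsewhere:

hvd.init()
opt = get_optimizer('adadelta', learning_rate=1.0 * hvd.size())
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['acc'])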
Code example #12
from keras.layers import Input, Conv2D, MaxPooling2D, Dropout, Flatten, Dense
from keras.models import Model
from keras.optimizers import Adadelta


def model_fn(image_shape, input_name):
    inputs = Input(shape=image_shape, name=input_name)
    x = Conv2D(32, (3, 3), activation='relu')(inputs)
    x = Conv2D(64, (3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.25)(x)
    x = Flatten()(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.5)(x)
    y = Dense(7, activation='softmax')(x)

    model = Model(inputs=inputs, outputs=y)

    model.compile(optimizer=Adadelta(),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
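Usage sketch with illustrative arguments; code example #16 below builds the same graph inline for 112x112 RGB inputs and 7 classes:

model = model_fn(image_shape=(112, 112, 3), input_name='images')
model.summary()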
Code example #13
def train_evaluate():

    #Create a keras model
    network_model = model.network(FLAGS.hidden_units)
    loss = 'sparse_categorical_crossentropy'
    metrics = ['accuracy']
    opt = Adadelta()
    network_model.compile(loss=loss, optimizer=opt, metrics=metrics)

    #Convert the Keras model to a tf estimator
    estimator = model_to_estimator(keras_model=network_model,
                                   model_dir=FLAGS.job_dir)

    #Create training, evaluation, and serving input functions
    train_input_fn = lambda: input_fn(data_file=FLAGS.training_file,
                                      is_training=True,
                                      batch_size=FLAGS.batch_size,
                                      num_parallel_calls=FLAGS.num_parallel_calls)

    #NOTE: the original code evaluates on the training file as well
    valid_input_fn = lambda: input_fn(data_file=FLAGS.training_file,
                                      is_training=False,
                                      batch_size=FLAGS.batch_size,
                                      num_parallel_calls=FLAGS.num_parallel_calls)

    #Create training and validation specifications
    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
                                        max_steps=FLAGS.max_steps)

    export_latest = tf.estimator.FinalExporter("image_classifier",
                                               serving_input_fn)

    eval_spec = tf.estimator.EvalSpec(input_fn=valid_input_fn,
                                      steps=None,
                                      throttle_secs=FLAGS.throttle_secs,
                                      exporters=export_latest)

    #Start training
    tf.logging.set_verbosity(FLAGS.verbosity)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
Code example #14
File: train_evaluate.py Project: jarokaz/AMLWorkshop
def VGG16base():
  
    inputs = Input(shape=IMAGE_SHAPE, name=INPUT_NAME)
    conv_base = VGG16(weights='imagenet',
                      include_top=False,
                      input_tensor=inputs)

    # Freeze the convolutional base; only the new dense head is trained.
    for layer in conv_base.layers:
        layer.trainable = False

    a = Flatten()(conv_base.output)
    a = Dense(256, activation='relu')(a)
    y = Dense(NUM_CLASSES, activation='softmax')(a)

    model = Model(inputs=inputs, outputs=y)
    
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adadelta(),
                  metrics=['acc'])
    
    return model
Code example #15
def train_evaluate():

    # Generate training and validation data generators
    def get_image_list(data_dir):
        dataset = []
        for folder in os.listdir(data_dir):
            for image in os.listdir(os.path.join(data_dir, folder)):
                dataset.append((os.path.join(data_dir, folder, image), folder))
        return dataset

    training_data = ImageSequence(
        get_image_list(os.path.join(FLAGS.data_dir, 'train')),
        FLAGS.batch_size, True)
    validation_data = ImageSequence(
        get_image_list(os.path.join(FLAGS.data_dir, 'test')), FLAGS.batch_size,
        False)

    # Create a model
    model = network_model(FLAGS.hidden_units)
    loss = 'categorical_crossentropy'
    optimizer = Adadelta()
    metrics = ['acc']
    model.compile(optimizer, loss, metrics)

    # Start training
    tensorboard = TensorBoard(log_dir=FLAGS.log_dir)
    model.fit_generator(generator=training_data,
                        validation_data=validation_data,
                        epochs=FLAGS.epochs,
                        use_multiprocessing=True,
                        workers=4,
                        callbacks=[tensorboard],
                        verbose=1)

    # Save the model
    model.save(FLAGS.save_model_path)
Code example #16
import h5py
import tensorflow as tf
from time import time
from keras.layers import Input, Conv2D, MaxPooling2D, Dropout, Flatten, Dense
from keras.models import Model
from keras.optimizers import Adadelta
from keras.callbacks import TensorBoard

test = h5py.File(validation_file)
images = test['images'].value   # h5py<3 API; on h5py>=3 use test['images'][()]
labels = test['labels'].value

y_test = tf.keras.utils.to_categorical(labels, 7)
x_test = images / 255
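
# NOTE: only the validation file is loaded here; the x_train / y_train used in
# model.fit() below are assumed to come from elsewhere. A hypothetical stand-in
# mirroring the loading above (with an assumed `training_file` path):
train = h5py.File(training_file)
x_train = train['images'].value / 255
y_train = tf.keras.utils.to_categorical(train['labels'].value, 7)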

inputs = Input(shape=(112, 112, 3))
x = Conv2D(32, (3, 3), activation='relu')(inputs)
x = Conv2D(64, (3, 3), activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(0.25)(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
x = Dropout(0.5)(x)
y = Dense(7, activation='softmax')(x)

model = Model(inputs=inputs, outputs=y)

model.compile(optimizer=Adadelta(),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

tensorboard = TensorBoard(log_dir='../logs/{0}'.format(time()))

model.fit(x_train, y_train, batch_size=32, epochs=25, verbose=1, callbacks=[tensorboard])


model.save(model_file)

Code example #17
# IMPORTANT: debugging block kept disabled by the original author
"""
frame = totoOriginal.readframes(1)
print(frame)
int_values = [x for x in frame]
print(int_values)
print(bytes(int_values))
#"""

# encoding_dim = totoOriginal.getsampwidth()
# input_img = Input(shape=(784,))

regularEncoder = enc.getEncoder(40, 16)

optimizer = Adadelta(lr=2.0, rho=0.75)

regularEncoder.compile(optimizer,
                       loss='mean_squared_error',
                       metrics=['accuracy'])
totoInput = processor.getTrainingSet(totoOriginal,
                                     40,
                                     normalize=True,
                                     normalization_factor=float(amplitude))
print(totoInput.shape)
print(totoInput[0].shape)
totoOutput = processor.getTrainingSet(totoCover,
                                      40,
                                      normalize=True,
                                      normalization_factor=float(amplitude))
Code example #18
File: cnn.py Project: mattscchan/551p4
def main(args):
    if args.testing:
        subset = 1000
        teststring = '-t'
    else:
        subset = None
        teststring = ''

    modelpath = 'model/' + args.dataset + '/'
    datapath = 'data/csv/' + args.dataset + '_dataset/train.csv'
    vecpath = 'data/word2vec/' + args.dataset + '_combined_word2vec'

    # CONSTANTS
    data_num_classes = 2
    savepath = modelpath + args.model + '/best' + teststring + '.hdf5'
    alphabet = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}"

    # HYPERPARAMETERS
    filter_size = 3
    if args.model == 'scnn':
        data_features = 300
        filter_num = 100
        dropout_rate = 0.5
        mini_batch = 50
        num_epochs = int(args.epochs)
        filter_blocks = None
        optimizer = Adadelta(lr=0.5)
    elif args.model == 'dcnn':
        data_features = 16
        filter_num = 64
        dropout_rate = 0
        mini_batch = 128
        num_epochs = int(args.epochs)
        filter_blocks = [10, 10, 4, 4]
        optimizer = SGD(momentum=0.9)

    print('Loading Dataset')
    x, y, idx, vec, data_sequence = load_data(args.dataset,
                                              args.model,
                                              datapath,
                                              vecpath,
                                              alphabet,
                                              subset=subset)
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.20)

    if args.model == 'scnn':
        vocab_size = len(idx) + 1
        embeddings = np.zeros((vocab_size, data_features))
        for w, i in idx.items():
            embeddings[i, :] = vec[w]
        print('Embeddings shape: ', embeddings.shape)
    elif args.model == 'dcnn':
        vocab_size = len(alphabet) + 1 + 1
        embeddings = None

    print('Training Model', args.model.upper() + '(' + args.dataset + ')')
    print(y_train.shape, y_train[0])
    network = CNN(x=x_train,
                  y=y_train,
                  data_features=data_features,
                  data_sequence=data_sequence,
                  num_labels=data_num_classes,
                  vocab_size=vocab_size,
                  embeddings=embeddings,
                  trainable=args.model == 'dcnn',
                  savepath=savepath,
                  saved=args.saved)
    if not args.saved:
        network.graph(type=args.model,
                      filter_num=filter_num,
                      num_filter_block=filter_blocks,
                      dropout_rate=dropout_rate)
    network.train(optimizer=optimizer,
                  num_epochs=num_epochs,
                  mini_batch=mini_batch)
    print('')
    print('Testing on: %d' % len(y_test))
    network.test(x=x_test, y=y_test)
Code example #19
emb = concatenate(emb_list)

emb = Flatten()(emb)

x = Dense(100, activation='relu', kernel_regularizer=reg2)(emb)
x = Dropout(.7)(x)
x = Dense(100, activation='relu', kernel_regularizer=reg2)(x)
x = Dropout(.7)(x)
x = Dense(1, activation='sigmoid', kernel_regularizer=reg2)(x)

model = Model(inputs=[emb_input[i] for i in range(DF_train.shape[1])],
              outputs=[x])

model.summary()

optimizer = Adadelta(lr=3.0, rho=0.95, epsilon=None, decay=0.0)

model.compile(optimizer=optimizer, loss=['binary_crossentropy'])

model.fit([DF_train[i] for i in list(DF_train)],
          y_train,
          epochs=60,
          batch_size=4096,
          shuffle=True)

Y_pred = model.predict([DF_train[i] for i in list(DF_train)])
y_pred = model.predict([DF_test[i] for i in list(DF_train)])

print(roc_auc_score(y_test, y_pred))
print(roc_auc_score(y_train, Y_pred))
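The snippet assumes `emb_input`, `emb_list`, and `reg2` were built earlier. A hypothetical reconstruction, with one `Input` plus one `Embedding` per categorical column of `DF_train` (the cardinality computation and embedding size are illustrative):

from keras.layers import Input, Embedding
from keras import regularizers

reg2 = regularizers.l2(1e-4)                              # assumed regularizer
emb_input = [Input(shape=(1,)) for _ in list(DF_train)]   # one input per column
emb_list = [Embedding(int(DF_train[c].max()) + 1, 8)(inp)  # assumed cardinality
            for c, inp in zip(list(DF_train), emb_input)]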