Code Example #1
    def model_cnn(self):
        """ 
            a) Model trained from scratch with stacked Conv2D blocks and three max-pooling layers
            b) Model compiled with the RMSprop optimizer and binary_crossentropy loss
            
        """

        from tensorflow.keras.optimizers import RMSprop
        # L1/L2 regulariser (note: defined here but not applied to any layer below)
        l1_l2 = tf.keras.regularizers.l1_l2(l1=0.01, l2=0.01)

        model = tf.keras.models.Sequential([
            tf.keras.layers.InputLayer(input_shape=self.input_dim),
            tf.keras.layers.Lambda(lambda x: tf.image.resize(x, [100, 100])),
            tf.keras.layers.BatchNormalization(axis=-1),
            tf.keras.layers.Dropout(self.dropout),
            tf.keras.layers.Conv2D(16, (1, 1), activation='relu'),
            tf.keras.layers.BatchNormalization(axis=-1),
            tf.keras.layers.Conv2D(32, (1, 1), activation='relu'),
            tf.keras.layers.BatchNormalization(axis=-1),
            tf.keras.layers.Conv2D(64, (1, 1), activation='relu'),
            tf.keras.layers.BatchNormalization(axis=-1),
            tf.keras.layers.Conv2D(128, (2, 2), activation='relu'),
            tf.keras.layers.BatchNormalization(axis=-1),
            tf.keras.layers.MaxPooling2D(2, 2),
            tf.keras.layers.Dropout(self.dropout),
            tf.keras.layers.Conv2D(256, (2, 2), activation='relu'),
            tf.keras.layers.BatchNormalization(axis=-1),
            tf.keras.layers.MaxPooling2D(2, 2),
            tf.keras.layers.Dropout(self.dropout),
            tf.keras.layers.Conv2D(512, (2, 2), activation='relu'),
            tf.keras.layers.Dropout(self.dropout),
            tf.keras.layers.Conv2D(512, (2, 2), activation='relu'),
            tf.keras.layers.Dropout(self.dropout),
            tf.keras.layers.Conv2D(1024, (2, 2), activation='relu'),
            tf.keras.layers.BatchNormalization(axis=-1),
            tf.keras.layers.Conv2D(1024, (2, 2), activation='relu'),
            tf.keras.layers.BatchNormalization(axis=-1),
            tf.keras.layers.Conv2D(1024, (2, 2), activation='relu'),
            tf.keras.layers.Dropout(self.dropout),
            tf.keras.layers.Conv2D(2048, (3, 3), activation='relu'),
            tf.keras.layers.BatchNormalization(axis=-1),
            tf.keras.layers.Conv2D(2048, (3, 3), activation='relu'),
            tf.keras.layers.Dropout(self.dropout),
            tf.keras.layers.Conv2D(2048, (3, 3), activation='relu'),
            tf.keras.layers.BatchNormalization(axis=-1),
            tf.keras.layers.Dropout(self.dropout),
            tf.keras.layers.MaxPooling2D(2, 2),
            tf.keras.layers.Dropout(self.dropout),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(1024, activation='relu'),
            tf.keras.layers.BatchNormalization(axis=-1),
            tf.keras.layers.Dense(1024, activation='relu'),
            tf.keras.layers.BatchNormalization(axis=-1),
            tf.keras.layers.Dropout(self.dropout),
            tf.keras.layers.Dense(1024, activation='relu'),
            tf.keras.layers.BatchNormalization(axis=-1),
            tf.keras.layers.Dense(512, activation='relu'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Dropout(self.dropout),
            tf.keras.layers.Dense(512, activation='relu'),
            tf.keras.layers.BatchNormalization(axis=-1),
            tf.keras.layers.Dense(512, activation='relu'),
            tf.keras.layers.BatchNormalization(axis=-1),
            tf.keras.layers.Dense(256, activation='relu'),
            tf.keras.layers.BatchNormalization(axis=-1),
            tf.keras.layers.Dense(256, activation='relu'),
            tf.keras.layers.BatchNormalization(axis=-1),
            tf.keras.layers.Dense(128, activation='relu'),
            tf.keras.layers.BatchNormalization(axis=-1),
            tf.keras.layers.Dropout(self.dropout),
            tf.keras.layers.Dense(128, activation='relu'),
            tf.keras.layers.BatchNormalization(axis=-1),
            tf.keras.layers.Dense(64, activation='relu'),
            tf.keras.layers.Dense(32, activation='relu'),
            tf.keras.layers.Dense(1, activation='sigmoid')
        ])
        model.summary()

        model.compile(optimizer=RMSprop(0.001),
                      loss='binary_crossentropy',
                      metrics=['acc'])

        return model
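A minimal usage sketch, assumed rather than taken from the original source: model_cnn is an instance method, so the surrounding class only needs to expose self.input_dim and self.dropout before it is called. The wrapper class and its default values below are hypothetical.

import tensorflow as tf

class BinaryCnnClassifier:
    """Hypothetical host class showing the attributes model_cnn relies on."""

    def __init__(self, input_dim=(150, 150, 3), dropout=0.2):
        self.input_dim = input_dim   # image shape passed to the InputLayer
        self.dropout = dropout       # rate reused by every Dropout layer

    # the model_cnn method shown above would live here

# clf = BinaryCnnClassifier()
# model = clf.model_cnn()
# model.fit(train_ds, validation_data=val_ds, epochs=10)   # train_ds / val_ds are hypothetical tf.data datasets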
Code Example #2
# x = layers.Dense(1024, activation='relu')(x)
# # Add a dropout rate of 0.2
# x = layers.Dropout(0.2)(x)
# # Add a final sigmoid layer for classification
# x = layers.Dense(7, activation='sigmoid')(x)

# # Configure and compile the model
# model = Model(pre_trained_model.input, x)
# model.compile(loss='categorical_crossentropy',
#               optimizer=RMSprop(lr=0.0001),
#               metrics=['acc'])

# Configure and compile the model
model = Model(img_input, output)
model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(lr=0.001),
              metrics=['acc'])

# Train the model
history = model.fit_generator(train_generator,
                              steps_per_epoch=66,
                              epochs=100,
                              validation_data=validation_generator,
                              validation_steps=33,
                              verbose=2)

# Retrieve a list of accuracy results on training and test data
# sets for each training epoch
acc = history.history['acc']
val_acc = history.history['val_acc']
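A short follow-up sketch, not part of the original snippet: once acc and val_acc have been retrieved from history.history, they are usually plotted per epoch to compare training and validation behaviour.

import matplotlib.pyplot as plt

epochs_range = range(len(acc))
plt.plot(epochs_range, acc, label='Training accuracy')
plt.plot(epochs_range, val_acc, label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()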
Code Example #3
model = ResNet101(
    include_top=True,
    input_shape=(128, 862, 1),
    classes=2,
    pooling=None,
    weights=None,
)

model.summary()
# model.trainable = False

model.save('C:/nmb/nmb_data/h5/5s/densenet/resnet_rmsprop_1.h5')

# compile, train
op = RMSprop(lr=1e-3)
batch_size = 4

es = EarlyStopping(monitor='val_loss',
                   patience=20,
                   restore_best_weights=True,
                   verbose=1)
lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=1)
path = 'C:/nmb/nmb_data/h5/5s/densenet/resnet_rmsprop_1.h5'
mc = ModelCheckpoint(path, monitor='val_loss', verbose=1, save_best_only=True)

model.compile(optimizer=op,
              loss="sparse_categorical_crossentropy",
              metrics=['acc'])
history = model.fit(x_train,
                    y_train,
Code Example #4
        if version == 0:   # NOT USED
        #    m = Model(input=[x, x_maps], output=sam_vgg([x, x_maps]))
            print("Not Compiling SAM-VGG")   # new version
        #    m.compile(RMSprop(lr=1e-4), loss=[kl_divergence, correlation_coefficient, nss])
        elif version == 1:
            '''Hint about the problem: something here is not the output of a Keras
            layer, so it should be wrapped in a Lambda layer.
            When calling the Model API, the value of the outputs argument must be a
            tensor (or a list of tensors); here it is a list of lists of tensors,
            hence the error.'''
            #m = Model(input=[x, x_maps], output=sam_resnet([x, x_maps]))
            #m = Model(inputs=[x, x_maps], outputs=sam_resnet([x, x_maps]))  # new version
            m = ModelAux(inputs=[x, x_maps], outputs=sam_resnet([x, x_maps])) # final version

            print("Compiling SAM-ResNet")
            m.compile(RMSprop(lr=1e-4), 
                      loss=[kl_divergence, correlation_coefficient, nss])
            print("Compilado")
        else:
            raise NotImplementedError

        if phase == 'train':
            if nb_imgs_train % b_s != 0 or nb_imgs_val % b_s != 0:
                print("The number of training and validation images should be a multiple of the batch size. Please change your batch size in config.py accordingly.")
                exit()

            if version == 0:
                print("Training SAM-VGG")
                m.fit_generator(generator(b_s=b_s), nb_imgs_train, nb_epoch=nb_epoch,
                                validation_data=generator(b_s=b_s, phase_gen='val'), nb_val_samples=nb_imgs_val,
                                callbacks=[EarlyStopping(patience=3),
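The hint in the docstring above comes down to this: Model(inputs, outputs) expects a tensor or a flat list of tensors, never a list of lists. A small illustrative sketch of one common workaround, flattening the nested outputs (the original instead resolves this with the ModelAux subclass; here it is assumed that sam_resnet returns a nested list of three tensors):

from tensorflow.keras.models import Model

raw_outputs = sam_resnet([x, x_maps])                        # e.g. [[out_kl, out_cc, out_nss]]
flat_outputs = [t for group in raw_outputs for t in group]   # flatten to [out_kl, out_cc, out_nss]
m = Model(inputs=[x, x_maps], outputs=flat_outputs)
m.compile(RMSprop(lr=1e-4),
          loss=[kl_divergence, correlation_coefficient, nss])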
Code Example #5
print(x_train.shape, y_train.shape)  # (3628, 128, 862, 1) (3628,)
print(x_test.shape, y_test.shape)  # (908, 128, 862, 1) (908,)

model = MobileNet(
    include_top=True,
    input_shape=(128, 862, 1),
    classes=2,
    pooling=None,
    weights=None,
)
model.summary()
# model.trainable = False
model.save('C:\\nmb\\nmb_data\\h5\\pre_train\\mobilenet_rmsprop_.h5')

# compile, train
op = RMSprop(lr=1e-4)
batch_size = 32
es = EarlyStopping(monitor='val_loss',
                   patience=20,
                   restore_best_weights=True,
                   verbose=1)
lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=1)
path = 'C:\\nmb\\nmb_data\\h5\\pre_train\\mobilenet_rmsprop_1e4.h5'
mc = ModelCheckpoint(path, monitor='val_loss', verbose=1, save_best_only=True)
tb = TensorBoard(log_dir='C:/nmb/nmb_data/graph/' + 'mobilenet_rmsprop_1e4' +
                 "/",
                 histogram_freq=0,
                 write_graph=True,
                 write_images=True)
model.compile(optimizer=op,
              loss="sparse_categorical_crossentropy",
Code Example #6
File: train.py  Project: foolmarks/densenet
def train(opt,batchsize,learnrate,epochs,keras_hdf5,tboard):

    
    def step_decay(epoch):
        """
        Learning rate scheduler used by callback
        Reduces learning rate depending on number of epochs
        """
        lr = learnrate
        if epoch > 120:
            lr /= 1000
        elif epoch > 90:
            lr /= 100
        elif epoch > 60:
            lr /= 10
        elif epoch > 20:
            lr /= 2
        return lr
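    # A worked example of the schedule above, assuming learnrate=0.001 (illustrative only):
    #   epochs  0-20  -> 0.001
    #   epochs 21-60  -> 0.0005
    #   epochs 61-90  -> 0.0001
    #   epochs 91-120 -> 0.00001
    #   epochs  >120  -> 0.000001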
    

    '''
    -------------------------------------------------------------------
    DATASET PREPARATION
    50k images used for training, 8k for validation and 2k for evaluation
    -------------------------------------------------------------------
    '''
    print('\nDATASET PREPARATION:')
    # CIFAR10 dataset has 60k images. Training set is 50k, test set is 10k.
    # Each image is 32x32 pixels with 3 colour channels of 8 bits each
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    # Scale image data from range 0:255 to range 0.0:1.0
    # Also converts train & test data to float from uint8
    x_train = (x_train/255.0).astype(np.float32)
    x_test = (x_test/255.0).astype(np.float32)

    # one-hot encode the labels
    y_train = to_categorical(y_train, num_classes=10)
    y_test = to_categorical(y_test, num_classes=10)

    # hold back 2k samples from test set for evaluation
    # Note: this does not guarantee a balanced split across all classes
    x_eval = x_test[8000:]
    y_eval = y_test[8000:]
    x_test = x_test[:8000]
    y_test = y_test[:8000]


    '''
    -------------------------------------------------------------------
    NETWORK 
    Create the model, print its structure
    densenet function arguments are for CIFAR-10/100 use case
    -------------------------------------------------------------------
    '''
    model = densenet(input_shape=(32,32,3),classes=10,k=12,drop_rate=0.2,theta=0.5,weight_decay=1e-4,convlayers=[16,16,16])

    print('\n'+DIVIDER)
    print(' Model Summary')
    print(DIVIDER)
    print(model.summary())
    print("Model Inputs: {ips}".format(ips=(model.inputs)))
    print("Model Outputs: {ops}".format(ops=(model.outputs)))


    '''
    -------------------------------------------------------------------
    CREATE CALLBACKS
    -------------------------------------------------------------------
    '''
    chkpt_call = ModelCheckpoint(filepath=keras_hdf5,
                                 monitor='val_accuracy',
                                 verbose=1,
                                 save_best_only=True)
    
    
    tb_call = TensorBoard(log_dir=tboard,                          
                          update_freq='epoch')
    

    lr_scheduler_call = LearningRateScheduler(schedule=step_decay,
                                              verbose=1)

    lr_plateau_call = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                        cooldown=0,
                                        patience=5,
                                        min_lr=0.5e-6)

    callbacks_list = [tb_call, lr_scheduler_call, lr_plateau_call, chkpt_call]


    '''
    -------------------------------------------------------------------
    TRAINING
    Training data will be augmented:
      - random rotation
      - random horiz flip
      - random linear shift up and down
      - random shear & zoom
    -------------------------------------------------------------------
    '''

    data_augment = ImageDataGenerator(rotation_range=10,
                                      horizontal_flip=True,
                                      height_shift_range=0.1,
                                      width_shift_range=0.1,
                                      shear_range=0.1,
                                      zoom_range=0.1)

    train_generator = data_augment.flow(x=x_train,
                                        y=y_train,
                                        batch_size=batchsize,
                                        shuffle=True)
                                  

    # Optimizer
    if (opt=='rms'):
        # RMSprop optimizer
        opt = RMSprop(lr=learnrate)
    else:
        #SGD optimizer with Nesterov momentum as per original paper
        opt = SGD(lr=learnrate, momentum=0.9, nesterov=True)

    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])


    print('\n'+DIVIDER)
    print(' Training model with training set..')
    print(' Using',opt,'optimizer..')
    print(DIVIDER)


    # run training
    model.fit(x=train_generator,
              epochs=epochs,
              steps_per_epoch=train_generator.n//train_generator.batch_size,
              validation_data=(x_test, y_test),
              callbacks=callbacks_list,
              verbose=1)


    print("\nTensorBoard can be opened with the command: tensorboard --logdir={dir} --host localhost --port 6006".format(dir=tboard))


    '''
    -------------------------------------------------------------------
    EVALUATION
    -------------------------------------------------------------------
    '''

    print('\n'+DIVIDER)
    print(' Evaluate model accuracy with validation set..')
    print(DIVIDER)

    scores = model.evaluate(x_eval, y_eval, verbose=1)
    print ('Evaluation Loss    : ', scores[0])
    print ('Evaluation Accuracy: ', scores[1])

    return

Code Example #7
        x = Flatten()(x)

        x = Dense(1024)(x)
        x = Activation("relu")(x)
        x = BatchNormalization(axis=chanDim)(x)
        x = Dropout(0.5)(x)

        x = Dense(1)(x)

        predictions = Activation("sigmoid")(x)

        # create the model
        sample_cnn = Model(inputs=images, outputs=predictions)

        # compile the model so that it uses RMSprop
        sample_cnn.compile(optimizer=RMSprop(lr=lr, decay=lr / epochs),
                           loss="binary_crossentropy",
                           metrics=["acc"])

        print(sample_cnn.summary())

except RuntimeError as e:
    print(e)

start = time.time()
try:
    with tf.device('/device:GPU:7'):
        # train model
        history = sample_cnn.fit(train_x,
                                 train_y,
                                 validation_data=(test_x, test_y),
Code Example #8
x = Conv3D(32,(3,3,3),strides=(2,2,2),activation=None)(x)
x = LeakyReLU(alpha=0.1)(x)
x = BatchNormalization()(x)
x = Conv3D(32,(3,3,3),strides=(2,2,2),activation=None)(x)
x = LeakyReLU(alpha=0.1)(x)
x = BatchNormalization()(x)
x = Flatten()(x)
x = Dropout(.5)(x)

x = Dense(32,activation=None)(x)
x = LeakyReLU(alpha=0.1)(x)
x = Dense(16,activation=None)(x)
x = LeakyReLU(alpha=0.1)(x)
outputs = Dense(1,activation=None)(x)
model = Model(inputs=inputs,outputs=outputs) 
model.compile(RMSprop(),loss='mean_squared_error')

model.summary()

history = model.fit_generator(
	train_gen, 
	steps_per_epoch=int(video_size*split/batch_size), 
	validation_data=val_gen, 
	validation_steps=int(video_size*(1-split)/batch_size),
	epochs=epochs,
	verbose=True,
	callbacks=[ModelCheckpoint('./data/weights.hdf5',save_best_only=True)])

model2 = load_model(filepath='./data/weights.hdf5')
model2.compile(RMSprop(),loss='mean_squared_error')
Code Example #9
File: day628-yuce-rnn.py  Project: edclol/study
# Train and evaluate a stacked GRU model regularized with dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras.optimizers import RMSprop

model = Sequential()
model.add(
    layers.GRU(32,
               dropout=0.1,
               recurrent_dropout=0.5,
               return_sequences=True,
               input_shape=(None, float_data.shape[-1])))
model.add(layers.GRU(64, activation='relu', dropout=0.1,
                     recurrent_dropout=0.5))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
model.summary()
history = model.fit_generator(train_gen,
                              steps_per_epoch=500,
                              epochs=40,
                              validation_data=val_gen,
                              validation_steps=val_steps)

# Train and evaluate an LSTM on reversed sequences
# from tensorflow.keras.preprocessing import sequence
# from tensorflow.keras.datasets import imdb
#
# max_features = 10000
# maxlen = 500
#
# (x_train, y_train), (x_test, y_test) = imdb.load_data(
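The GRU model above relies on train_gen, val_gen, val_steps and float_data being defined elsewhere. A hedged sketch of the sliding-window generator typically paired with this setup (the lookback, step and target column index are assumptions following the common temperature-forecasting pattern):

import numpy as np

def timeseries_generator(data, lookback, delay, min_index, max_index,
                         shuffle=False, batch_size=128, step=6):
    """Yield (samples, targets): past windows of data and the value delay steps ahead."""
    if max_index is None:
        max_index = len(data) - delay - 1
    i = min_index + lookback
    while True:
        if shuffle:
            rows = np.random.randint(min_index + lookback, max_index, size=batch_size)
        else:
            if i + batch_size >= max_index:
                i = min_index + lookback
            rows = np.arange(i, min(i + batch_size, max_index))
            i += len(rows)
        samples = np.zeros((len(rows), lookback // step, data.shape[-1]))
        targets = np.zeros((len(rows),))
        for j, row in enumerate(rows):
            indices = range(row - lookback, row, step)
            samples[j] = data[indices]
            targets[j] = data[row + delay][1]   # column 1 assumed to hold the target signal
        yield samples, targets

# train_gen = timeseries_generator(float_data, lookback=1440, delay=144,
#                                  min_index=0, max_index=200000, shuffle=True)
# val_gen = timeseries_generator(float_data, lookback=1440, delay=144,
#                                min_index=200001, max_index=300000)
# val_steps = (300000 - 200001 - 1440) // 128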
Code Example #10
model_regr.add(InputLayer(input_shape=(num_signals, )))

# Add several dense aka. fully-connected layers.
# You can experiment with different designs.
model_regr.add(Dense(128, activation=activation))
model_regr.add(Dense(64, activation=activation))
model_regr.add(Dense(32, activation=activation))
model_regr.add(Dense(16, activation=activation))
model_regr.add(Dense(8, activation=activation))

# Add a layer for the output of the Neural Network.
# This is 1-dimensional to match the stock-return data.
model_regr.add(Dense(1))

# Compile the model but don't train it yet.
model_regr.compile(loss='mse', metrics=['mae'], optimizer=RMSprop(0.001))

# Show the model.
model_regr.summary()

fit_args = \
    {
        # For efficiency, the model is trained on batches of data.
        'batch_size': 4096,

        # Number of iterations aka. epochs over the training-set.
        'epochs': 40,

        # Fraction of the training-set used for validation after
        # each training-epoch, to assess how well the model performs
        # on unseen data.
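A brief usage note, sketched rather than taken from the truncated snippet: once the fit_args dict above is closed, it is unpacked straight into Model.fit with **. The array names and validation fraction below are assumptions.

# fit_args would end with something like 'validation_split': 0.15, and then:
# history = model_regr.fit(x=x_train_scaled, y=y_train_scaled, **fit_args)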
Code Example #11
import csv
import pickle
import numpy as np
from tensorflow.keras.models import model_from_yaml
from tensorflow.keras.optimizers import RMSprop

if __name__ == '__main__':

    #load model
    with open('model/model.yaml', 'r') as f:
        model = model_from_yaml(f.read())
    model.load_weights('model/model_weight.h5')

    print('Loaded model.')

    opt = RMSprop(lr=0.00001)
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    #get data
    metadata = pickle.load(open('data/test.pickle', 'rb'))
    x_test = metadata['x']

    # model.predict(x_test)

    #evaluate
    print('Evaluate model.')
    result = {'y': model.predict(x_test), 'y_name': metadata['filename']}

    with open('data/sample_submission.csv', 'r') as r, open('result.csv',
Code Example #12
def train_happy_sad_model():

    mf_callbacks = modelfit_callback()

    # Define and Compile the Model.images are 150 X 150
    model = tf.keras.models.Sequential([
        # input shape with size of the image 150x150 with 3 bytes color
        # 1st convolution
        tf.keras.layers.Conv2D(16, (3, 3),
                               activation='relu',
                               input_shape=(150, 150, 3)),
        tf.keras.layers.MaxPooling2D(2, 2),
        # 2nd convolution
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        # 3rd convolution
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        # 4th convolution
        # tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
        # tf.keras.layers.MaxPooling2D(2,2),
        # 5th convolution
        # tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
        # tf.keras.layers.MaxPooling2D(2,2),
        # Flatten the results to feed into a DNN
        tf.keras.layers.Flatten(),
        # 512 neuron hidden layer
        tf.keras.layers.Dense(512, activation='relu'),
        # Only 1 output neuron. It will contain a value from 0 to 1, where 0 represents the 'sad' class and 1 the 'happy' class
        tf.keras.layers.Dense(1, activation='sigmoid')
    ])

    model.compile(
        loss='binary_crossentropy',
        # learning rate
        optimizer=RMSprop(lr=0.001),
        metrics=['accuracy'])

    # create an instance of an ImageDataGenerator called train_datagen
    # And a train_generator by calling train_datagen.flow_from_directory

    # All images will be rescaled by 1./255
    train_datagen = ImageDataGenerator(rescale=1 / 255)

    # use a target_size of 150 X 150.
    # Flow training images in batches of 128 using train_datagen generator
    train_generator = train_datagen.flow_from_directory(
        f'{ROOT_DIR}/data/h-or-s',  # This is the source directory for training images
        target_size=(150, 150),  # All images will be resized to 150x150
        batch_size=128,
        class_mode=
        'binary'  # Since we use binary_crossentropy loss, we need binary labels
    )

    # Expected output: 'Found 80 images belonging to 2 classes'

    # call model.fit and train for a number of epochs.
    # model.fit_generator is deprecated; model.fit supports generators
    history = model.fit(
        train_generator,
        #steps_per_epoch=8,
        epochs=15,
        verbose=1,
        callbacks=[mf_callbacks])

    return history.history['accuracy'][-1]
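The function above calls modelfit_callback(), which is not shown. A hedged sketch of the kind of factory it usually is in this exercise, returning a callback that stops training once a target accuracy is reached (the 99.9% threshold is an assumption):

import tensorflow as tf

def modelfit_callback():
    class StopAtAccuracy(tf.keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs=None):
            logs = logs or {}
            # stop once training accuracy passes the (assumed) threshold
            if logs.get('accuracy', 0.0) > 0.999:
                print('\nReached target accuracy, cancelling training.')
                self.model.stop_training = True
    return StopAtAccuracy()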
Code Example #13
    model.add(Dropout(0.4))
    model.add(Dense(1, activation='sigmoid'))
    return model


def build_gan(generator, discriminator):
    model = Sequential()
    # Combined Generator -> Discriminator model
    model.add(generator)
    model.add(discriminator)
    return model


discriminator = build_discriminator(img_shape)
discriminator.compile(loss='binary_crossentropy',
                      optimizer=RMSprop(lr=0.00008, clipvalue=1.0, decay=1e-8),
                      metrics=['accuracy'])

generator = build_generator(z_dim)

# Keep Discriminator’s parameters constant for Generator training
discriminator.trainable = False

gan = build_gan(generator, discriminator)
gan.compile(loss='binary_crossentropy',
            optimizer=RMSprop(lr=0.0004, clipvalue=1.0, decay=1e-8))


losses = []
accuracies = []
iteration_checkpoints = []
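A note on the trainable flag above: Keras captures the trainable state at compile time, so the discriminator compiled earlier keeps learning when trained directly, while the copy wrapped inside gan (compiled after discriminator.trainable = False) stays frozen during generator updates. A hedged sketch of the usual alternating training step (batch construction is simplified; real_imgs and the normally distributed latent vectors are assumptions):

import numpy as np

def train_step(real_imgs, batch_size, z_dim):
    real = np.ones((batch_size, 1))
    fake = np.zeros((batch_size, 1))

    # 1) update the discriminator on real and generated images
    z = np.random.normal(0, 1, (batch_size, z_dim))
    gen_imgs = generator.predict(z)
    d_loss_real = discriminator.train_on_batch(real_imgs, real)
    d_loss_fake = discriminator.train_on_batch(gen_imgs, fake)

    # 2) update the generator through the combined model (discriminator frozen)
    z = np.random.normal(0, 1, (batch_size, z_dim))
    g_loss = gan.train_on_batch(z, real)

    return 0.5 * np.add(d_loss_real, d_loss_fake), g_loss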
Code Example #14
def rnn_gha(sim_dict_path,
            gha_incorrect=True,
            use_dataset='train_set',
            get_layer_list=None,
            exp_root='/home/nm13850/Documents/PhD/python_v2/experiments/',
            verbose=False,
            test_run=False):
    """
    gets activations from hidden units.

    1. load simulation dict (with data info) (*_load_dict.pickle)
        sim_dict can be fed in from sim script, or loaded separately
    2. load model - get structure and details
    3. run dataset through once, recording accuracy per item/class
    4. run on 2nd model to get hid acts

    :param sim_dict_path: path to the dictionary for this experiment condition
    :param gha_incorrect: GHA for ALL items (True) or just correct items (False)
    :param use_dataset: GHA for train/test data
    :param get_layer_list: if None, gha all layers, else list of layer names to gha
    :param exp_root: root to save experiments
    :param verbose:
    :param test_run: Set test = True to just do one unit per layer

    :return: dict with hid acts per layer.  saved as dict so different shaped arrays don't matter too much
    """

    print('**** ff_gha GHA() ****')

    # # # PART 1 # # #
    # # load details from dict
    if type(sim_dict_path) is str:
        if os.path.isfile(sim_dict_path):
            print(f"sim_dict_path: {sim_dict_path}")
            sim_dict = load_dict(sim_dict_path)
            full_exp_cond_path, sim_dict_name = os.path.split(sim_dict_path)

        elif os.path.isfile(os.path.join(exp_root, sim_dict_path)):
            sim_dict_path = os.path.join(exp_root, sim_dict_path)
            print(f"sim_dict_path: {sim_dict_path}")
            sim_dict = load_dict(sim_dict_path)
            full_exp_cond_path, sim_dict_name = os.path.split(sim_dict_path)

    elif type(sim_dict_path) is dict:
        sim_dict = sim_dict_path
        sim_dict_path = sim_dict['training_info']['sim_dict_path']
        full_exp_cond_path = sim_dict['topic_info']['exp_cond_path']

    else:
        raise FileNotFoundError(sim_dict_path)

    os.chdir(full_exp_cond_path)
    print(f"set_path to full_exp_cond_path: {full_exp_cond_path}")

    focussed_dict_print(sim_dict, 'sim_dict')

    # # # load datasets
    data_dict = sim_dict['data_info']
    if use_dataset == 'generator':
        vocab_dict = load_dict(
            os.path.join(data_dict["data_path"], data_dict["vocab_dict"]))
        n_cats = data_dict["n_cats"]
        x_data_path = sim_dict['training_info']['x_data_path']
        # y_data_path = sim_dict['training_info']['y_data_path']
        # n_items = 'unknown'

    else:
        # load data from somewhere
        # n_items = data_dict["n_items"]
        n_cats = data_dict["n_cats"]
        hdf5_path = sim_dict['topic_info']["dataset_path"]

        x_data_path = hdf5_path
        # y_data_path = '/home/nm13850/Documents/PhD/python_v2/datasets/' \
        #               'objects/ILSVRC2012/imagenet_hdf5/y_df.csv'

        seq_data = pd.read_csv(data_dict["seqs"],
                               header=None,
                               names=['seq1', 'seq2', 'seq3'])
        print(f"\nseq_data: {seq_data.shape}\n{seq_data.head()}")

        x_data = np.load(data_dict["x_data"])
        print("\nshape of x_data: {}".format(np.shape(x_data)))

        y_labels = np.loadtxt(data_dict["y_labels"],
                              delimiter=',').astype('int8')
        print(f"\ny_labels:\n{y_labels}")
        print(np.shape(y_labels))

        y_data = to_categorical(y_labels, num_classes=30)
        print(f"\ny_data:\n{y_data}")
        print(np.shape(y_data))

    # # # data preprocessing
    # # # if network is cnn but data is 2d (e.g., MNIST)
    # if len(np.shape(x_data)) != 4:
    #     if sim_dict['model_info']['overview']['model_type'] == 'cnn':
    #         width, height = sim_dict['data_info']['image_dim']
    #         x_data = x_data.reshape(x_data.shape[0], width, height, 1)
    #         print(f"\nRESHAPING x_data to: {np.shape(x_data)}")

    # # other details
    # hid_units = sim_dict['model_info']['layers']['hid_layers']['hid_totals']["analysable"]
    optimizer = sim_dict['model_info']["overview"]["optimizer"]
    loss_func = sim_dict['model_info']["overview"]["loss_func"]
    batch_size = sim_dict['model_info']["overview"]["batch_size"]
    timesteps = sim_dict['model_info']["overview"]["timesteps"]
    serial_recall = sim_dict['model_info']["overview"]["serial_recall"]
    x_data_type = sim_dict['model_info']["overview"]["x_data_type"]
    end_seq_cue = sim_dict['model_info']["overview"]["end_seq_cue"]
    act_func = sim_dict['model_info']["overview"]["act_func"]

    # input_dim = data_dict["X_size"]
    # output_dim = data_dict["n_cats"]

    # Output files
    output_filename = sim_dict["topic_info"]["output_filename"]
    print(f"\nOutput file: {output_filename}")

    # # # # PART 2 # # #
    print("\n**** THE MODEL ****")
    model_name = sim_dict['model_info']['overview']['trained_model']

    if os.path.isfile(model_name):
        loaded_model = load_model(model_name)
    else:
        training_dir, sim_dict_name = os.path.split(sim_dict_path)
        print(f"training_dir: {training_dir}\n"
              f"sim_dict_name: {sim_dict_name}")
        if os.path.isfile(os.path.join(training_dir, model_name)):
            loaded_model = load_model(os.path.join(training_dir, model_name))
        else:
            raise FileNotFoundError(f"trained model not found: {model_name}")

    loaded_model.trainable = False

    model_details = loaded_model.get_config()
    # print_nested_round_floats(model_details)
    focussed_dict_print(model_details, 'model_details')

    n_layers = len(model_details['layers'])
    model_dict = dict()

    # # turn off "trainable" and get useful info

    for layer in range(n_layers):
        # set to not train
        # model_details['layers'][layer]['config']['trainable'] = 'False'

        if verbose:
            print(f"Model layer {layer}: {model_details['layers'][layer]}")

        # # get useful info
        layer_dict = {
            'layer': layer,
            'name': model_details['layers'][layer]['config']['name'],
            'class': model_details['layers'][layer]['class_name']
        }

        if 'units' in model_details['layers'][layer]['config']:
            layer_dict['units'] = model_details['layers'][layer]['config'][
                'units']
        if 'activation' in model_details['layers'][layer]['config']:
            layer_dict['act_func'] = model_details['layers'][layer]['config'][
                'activation']
        if 'filters' in model_details['layers'][layer]['config']:
            layer_dict['filters'] = model_details['layers'][layer]['config'][
                'filters']
        if 'kernel_size' in model_details['layers'][layer]['config']:
            layer_dict['size'] = model_details['layers'][layer]['config'][
                'kernel_size'][0]
        if 'pool_size' in model_details['layers'][layer]['config']:
            layer_dict['size'] = model_details['layers'][layer]['config'][
                'pool_size'][0]
        if 'strides' in model_details['layers'][layer]['config']:
            layer_dict['strides'] = model_details['layers'][layer]['config'][
                'strides'][0]
        if 'rate' in model_details['layers'][layer]['config']:
            layer_dict["rate"] = model_details['layers'][layer]['config'][
                'rate']

        # # set and save layer details
        model_dict[layer] = layer_dict

    # # my model summary
    model_df = pd.DataFrame.from_dict(
        data=model_dict,
        orient='index',
        columns=[
            'layer', 'name', 'class', 'act_func', 'units', 'filters', 'size',
            'strides', 'rate'
        ],
    )

    print(f"\nmodel_df\n{model_df}")

    # # make new df with just layers of interest
    if get_layer_list is None:
        key_layers_df = model_df
        get_layer_list = key_layers_df['name'].tolist()

    key_layers_df = model_df.loc[model_df['name'].isin(get_layer_list)]

    key_layers_df.reset_index(inplace=True)
    del key_layers_df['index']
    key_layers_df.index.name = 'index'
    key_layers_df = key_layers_df.drop(columns=['size', 'strides', 'rate'])

    # # add column ('n_units_filts') to say how many things need gha per layer (number of units or filters)
    # # add zeros to rows with no units or filters
    key_layers_df.loc[:, 'n_units_filts'] = key_layers_df.units.fillna(
        0) + key_layers_df.filters.fillna(0)

    # print(f"\nkey_layers_df:\n{key_layers_df}")

    key_layers_df.loc[:,
                      "n_units_filts"] = key_layers_df["n_units_filts"].astype(
                          int)

    # # get to total number of units or filters in key layers of the network
    key_n_units_fils = sum(key_layers_df['n_units_filts'])

    print(f"\nkey_layers_df:\n{key_layers_df.head()}")
    print(f"key_n_units_fils: {key_n_units_fils}")
    '''I currently get the output layer; make sure to keep this in so I can do class correlation.'''

    # # # set dir to save gha stuff # # #
    hid_act_items = 'all'
    if not gha_incorrect:
        hid_act_items = 'correct'
    gha_folder = f'{hid_act_items}_{use_dataset}_gha'
    if test_run:
        gha_folder = os.path.join(gha_folder, 'test')

    cond_name = sim_dict['topic_info']['output_filename']
    condition_path = find_path_to_dir(long_path=full_exp_cond_path,
                                      target_dir=cond_name)
    gha_path = os.path.join(condition_path, gha_folder)

    if not os.path.exists(gha_path):
        os.makedirs(gha_path)
    os.chdir(gha_path)
    print(f"\nsaving hid_acts to: {gha_path}")

    # # # get hid acts for each timestep even if output is free-recall
    # print("\nchanging layer attribute: return_sequnces")
    # for layer in loaded_model.layers:
    #     # set to return sequences = True
    #     # model_details['layers'][layer]['config']['return_sequences'] = True
    #     if hasattr(layer, 'return_sequences'):
    #         layer.return_sequences = True
    #         print(layer.name, layer.return_sequences)
    #
    #     if verbose:
    #         print(f"Model layer {layer}: {model_details['layers'][layer]}")

    # # sort optimizers
    # # I don't think the choice of optimizer should actually matter since I am not training.
    sgd = SGD(momentum=.9)  # decay=sgd_lr / max_epochs)
    this_optimizer = sgd

    if optimizer == 'SGD_no_momentum':
        this_optimizer = SGD(momentum=0.0,
                             nesterov=False)  # decay=sgd_lr / max_epochs)
    elif optimizer == 'SGD_Nesterov':
        this_optimizer = SGD(momentum=.1,
                             nesterov=True)  # decay=sgd_lr / max_epochs)
    elif optimizer == 'SGD_mom_clip':
        this_optimizer = SGD(momentum=.9,
                             clipnorm=1.)  # decay=sgd_lr / max_epochs)
    elif optimizer == 'dougs':
        print("I haven't added the code for doug's momentum to GHA script yet")
        this_optimizer = None
        # this_optimizer = dougsMomentum(momentum=.9)

    elif optimizer == 'adam':
        this_optimizer = Adam(amsgrad=False)
    elif optimizer == 'adam_amsgrad':
        # simulations run prior to 05122019 did not have this option, and may have used amsgrad under the name 'adam'
        this_optimizer = Adam(amsgrad=True)

    elif optimizer == 'RMSprop':
        this_optimizer = RMSprop()
    elif optimizer == 'Adagrad':
        this_optimizer = Adagrad()
    elif optimizer == 'Adadelta':
        this_optimizer = Adadelta()
    elif optimizer == 'Adamax':
        this_optimizer = Adamax()
    elif optimizer == 'Nadam':
        this_optimizer = Nadam()

    # # # PART 3 get_scores() # # #
    loaded_model.compile(loss=loss_func,
                         optimizer=this_optimizer,
                         metrics=['accuracy'])

    # # load test_seqs if they are there, else generate some
    data_path = sim_dict['data_info']['data_path']

    if not os.path.exists(data_path):
        if os.path.exists(switch_home_dirs(data_path)):
            data_path = switch_home_dirs(data_path)
        else:
            raise FileExistsError(f'data path not found: {data_path}')

    print(f'data_path: {data_path}')

    # test_filename = f'seq{timesteps}_v{n_cats}_960_test_seq_labels.npy'
    test_filename = f'seq{timesteps}_v{n_cats}_1per_ts_test_seq_labels.npy'
    test_seq_path = os.path.join(data_path, test_filename)
    test_label_seqs = np.load(test_seq_path)

    print(f'test_label_seqs: {np.shape(test_label_seqs)}\n{test_label_seqs}\n')

    test_label_name = os.path.join(data_path, test_filename[:-10])

    seq_words_df = pd.read_csv(f"{test_label_name}words.csv")

    # test_IPC_name = os.path.join(data_path, f"seq{timesteps}_v{n_cats}_960_test_IPC.pickle")
    test_IPC_name = os.path.join(
        data_path, f"seq{timesteps}_v{n_cats}_1per_ts_test_IPC.pickle")

    IPC_dict = load_dict(test_IPC_name)

    # else:
    #
    #     n_seqs = 30*batch_size
    #
    #     test_label_seqs = get_label_seqs(n_labels=n_cats, seq_len=timesteps,
    #                                      repetitions=serial_recall, n_seqs=n_seqs)
    #     test_label_name = f"{output_filename}_{np.shape(test_label_seqs)[0]}_test_seq_"
    #
    #
    #     # print(f"test_label_name: {test_label_name}")
    #     np.save(f"{test_label_name}labels.npy", test_label_seqs)
    #
    #     seq_words_df = spell_label_seqs(test_label_seqs=test_label_seqs,
    #                                     test_label_name=f"{test_label_name}words.csv",
    #                                     vocab_dict=vocab_dict, save_csv=True)
    if verbose:
        print(seq_words_df.head())

    scores_dict = get_test_scores(
        model=loaded_model,
        data_dict=data_dict,
        test_label_seqs=test_label_seqs,
        serial_recall=serial_recall,
        x_data_type=x_data_type,
        # output_type=output_type,
        end_seq_cue=end_seq_cue,
        batch_size=batch_size,
        verbose=verbose)

    mean_IoU = scores_dict['mean_IoU']
    prop_seq_corr = scores_dict['prop_seq_corr']

    # IPC_dict = seq_items_per_class(label_seqs=test_label_seqs, vocab_dict=vocab_dict)
    # test_IPC_name = f"{output_filename}_{n_seqs}_test_IPC.pickle"
    # with open(test_IPC_name, "wb") as pickle_out:
    #     pickle.dump(IPC_dict, pickle_out, protocol=pickle.HIGHEST_PROTOCOL)

    # # PART 5
    print("\n**** Get Hidden unit activations ****")
    hid_acts_dict = dict()

    # # loop through key layers df
    gha_key_layers = []
    for index, row in key_layers_df.iterrows():
        if test_run:
            if index > 3:
                continue

        layer_number, layer_name, layer_class = row['layer'], row['name'], row[
            'class']
        print(f"\n{layer_number}. name: {layer_name}; class: {layer_class}")

        # if layer_class not in get_classes:  # no longer using this - skip class types not in list
        if layer_name not in get_layer_list:  # skip layers/classes not in list
            continue

        else:
            # record hid acts
            layer_activations = get_layer_acts(model=loaded_model,
                                               layer_name=layer_name,
                                               data_dict=data_dict,
                                               test_label_seqs=test_label_seqs,
                                               serial_recall=serial_recall,
                                               end_seq_cue=end_seq_cue,
                                               batch_size=batch_size,
                                               verbose=verbose)

            layer_acts_shape = np.shape(layer_activations)

            print(f"\nlen(layer_acts_shape): {len(layer_acts_shape)}")

            converted_to_2d = False  # set to True if 4d acts have been converted to 2d
            if len(layer_acts_shape) == 2:
                hid_acts = layer_activations

            elif len(layer_acts_shape) == 3:
                # if not serial_recall:
                #     ValueError(f"layer_acts_shape: {layer_acts_shape}"
                #                f"\n3d expected only for serial recall")
                # else:
                hid_acts = layer_activations

            # elif len(layer_acts_shape) == 4:  # # call mean_act_conv
            #     hid_acts = kernel_to_2d(layer_activations, verbose=True)
            #     layer_acts_shape = np.shape(hid_acts)
            #     converted_to_2d = True

            else:
                raise ValueError(
                    f"Unexpected number of dimensions for layer activations {layer_acts_shape}"
                )

            hid_acts_dict[index] = {
                'layer_name': layer_name,
                'layer_class': layer_class,
                "layer_shape": layer_acts_shape,
                'hid_acts': hid_acts
            }

            if converted_to_2d:
                hid_acts_dict[index]['converted_to_2d'] = True

            print(f"\nlayer {index}. layer_acts_shape: {layer_acts_shape}\n")

            # # save distplot for sanity check
            sns.distplot(np.ravel(hid_acts))
            plt.title(str(layer_name))
            plt.savefig(f"{layer_name}_act_distplot.png")
            plt.close()

        print("\n**** saving info to summary page and dictionary ****")

        hid_act_filenames = {'2d': None, 'any_d': None}
        dict_2d_save_name = f'{output_filename}_hid_act.pickle'
        with open(dict_2d_save_name,
                  "wb") as pkl:  # 'wb' mean 'w'rite the file in 'b'inary mode
            pickle.dump(hid_acts_dict, pkl)
        # np.save(dict_2d_save_name, hid_acts_dict)
        hid_act_filenames['2d'] = dict_2d_save_name

    cond = sim_dict["topic_info"]["cond"]
    run = sim_dict["topic_info"]["run"]
    if test_run:
        run = 'test'

    hid_units = sim_dict['model_info']['layers']['hid_layers']['hid_totals'][
        'analysable']

    trained_for = sim_dict["training_info"]["trained_for"]
    end_accuracy = sim_dict["training_info"]["acc"]
    dataset = sim_dict["data_info"]["dataset"]
    gha_date = int(datetime.datetime.now().strftime("%y%m%d"))
    gha_time = int(datetime.datetime.now().strftime("%H%M"))

    # gha_acc = scores_dict['gha_acc']
    # n_cats_correct = scores_dict['n_cats_correct']

    # # GHA_info_dict
    gha_dict_name = f"{output_filename}_GHA_dict.pickle"
    gha_dict_path = os.path.join(gha_path, gha_dict_name)

    gha_dict = {
        "topic_info": sim_dict['topic_info'],
        "data_info": sim_dict['data_info'],
        "model_info": sim_dict['model_info'],
        "training_info": sim_dict['training_info'],
        "GHA_info": {
            "use_dataset": use_dataset,
            'x_data_path': x_data_path,
            'y_data_path': test_label_name,
            'IPC_dict_path': test_IPC_name,
            'gha_path': gha_path,
            'gha_dict_path': gha_dict_path,
            "gha_incorrect": gha_incorrect,
            "hid_act_files": hid_act_filenames,
            'gha_key_layers': gha_key_layers,
            'key_n_units_fils': key_n_units_fils,
            "gha_date": gha_date,
            "gha_time": gha_time,
            "scores_dict": scores_dict,
        }
    }

    with open(gha_dict_name, "wb") as pickle_out:
        pickle.dump(gha_dict, pickle_out)

    if verbose:
        focussed_dict_print(gha_dict, 'gha_dict', ['GHA_info'])

    gha_info = [
        cond, run, output_filename,
        sim_dict['model_info']['overview']['model_name'], n_layers, hid_units,
        dataset, use_dataset, gha_incorrect, n_cats, timesteps, x_data_type,
        act_func, serial_recall, trained_for, end_accuracy, mean_IoU,
        prop_seq_corr, test_run, gha_date, gha_time
    ]

    # # check if gha_summary.csv exists

    # # save sel summary in exp folder not condition folder
    exp_name = sim_dict['topic_info']['exp_name']
    exp_path = find_path_to_dir(long_path=gha_path, target_dir=exp_name)
    os.chdir(exp_path)

    if not os.path.isfile(exp_name + "_GHA_summary.csv"):
        gha_summary = open(exp_name + "_GHA_summary.csv", 'w')
        mywriter = csv.writer(gha_summary)
        summary_headers = [
            "cond", "run", 'filename', 'model', "n_layers", "hid_units",
            "dataset", "GHA_on", 'incorrect', "n_cats", "timesteps",
            "x_data_type", "act_func", "serial_recall", "trained_for",
            "train_acc", "mean_IoU", "prop_seq_corr", "test_run", "gha_date",
            "gha_time"
        ]

        mywriter.writerow(summary_headers)
        print(f"creating summary csv at: {exp_path}")

    else:
        gha_summary = open(exp_name + "_GHA_summary.csv", 'a')
        mywriter = csv.writer(gha_summary)
        print(f"appending to summary csv at: {exp_path}")

    mywriter.writerow(gha_info)
    gha_summary.close()

    # make a list of dict names to do sel on
    if not os.path.isfile(f"{exp_name}_dict_list_for_sel.csv"):
        dict_list = open(f"{exp_name}_dict_list_for_sel.csv", 'w')
        mywriter = csv.writer(dict_list)
    else:
        dict_list = open(f"{exp_name}_dict_list_for_sel.csv", 'a')
        mywriter = csv.writer(dict_list)

    mywriter.writerow([gha_dict_name[:-7]])
    dict_list.close()

    print(f"\nadded to list for selectivity analysis: {gha_dict_name[:-7]}")

    print("\nend of ff_gha")

    return gha_dict
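A minimal invocation sketch (the pickle path is hypothetical): rnn_gha accepts either a path to a *_load_dict.pickle file or the already loaded simulation dict, and returns the GHA info dict that it also pickles to disk.

# gha_dict = rnn_gha(sim_dict_path='cond1/run1/cond1_run1_load_dict.pickle',
#                    gha_incorrect=True,
#                    use_dataset='train_set',
#                    verbose=True,
#                    test_run=True)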
Code Example #15
File: siamese_alt.py  Project: mukhilazhagan/one_shot
input_a = Input(shape=input_shape)
input_b = Input(shape=input_shape)

# because we re-use the same instance `base_network`,
# the weights of the network
# will be shared across the two branches
processed_a = base_network(input_a)
processed_b = base_network(input_b)

distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)(
    [processed_a, processed_b])

model = Model([input_a, input_b], distance)

# train
rms = RMSprop()
model.compile(loss=contrastive_loss, optimizer=rms, metrics=[accuracy])
model.fit([train_pairs[:, 0], train_pairs[:, 1]],
          train_y,
          batch_size=128,
          epochs=epochs,
          validation_data=([test_pairs[:, 0], test_pairs[:, 1]], test_y))

#%% Prediction
# compute final accuracy on training and test sets
y_pred = model.predict([train_pairs[:, 0], train_pairs[:, 1]])
train_acc = compute_accuracy(train_y, y_pred)
y_pred = model.predict([test_pairs[:, 0], test_pairs[:, 1]])
test_acc = compute_accuracy(test_y, y_pred)

print('* Accuracy on training set: %0.2f%%' % (100 * train_acc))
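The snippet assumes euclidean_distance, eucl_dist_output_shape, contrastive_loss and accuracy are defined elsewhere. A hedged sketch of the definitions commonly paired with this siamese setup (the margin of 1 and the 0.5 decision threshold are assumptions):

from tensorflow.keras import backend as K

def euclidean_distance(vects):
    x, y = vects
    sum_square = K.sum(K.square(x - y), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sum_square, K.epsilon()))

def eucl_dist_output_shape(shapes):
    shape1, shape2 = shapes
    return (shape1[0], 1)

def contrastive_loss(y_true, y_pred):
    # label 1 marks a similar pair, label 0 a dissimilar one
    margin = 1
    square_pred = K.square(y_pred)
    margin_square = K.square(K.maximum(margin - y_pred, 0))
    return K.mean(y_true * square_pred + (1 - y_true) * margin_square)

def accuracy(y_true, y_pred):
    # a predicted distance below 0.5 counts as a match
    return K.mean(K.equal(y_true, K.cast(y_pred < 0.5, y_true.dtype)))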
Code Example #16
                  activation='relu',
                  padding='same'),
    BatchNormalization(),
    Convolution2D(filters=16,
                  kernel_size=(3, 3),
                  activation='relu',
                  padding='same'),
    BatchNormalization(),
    Flatten(),
    Dense(units=32, activation="relu"),
    Dropout(0.15),
    Dense(units=16, activation="relu"),
    Dropout(0.05),
    Dense(units=10, activation="softmax")
])
optim = RMSprop(lr=0.001)
model.compile(optimizer=optim,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(x_train,
                    to_categorical(y_train),
                    epochs=80,
                    validation_split=0.15,
                    verbose=1)
eval = model.evaluate(x_test, to_categorical(y_test))
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper left')
Code Example #17
    def createModel(self):
        self.model_instance += 1
        clear_session()

        features, label = self.getDataset()
        X_train, y_train = self.createLag(features, label)
        X_train = X_train[:, self.lags]

        learning_rate = float(self.hyperparameters["Learning_Rate"].get())
        momentum = float(self.hyperparameters["Momentum"].get())
        optimizers = {
            "Adam": Adam(learning_rate=learning_rate),
            "SGD": SGD(learning_rate=learning_rate, momentum=momentum),
            "RMSprop": RMSprop(learning_rate=learning_rate, momentum=momentum)
        }

        shape = (X_train.shape[1], X_train.shape[2])
        print(shape)
        model_choice = self.model_var.get()

        if not self.do_optimization:
            model = Sequential()
            model.add(Input(shape=shape))

            if model_choice == 0:
                model.add(Flatten())

            layers = self.no_optimization_choice_var.get()
            for i in range(layers):
                neuron_number = self.neuron_numbers_var[i].get()
                activation_function = self.activation_var[i].get()
                if model_choice == 0:
                    model.add(
                        Dense(neuron_number, activation=activation_function))

                elif model_choice == 1:
                    model.add(
                        Conv1D(filters=neuron_number,
                               kernel_size=2,
                               activation=activation_function))
                    model.add(MaxPooling1D(pool_size=2))

                elif model_choice == 2:
                    if i == layers - 1:
                        model.add(
                            LSTM(neuron_number,
                                 activation=activation_function,
                                 return_sequences=False))
                        model.add(Dropout(0.2))
                    else:
                        model.add(
                            LSTM(neuron_number,
                                 activation=activation_function,
                                 return_sequences=True))
                        model.add(Dropout(0.2))

                elif model_choice == 3:
                    if i == layers - 1:
                        model.add(
                            Bidirectional(
                                LSTM(neuron_number,
                                     activation=activation_function,
                                     return_sequences=False)))
                        model.add(Dropout(0.2))
                    else:
                        model.add(
                            Bidirectional(
                                LSTM(neuron_number,
                                     activation=activation_function,
                                     return_sequences=True)))
                        model.add(Dropout(0.2))

            if model_choice == 1:
                model.add(Flatten())
                model.add(Dense(32))

            model.add(Dense(1, activation=self.output_activation.get()))
            model.compile(
                optimizer=optimizers[self.hyperparameters["Optimizer"].get()],
                loss=self.hyperparameters["Loss_Function"].get())

            history = model.fit(
                X_train,
                y_train,
                epochs=self.hyperparameters["Epoch"].get(),
                batch_size=self.hyperparameters["Batch_Size"].get(),
                verbose=1)
            loss = history.history["loss"][-1]
            self.train_loss.set(loss)

        elif self.do_optimization:
            layer = self.optimization_choice_var.get()

            if model_choice == 0:

                def build_model(hp):
                    model = Sequential()
                    model.add(Input(shape=shape))
                    model.add(Flatten())
                    for i in range(layer):
                        n_min = self.neuron_min_number_var[i].get()
                        n_max = self.neuron_max_number_var[i].get()
                        step = int((n_max - n_min) / 4)
                        model.add(
                            Dense(units=hp.Int('MLP_' + str(i),
                                               min_value=n_min,
                                               max_value=n_max,
                                               step=step),
                                  activation='relu'))
                    model.add(Dense(1))
                    model.compile(
                        optimizer=optimizers[
                            self.hyperparameters["Optimizer"].get()],
                        loss=self.hyperparameters["Loss_Function"].get())
                    return model

                name = str(self.model_instance) + ". MLP"

            elif model_choice == 1:

                def build_model(hp):
                    model = Sequential()
                    model.add(Input(shape=shape))
                    for i in range(layer):
                        n_min = self.neuron_min_number_var[i].get()
                        n_max = self.neuron_max_number_var[i].get()
                        step = int((n_max - n_min) / 4)
                        model.add(
                            Conv1D(filters=hp.Int("CNN_" + str(i),
                                                  min_value=n_min,
                                                  max_value=n_max,
                                                  step=step),
                                   kernel_size=2,
                                   activation="relu"))
                        model.add(MaxPooling1D(pool_size=2))

                    model.add(Flatten())
                    model.add(Dense(32))
                    model.add(Dense(1))
                    model.compile(
                        optimizer=optimizers[
                            self.hyperparameters["Optimizer"].get()],
                        loss=self.hyperparameters["Loss_Function"].get())
                    return model

                name = str(self.model_instance) + ". CNN"

            elif model_choice == 2:

                def build_model(hp):
                    model = Sequential()
                    model.add(Input(shape=shape))
                    for i in range(layer):
                        n_min = self.neuron_min_number_var[i].get()
                        n_max = self.neuron_max_number_var[i].get()
                        step = int((n_max - n_min) / 4)
                        model.add(
                            LSTM(units=hp.Int("LSTM_" + str(i),
                                              min_value=n_min,
                                              max_value=n_max,
                                              step=step),
                                 activation='relu',
                                 return_sequences=True))
                        if i == layer - 1:
                            model.add(
                                LSTM(units=hp.Int("LSTM_" + str(i),
                                                  min_value=n_min,
                                                  max_value=n_max,
                                                  step=step),
                                     activation='relu',
                                     return_sequences=False))

                    model.add(Dense(1))
                    model.compile(
                        optimizer=optimizers[
                            self.hyperparameters["Optimizer"].get()],
                        loss=self.hyperparameters["Loss_Function"].get())
                    return model

                name = str(self.model_instance) + ". LSTM"

            elif model_choice == 3:

                def build_model(hp):
                    model = Sequential()
                    model.add(Input(shape=shape))
                    for i in range(layer):
                        n_min = self.neuron_min_number_var[i].get()
                        n_max = self.neuron_max_number_var[i].get()
                        step = int((n_max - n_min) / 4)
                        model.add(
                            Bidirectional(
                                LSTM(units=hp.Int("LSTM_" + str(i),
                                                  min_value=n_min,
                                                  max_value=n_max,
                                                  step=step),
                                     activation='relu',
                                     return_sequences=True)))
                        if i == layer - 1:
                            model.add(
                                Bidirectional(
                                    LSTM(units=hp.Int("LSTM_" + str(i),
                                                      min_value=n_min,
                                                      max_value=n_max,
                                                      step=step),
                                         activation='relu',
                                         return_sequences=False)))

                    model.add(Dense(1))
                    model.compile(
                        optimizer=optimizers[
                            self.hyperparameters["Optimizer"].get()],
                        loss=self.hyperparameters["Loss_Function"].get())
                    return model

                name = str(self.model_instance) + ". Bi-LSTM"

            tuner = RandomSearch(build_model,
                                 objective='loss',
                                 max_trials=25,
                                 executions_per_trial=2,
                                 directory=self.runtime,
                                 project_name=name)

            tuner.search(X_train,
                         y_train,
                         epochs=self.hyperparameters["Epoch"].get(),
                         batch_size=self.hyperparameters["Batch_Size"].get())
            hps = tuner.get_best_hyperparameters(num_trials=1)[0]
            model = tuner.hypermodel.build(hps)

            history = model.fit(
                X_train,
                y_train,
                epochs=self.hyperparameters["Epoch"].get(),
                batch_size=self.hyperparameters["Batch_Size"].get(),
                verbose=1)
            loss = history.history["loss"][-1]
            self.train_loss.set(loss)

            for i in range(layer):
                if model_choice == 0:
                    self.best_model_neurons[i].set(
                        model.get_layer(index=i + 1).get_config()["units"])
                elif model_choice == 1:
                    self.best_model_neurons[i].set(
                        model.get_layer(index=(2 * i)).get_config()["filters"])
                elif model_choice == 2:
                    self.best_model_neurons[i].set(
                        model.get_layer(index=i).get_config()["units"])
                elif model_choice == 3:
                    self.best_model_neurons[i].set(
                        model.get_layer(
                            index=i).get_config()["layer"]["config"]["units"])

        self.model = model
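
The tuner workflow above is tied to the surrounding GUI state. A minimal, self-contained sketch of the same keras-tuner pattern (the data shapes, search ranges, and the keras_tuner import path are assumptions, not the application's values):

import numpy as np
import keras_tuner as kt
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Input, LSTM, Dense

X_train = np.random.rand(256, 30, 4)   # (samples, timesteps, features)
y_train = np.random.rand(256, 1)

def build_model(hp):
    model = Sequential()
    model.add(Input(shape=(30, 4)))
    model.add(LSTM(units=hp.Int("LSTM_0", min_value=32, max_value=128, step=32),
                   activation='relu',
                   return_sequences=False))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mse')
    return model

tuner = kt.RandomSearch(build_model,
                        objective='loss',
                        max_trials=5,
                        executions_per_trial=1,
                        directory='tuner_runs',
                        project_name='lstm_demo')
tuner.search(X_train, y_train, epochs=3, batch_size=32)
best_hp = tuner.get_best_hyperparameters(num_trials=1)[0]
best_model = tuner.hypermodel.build(best_hp)

RandomSearch caches trial results under directory/project_name, so rerunning with the same names resumes from the saved trials unless that directory is cleared.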
Code Example #18
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(16, (3, 3),
                           activation='relu',
                           input_shape=(128, 128, 3)),
    tf.keras.layers.MaxPool2D((2, 2)),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPool2D((2, 2)),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPool2D((2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(4, activation='softmax')
])

print(model.summary())
# Compile model
model.compile(optimizer=RMSprop(),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Rescale images and initialize ImageDataGenerators
train_datagen = ImageDataGenerator(rescale=1 / 255.0)
validation_datagen = ImageDataGenerator(rescale=1 / 255.0)

# Read images from training directory and initialize train_datagenerator
TRAINING_DIR = 'data/tennis-data/training'
train_generator = train_datagen.flow_from_directory(TRAINING_DIR,
                                                    target_size=(128, 128),
                                                    batch_size=10,
                                                    class_mode='categorical')

# Read images from validation directory and initialize validation_datagenerator
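
The excerpt stops before the validation generator is created. A hedged continuation (the validation path and epoch count are placeholders inferred from the training setup above):

VALIDATION_DIR = 'data/tennis-data/validation'   # placeholder path
validation_generator = validation_datagen.flow_from_directory(
    VALIDATION_DIR,
    target_size=(128, 128),
    batch_size=10,
    class_mode='categorical')

history = model.fit(train_generator,
                    epochs=15,                   # illustrative epoch count
                    validation_data=validation_generator,
                    verbose=1)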
Code Example #19
        y = Dropout(0.2)(y)
    x = AveragePooling2D()(y)

# add classifier on top
# after average pooling, size of feature map is 1 x 1
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes,
                kernel_initializer='he_normal',
                activation='softmax')(y)

# instantiate and compile model
# orig paper uses SGD but RMSprop works better for DenseNet
model = Model(inputs=inputs, outputs=outputs)
model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(1e-3),
              metrics=['acc'])
model.summary()
plot_model(model, to_file="cifar10-densenet.png", show_shapes=True)

# prepare model saving directory
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'cifar10_densenet_model.{epoch:02d}.h5'
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)

# prepare callbacks for model saving and for learning rate reducer
checkpoint = ModelCheckpoint(filepath=filepath,
                             monitor='val_acc',
                             verbose=1,
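
The ModelCheckpoint call is cut off at this point. A hedged sketch of a typical checkpoint plus learning-rate-reducer pairing for the comment above (arguments are illustrative, not necessarily the original's; tensorflow.keras is assumed for the imports):

import numpy as np
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau

checkpoint = ModelCheckpoint(filepath=filepath,
                             monitor='val_acc',
                             verbose=1,
                             save_best_only=True)
lr_reducer = ReduceLROnPlateau(monitor='val_acc',
                               factor=np.sqrt(0.1),
                               cooldown=0,
                               patience=5,
                               min_lr=0.5e-6)
callbacks = [checkpoint, lr_reducer]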
Code Example #20
    def __init__(
        self,
        image_size,
        channels,
        conv_layers,
        feature_maps,
        filter_shapes,
        strides,
        dense_layers,
        dense_neurons,
        dense_dropouts,
        latent_dim,
        activation="relu",
        eps_mean=0.0,
        eps_std=1.0,
    ):

        self.history = LossHistory()

        # tensorflow.config.experimental_run_functions_eagerly(False)

        # check that arguments are proper length;
        if len(filter_shapes) != conv_layers:
            raise Exception(
                "number of convolutional layers must equal length of filter_shapes list"
            )
        if len(strides) != conv_layers:
            raise Exception(
                "number of convolutional layers must equal length of strides list"
            )
        if len(feature_maps) != conv_layers:
            raise Exception(
                "number of convolutional layers must equal length of feature_maps list"
            )
        if len(dense_neurons) != dense_layers:
            raise Exception(
                "number of dense layers must equal length of dense_neurons list"
            )
        if len(dense_dropouts) != dense_layers:
            raise Exception(
                "number of dense layers must equal length of dense_dropouts list"
            )

        # even shaped filters may cause problems in theano backend;
        # even_filters = [f for pair in filter_shapes for f in pair if f % 2 == 0]
        # if K.image_dim_ordering() == 'th' and len(even_filters) > 0:
        #    warnings.warn('Even shaped filters may cause problems in Theano backend')
        # if K.image_dim_ordering() == 'channels_first' and len(even_filters) > 0:
        #    warnings.warn('Even shaped filters may cause problems in Theano backend')

        self.eps_mean = eps_mean
        self.eps_std = eps_std
        self.image_size = image_size

        # define input layer;
        if K.image_data_format() == "channels_first":
            self.input = Input(shape=(channels, image_size[0], image_size[1]))
        else:
            self.input = Input(shape=(image_size[0], image_size[1], channels))

        # define convolutional encoding layers;
        self.encode_conv = []
        layer = Convolution2D(
            feature_maps[0],
            filter_shapes[0],
            padding="same",
            activation=activation,
            strides=strides[0],
        )(self.input)
        self.encode_conv.append(layer)
        for i in range(1, conv_layers):
            layer = Convolution2D(
                feature_maps[i],
                filter_shapes[i],
                padding="same",
                activation=activation,
                strides=strides[i],
            )(self.encode_conv[i - 1])
            self.encode_conv.append(layer)

        # define dense encoding layers;
        self.flat = Flatten()(self.encode_conv[-1])
        self.encode_dense = []
        layer = Dense(dense_neurons[0], activation=activation)(
            Dropout(dense_dropouts[0])(self.flat)
        )
        self.encode_dense.append(layer)
        for i in range(1, dense_layers):
            layer = Dense(dense_neurons[i], activation=activation)(
                Dropout(dense_dropouts[i])(self.encode_dense[i - 1])
            )
            self.encode_dense.append(layer)

        # define embedding layer;
        self.z_mean = Dense(latent_dim)(self.encode_dense[-1])
        self.z_log_var = Dense(latent_dim)(self.encode_dense[-1])
        self.z = Lambda(self._sampling, output_shape=(latent_dim,))(
            [self.z_mean, self.z_log_var]
        )

        # save all decoding layers for generation model;
        self.all_decoding = []

        # define dense decoding layers;
        self.decode_dense = []
        layer = Dense(dense_neurons[-1], activation=activation)
        self.all_decoding.append(layer)
        self.decode_dense.append(layer(self.z))
        for i in range(1, dense_layers):
            layer = Dense(dense_neurons[-i - 1], activation=activation)
            self.all_decoding.append(layer)
            self.decode_dense.append(layer(self.decode_dense[i - 1]))

        # dummy model to get image size after encoding convolutions;
        self.decode_conv = []
        if K.image_data_format() == "channels_first":
            dummy_input = np.ones((1, channels, image_size[0], image_size[1]))
        else:
            dummy_input = np.ones((1, image_size[0], image_size[1], channels))
        dummy = Model(self.input, self.encode_conv[-1])
        conv_size = dummy.predict(dummy_input).shape
        layer = Dense(conv_size[1] * conv_size[2] * conv_size[3], activation=activation)
        self.all_decoding.append(layer)
        self.decode_dense.append(layer(self.decode_dense[-1]))
        reshape = Reshape(conv_size[1:])
        self.all_decoding.append(reshape)
        self.decode_conv.append(reshape(self.decode_dense[-1]))

        # define deconvolutional decoding layers;
        for i in range(1, conv_layers):
            if K.image_data_format() == "channels_first":
                dummy_input = np.ones((1, channels, image_size[0], image_size[1]))
            else:
                dummy_input = np.ones((1, image_size[0], image_size[1], channels))
            dummy = Model(self.input, self.encode_conv[-i - 1])
            conv_size = list(dummy.predict(dummy_input).shape)

            if K.image_data_format() == "channels_first":
                conv_size[1] = feature_maps[-i]
            else:
                conv_size[3] = feature_maps[-i]

            layer = Conv2DTranspose(
                feature_maps[-i - 1],
                filter_shapes[-i],
                padding="same",
                activation=activation,
                strides=strides[-i],
            )
            self.all_decoding.append(layer)
            self.decode_conv.append(layer(self.decode_conv[i - 1]))

        layer = Conv2DTranspose(
            channels,
            filter_shapes[0],
            padding="same",
            activation="sigmoid",
            strides=strides[0],
        )
        self.all_decoding.append(layer)
        self.output = layer(self.decode_conv[-1])

        # build model;
        self.model = Model(self.input, self.output)
        self.optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
        # KLD loss
        self.model.add_loss(
            -0.5
            * K.mean(
                1 + self.z_log_var - K.square(self.z_mean) - K.exp(self.z_log_var),
                axis=None,
            )
        )
        self.model.compile(optimizer=self.optimizer, loss=self._vae_loss1)
        # self.model.compile(optimizer=self.optimizer)
        # self.model.compile(optimizer=self.optimizer, loss=objectives.MeanSquaredError());
        self.model.summary()

        # model for embeddings;
        self.embedder = Model(self.input, self.z_mean)

        # model for generation;
        self.decoder_input = Input(shape=(latent_dim,))
        self.generation = []
        self.generation.append(self.all_decoding[0](self.decoder_input))
        for i in range(1, len(self.all_decoding)):
            self.generation.append(self.all_decoding[i](self.generation[i - 1]))
        self.generator = Model(self.decoder_input, self.generation[-1])
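
The class above relies on self._sampling and self._vae_loss1, neither of which appears in this excerpt. A standard reparameterization-trick sampler, given here as an assumption rather than the project's verbatim code:

from tensorflow.keras import backend as K

def sampling(args, eps_mean=0.0, eps_std=1.0):
    """Reparameterization trick: z = mean + std * epsilon, so gradients flow through both."""
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=K.shape(z_mean), mean=eps_mean, stddev=eps_std)
    return z_mean + K.exp(0.5 * z_log_var) * epsilon

_vae_loss1 would then typically contribute only the reconstruction term (pixel-wise MSE or binary cross-entropy against the sigmoid output), since the KL divergence is already attached through add_loss in the constructor.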
Code Example #21
    image = train_images[num].reshape([28, 28])
    plt.title('Sample: %d  Label: %d' % (num, label))
    plt.imshow(image, cmap=plt.get_cmap('gray_r'))
    plt.show()


display_sample(100)

model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784, )))
model.add(Dense(10, activation='softmax'))

model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])

history = model.fit(train_images,
                    train_labels,
                    batch_size=100,
                    epochs=10,
                    verbose=2,
                    validation_data=(test_images, test_labels))

score = model.evaluate(test_images, test_labels, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

for x in range(1000):
    test_image = test_images[x, :].reshape(1, 784)
Code Example #22
File: img_gen.py Project: carusocr/misc_ml
train_generator = train_datagen.flow_from_directory(
    train_dir,  # target dir
    target_size=(300, 300),  # this resizes images as they're loaded
    batch_size=128,
    class_mode='binary')

val_datagen = ImageDataGenerator(rescale=1. / 255)  # rescale normalizes data

valgen = val_datagen.flow_from_directory(
    val_dir,  #target dir
    target_size=(300, 300),  # this resizes images as they're loaded
    batch_size=32,
    class_mode='binary')

model.compile(
    loss='binary_crossentropy',
    optimizer=RMSprop(
        lr=0.001),  # RMSprop allows for adjustment of learning rate
    metrics=['acc'])

#training
history = model.fit_generator(train_generator,
                              steps_per_epoch=8,
                              epochs=15,
                              validation_data=valgen,
                              validation_steps=8,
                              verbose=2)

for fn in uploaded.keys():
    #predicting images
    path = '/content/' + fn
    img = image.load_img(path, target_size=(300, 300))
    x = image.img_to_array(img)
Code Example #23
print(len(os.listdir('/tmp/cats-v-dogs/testing/cats/')))
print(len(os.listdir('/tmp/cats-v-dogs/testing/dogs/')))

model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(16,(3,3), activation = 'relu', input_shape=(150,150,3)),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(32,(3,3), activation = 'relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(64,(3,3), activation = 'relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation = 'relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

model.compile(optimizer=RMSprop(lr=0.001), loss='binary_crossentropy', metrics=['acc'])

TRAINING_DIR = '/tmp/cats-v-dogs/training/'
train_datagen = ImageDataGenerator(rescale=1.0/255.)

train_generator = train_datagen.flow_from_directory(TRAINING_DIR,
                                                         batch_size=100,
                                                         class_mode='binary', 
                                                         target_size=(150,150))

VALIDATION_DIR = '/tmp/cats-v-dogs/testing/'
validation_datagen = ImageDataGenerator(rescale=1.0/255.)

validation_generator = validation_datagen.flow_from_directory(VALIDATION_DIR,
                                                         batch_size=100,
                                                         class_mode='binary', 
Code Example #24
File: train.py Project: notha99y/personalKB
model = Sequential([
    Conv2D(16, (3, 3), activation="relu",
           input_shape=(height, width, channel)),
    MaxPooling2D(2, 2),
    Conv2D(32, (3, 3), activation="relu"),
    MaxPooling2D(2, 2),
    Conv2D(64, (3, 3), activation="relu"),
    MaxPooling2D(2, 2),
    Flatten(),
    Dense(512, activation="relu"),
    Dense(1, activation="sigmoid"),
])

model.compile(
    optimizer=RMSprop(lr=1e-3),
    loss="binary_crossentropy",
    metrics=["accuracy"],
)

print(model.summary())

train_datagen = ImageDataGenerator(rescale=1 / 255.0)
train_dir = Path(".") / "dogvscat" / "train"
val_dir = Path(".") / "dogvscat" / "validation"
train_generator = train_datagen.flow_from_directory(
    train_dir,
    batch_size=batch_size,
    class_mode="binary",
    target_size=(height, width),
)
Code Example #25
def build_cyclegan(shapes,
                   source_name='source',
                   target_name='target',
                   kernel_size=3,
                   patchgan=False,
                   identity=False):
    """Build the CycleGAN

    1) Build target and source discriminators
    2) Build target and source generators
    3) Build the adversarial network

    Arguments:
    shapes (tuple): source and target shapes
    source_name (string): string appended to the source dis/gen model names
    target_name (string): string appended to the target dis/gen model names
    kernel_size (int): kernel size for the encoder/decoder
        or dis/gen models
    patchgan (bool): whether to use patchgan on discriminator
    identity (bool): whether to use identity loss

    Returns:
    (list): 2 generators, 2 discriminators,
        and 1 adversarial model

    """

    source_shape, target_shape = shapes
    lr = 2e-4
    decay = 6e-8
    gt_name = "gen_" + target_name
    gs_name = "gen_" + source_name
    dt_name = "dis_" + target_name
    ds_name = "dis_" + source_name

    # build target and source generators
    g_target = build_generator(source_shape,
                               target_shape,
                               kernel_size=kernel_size,
                               name=gt_name)
    g_source = build_generator(target_shape,
                               source_shape,
                               kernel_size=kernel_size,
                               name=gs_name)
    print('---- TARGET GENERATOR ----')
    g_target.summary()
    print('---- SOURCE GENERATOR ----')
    g_source.summary()

    # build target and source discriminators
    d_target = build_discriminator(target_shape,
                                   patchgan=patchgan,
                                   kernel_size=kernel_size,
                                   name=dt_name)
    d_source = build_discriminator(source_shape,
                                   patchgan=patchgan,
                                   kernel_size=kernel_size,
                                   name=ds_name)
    print('---- TARGET DISCRIMINATOR ----')
    d_target.summary()
    print('---- SOURCE DISCRIMINATOR ----')
    d_source.summary()

    optimizer = RMSprop(lr=lr, decay=decay)
    d_target.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
    d_source.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])

    d_target.trainable = False
    d_source.trainable = False

    # build the computational graph for the adversarial model
    # forward cycle network and target discriminator
    source_input = Input(shape=source_shape)
    fake_target = g_target(source_input)
    preal_target = d_target(fake_target)
    reco_source = g_source(fake_target)

    # backward cycle network and source discriminator
    target_input = Input(shape=target_shape)
    fake_source = g_source(target_input)
    preal_source = d_source(fake_source)
    reco_target = g_target(fake_source)

    # if we use identity loss, add 2 extra loss terms
    # and outputs
    if identity:
        iden_source = g_source(source_input)
        iden_target = g_target(target_input)
        loss = ['mse', 'mse', 'mae', 'mae', 'mae', 'mae']
        loss_weights = [1., 1., 10., 10., 0.5, 0.5]
        inputs = [source_input, target_input]
        outputs = [
            preal_source, preal_target, reco_source, reco_target, iden_source,
            iden_target
        ]
    else:
        loss = ['mse', 'mse', 'mae', 'mae']
        loss_weights = [1., 1., 10., 10.]
        inputs = [source_input, target_input]
        outputs = [preal_source, preal_target, reco_source, reco_target]

    # build adversarial model
    adv = Model(inputs, outputs, name='adversarial')
    optimizer = RMSprop(lr=lr * 0.5, decay=decay * 0.5)
    adv.compile(loss=loss,
                loss_weights=loss_weights,
                optimizer=optimizer,
                metrics=['accuracy'])
    print('---- ADVERSARIAL NETWORK ----')
    adv.summary()

    return g_source, g_target, d_source, d_target, adv
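
A hedged usage sketch for build_cyclegan (the domain shapes and names below are placeholders; build_generator and build_discriminator are assumed to live in the same module):

source_shape = (32, 32, 1)   # e.g. a grayscale source domain
target_shape = (32, 32, 3)   # e.g. an RGB target domain

g_source, g_target, d_source, d_target, adv = build_cyclegan(
    (source_shape, target_shape),
    source_name='gray',
    target_name='color',
    kernel_size=3,
    patchgan=True)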
Code Example #26
def main():
    if args.crop_size:
        print('Using crops of shape ({}, {})'.format(args.crop_size,
                                                     args.crop_size))
    else:
        print('Using full size images')

    with K.get_session().as_default():
        # K.set_session(sess)

        all_ids = np.array(generate_ids(args.data_dirs, args.clahe))
        np.random.seed(args.seed)
        kfold = KFold(n_splits=args.n_folds, shuffle=True)

        splits = [s for s in kfold.split(all_ids)]
        folds = [int(f) for f in args.fold.split(",")]
        for fold in folds:
            encoded_alias = encode_params(args.clahe,
                                          args.preprocessing_function,
                                          args.stretch_and_mean)
            city = "all"
            if args.city:
                city = args.city.lower()
            best_model_file = '{}/{}_{}_{}.h5'.format(args.models_dir,
                                                      encoded_alias, city,
                                                      args.network)
            channels = 8
            if args.ohe_city:
                channels = 12
            model = make_model(args.network,
                               (args.crop_size, args.crop_size, channels))

            if args.weights is None:
                print('No weights passed, training from scratch')
            else:
                print('Loading weights from {}'.format(args.weights))
                model.load_weights(args.weights, by_name=True)
            freeze_model(model, args.freeze_till_layer)

            optimizer = RMSprop(lr=args.learning_rate)
            if args.optimizer:
                if args.optimizer == 'rmsprop':
                    optimizer = RMSprop(lr=args.learning_rate)
                elif args.optimizer == 'adam':
                    optimizer = Adam(lr=args.learning_rate)
                elif args.optimizer == 'sgd':
                    optimizer = SGD(lr=args.learning_rate,
                                    momentum=0.9,
                                    nesterov=True)

            train_ind, test_ind = splits[fold]
            train_ids = all_ids[train_ind]
            val_ids = all_ids[test_ind]
            if args.city:
                val_ids = [id for id in val_ids if args.city in id[0]]
                train_ids = [id for id in train_ids if args.city in id[0]]
            print('Training fold #{}, {} in train_ids, {} in val_ids'.format(
                fold, len(train_ids), len(val_ids)))
            masks_gt = get_groundtruth(args.data_dirs)
            # if args.clahe:
            #     template = 'CLAHE-MUL-PanSharpen/MUL-PanSharpen_{id}.tif'
            # else:
            #     template = 'MUL-PanSharpen/MUL-PanSharpen_{id}.tif'
            template = 'PS-MS/SN3_roads_train_AOI_5_Khartoum_PS-MS_{id}.tif'

            train_generator = MULSpacenetDataset(
                data_dirs=args.data_dirs,
                wdata_dir=args.wdata_dir,
                clahe=args.clahe,
                batch_size=args.batch_size,
                image_ids=train_ids,
                masks_dict=masks_gt,
                image_name_template=template,
                seed=args.seed,
                ohe_city=args.ohe_city,
                stretch_and_mean=args.stretch_and_mean,
                preprocessing_function=args.preprocessing_function,
                crops_per_image=args.crops_per_image,
                crop_shape=(args.crop_size, args.crop_size),
                random_transformer=RandomTransformer(horizontal_flip=True,
                                                     vertical_flip=True),
            )

            val_generator = MULSpacenetDataset(
                data_dirs=args.data_dirs,
                wdata_dir=args.wdata_dir,
                clahe=args.clahe,
                batch_size=1,
                image_ids=val_ids,
                image_name_template=template,
                masks_dict=masks_gt,
                seed=args.seed,
                ohe_city=args.ohe_city,
                stretch_and_mean=args.stretch_and_mean,
                preprocessing_function=args.preprocessing_function,
                shuffle=False,
                crops_per_image=1,
                crop_shape=(args.crop_size, args.crop_size),
                random_transformer=None)
            best_model = ModelCheckpoint(filepath=best_model_file,
                                         monitor='val_dice_coef_clipped',
                                         verbose=1,
                                         mode='max',
                                         save_best_only=False,
                                         save_weights_only=False)

            model.compile(loss=make_loss(args.loss_function),
                          optimizer=optimizer,
                          metrics=[
                              dice_coef, binary_crossentropy, ceneterline_loss,
                              dice_coef_clipped
                          ])

            def schedule_steps(epoch, steps):
                for step in steps:
                    if step[1] > epoch:
                        print("Setting learning rate to {}".format(step[0]))
                        return step[0]
                print("Setting learning rate to {}".format(steps[-1][0]))
                return steps[-1][0]

            callbacks = [
                best_model,
                EarlyStopping(patience=20,
                              verbose=1,
                              monitor='val_dice_coef_clipped',
                              mode='max')
            ]

            if args.schedule is not None:
                steps = [(float(step.split(":")[0]), int(step.split(":")[1]))
                         for step in args.schedule.split(",")]
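                # Assumed CLI format, inferred from the parsing above: comma-separated
                # "lr:epoch" pairs, e.g. --schedule 1e-3:20,1e-4:40 keeps lr at 1e-3
                # until epoch 20 and at 1e-4 afterwards.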
                lrSchedule = LearningRateScheduler(
                    lambda epoch: schedule_steps(epoch, steps))
                callbacks.insert(0, lrSchedule)

            if args.clr is not None:
                clr_params = args.clr.split(',')
                base_lr = float(clr_params[0])
                max_lr = float(clr_params[1])
                step = int(clr_params[2])
                mode = clr_params[3]
                clr = CyclicLR(base_lr=base_lr,
                               max_lr=max_lr,
                               step_size=step,
                               mode=mode)
                callbacks.append(clr)

            steps_per_epoch = len(all_ids) // args.batch_size + 1
            if args.steps_per_epoch:
                steps_per_epoch = args.steps_per_epoch

            model.fit_generator(train_generator,
                                steps_per_epoch=steps_per_epoch,
                                epochs=args.epochs,
                                validation_data=val_generator,
                                validation_steps=len(val_ids),
                                callbacks=callbacks,
                                max_queue_size=30,
                                verbose=1,
                                workers=args.num_workers)

            # for node in tf.get_default_graph().as_graph_def().node:
            #     if "training" not in node.name:
            #         print(node.name)

            vm.Model.save(args.lucid_save_name,
                          input_name='input',
                          image_shape=[args.crop_size, args.crop_size, 3],
                          output_names=['mask/Sigmoid'],
                          image_value_range=[-1, 1])

            del model
            K.clear_session()
            gc.collect()
Code Example #27
        
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(lr=1e-4),
              metrics=['acc'])

# rotation_range is a value in degrees (0–180), a range within which to randomly rotate pictures.
# width_shift and height_shift are ranges (as a fraction of total width or height) within which to randomly translate pictures vertically or horizontally.
# shear_range is for randomly applying shearing transformations.
# zoom_range is for randomly zooming inside pictures.
##########              
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   rotation_range=50,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
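
The call above is cut off in this excerpt. A closed, self-contained version of the same augmentation setup (the directory path, target size, and horizontal_flip/fill_mode choices are placeholders):

from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    rotation_range=50,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')

train_generator = train_datagen.flow_from_directory(
    'data/train',               # placeholder directory
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')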
Code Example #28
output_dim = embedding_size = 128.
'''
kmodel = layers.Embedding(max_features, 128)(input_tensor)
kmodel = layers.Conv1D(32, 7, activation='relu')(kmodel)
kmodel = layers.MaxPooling1D(5)(kmodel)
kmodel = layers.Conv1D(32, 7, activation='relu')(kmodel)
# The network ends with either GlobalMaxPooling1D or Flatten() to turn the 3D
# inputs into 2D outputs, allowing us to add one or more Dense layers to the
# model for classification or regression.
kmodel = layers.GlobalMaxPooling1D()(kmodel)
output_tensor = layers.Dense(1, activation='sigmoid')(kmodel)
model = models.Model(input_tensor, output_tensor)
model.summary()

model.compile(optimizer=RMSprop(lr=1e-4),
              loss='binary_crossentropy',
              metrics=['acc'])
history = model.fit(x_train,
                    y_train,
                    epochs=10,
                    batch_size=128,
                    validation_split=0.2)

# plot results
loss = history.history['loss']
val_loss = history.history['val_loss']
acc = history.history['acc']
val_acc = history.history['val_acc']
epochs = range(1, len(loss) + 1)
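
The excerpt ends before the plots promised by the '# plot results' comment. A minimal continuation using the variables extracted above (matplotlib assumed):

import matplotlib.pyplot as plt

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.figure()
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.show()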
Code Example #29
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(16, (3, 3),
                           input_shape=(300, 300, 3),
                           activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(32, (3, 3), activation="relu"),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(32, (3, 3), activation="relu"),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

from tensorflow.keras.optimizers import RMSprop

model.compile(optimizer=RMSprop(lr=0.001),
              loss='binary_crossentropy',
              metrics=['acc'])

from tensorflow.keras.preprocessing.image import ImageDataGenerator

# All images will be rescaled by 1./255.
train_datagen = ImageDataGenerator(rescale=1.0 / 255.)
test_datagen = ImageDataGenerator(rescale=1.0 / 255.)

# --------------------
# Flow training images in batches of 20 using train_datagen generator
# --------------------
train_generator = train_datagen.flow_from_directory(train_dir,
                                                    batch_size=20,
                                                    class_mode='binary',
Code Example #30
File: assignment_5.py Project: Fangyu-Du/assignment5
    tf.keras.layers.MaxPooling2D(2, 2),
    # Flatten the results into a one dimension data to feed into a DNN
    tf.keras.layers.Flatten(),
    # 512 neuron hidden layer
    tf.keras.layers.Dense(512, activation='relu'),
    # Only 1 output neuron. It will contain a value from 0 to 1, where 0 represents one class ('cats') and 1 the other ('dogs').
    # Note that because we are facing a two-class classification problem, i.e. a binary classification problem, we will
    # end our network with a sigmoid activation, so that the output of our network will be a single scalar between 0 and 1,
    # encoding the probability that the current image is class 1 (as opposed to class 0).
    tf.keras.layers.Dense(1, activation='sigmoid')
])

from tensorflow.keras.optimizers import RMSprop

model.compile(
    optimizer=RMSprop(lr=0.0001),  # learning rate of 0.0001
    loss='binary_crossentropy',  # binary_crossentropy loss, because it's a binary classification problem and our final activation is a sigmoid
    metrics=['acc'])

#training
history = model.fit_generator(train_generator, epochs=10, verbose=1)
##accuracy is 1

#3)4)
import numpy as np

from google.colab import files
from keras.preprocessing import image

uploaded = files.upload()
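
The comments above note that the single sigmoid unit encodes the probability of class 1 ('dogs'). A hedged sketch of mapping that output back to labels for the uploaded files (the image size, rescaling, and 0.5 threshold are illustrative assumptions):

import numpy as np
from keras.preprocessing import image

for fn in uploaded.keys():
    img = image.load_img('/content/' + fn, target_size=(150, 150))  # size assumed
    x = np.expand_dims(image.img_to_array(img) / 255.0, axis=0)     # rescale assumed
    prob = float(model.predict(x)[0][0])                            # scalar in [0, 1]
    print(fn, 'dog' if prob >= 0.5 else 'cat', prob)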