Code example #1
def construct_VGG19(input_shape=(224, 224, 3), classes=7):
    # Stack up the layers
    X_Input = Input(input_shape)
    # Stage 1
    X = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
               padding='same')(X_Input)
    X = BatchNormalization(axis=3)(X)
    X = Activation('relu')(X)
    X = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
               padding='same')(X)
    X = BatchNormalization(axis=3)(X)
    X = Activation('relu')(X)

    # Apply Max Pool
    X = apply_maxpool(X)
    print(X.shape)

    # Stage 2
    for i in range(2):
        X = Conv2D(filters=128,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   padding='same')(X)
        X = BatchNormalization(axis=3)(X)
        X = Activation('relu')(X)

    # Apply Max Pool
    X = apply_maxpool(X)
    print(X.shape)

    # Stage 3
    for i in range(4):
        X = Conv2D(filters=256,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   padding='same')(X)
        X = BatchNormalization(axis=3)(X)
        X = Activation('relu')(X)
    # Apply Max Pool
    X = apply_maxpool(X)
    print(X.shape)

    # Stage 4
    for i in range(4):
        X = Conv2D(filters=512,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   padding='same')(X)
        X = BatchNormalization(axis=3)(X)
        X = Activation('relu')(X)
    # Apply Max Pool
    X = apply_maxpool(X)
    print(X.shape)

    # Stage 5
    for i in range(4):
        X = Conv2D(filters=512,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   padding='same')(X)
        X = BatchNormalization(axis=3)(X)
        X = Activation('relu')(X)
    # Apply Max Pool
    X = apply_maxpool(X)
    print(X.shape)

    # Flatten this layer
    X = Flatten()(X)

    # Dense Layers
    for i in range(2):
        X = Dense(4096, activation='relu')(X)

    # Last layers
    X = Dense(classes, activation='softmax')(X)

    # Create Model
    model = Model(inputs=X_Input, outputs=X)
    return model
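The function above relies on an apply_maxpool helper that is not shown in this example. A minimal sketch of what it is assumed to do (the standard VGG 2x2 max pooling with stride 2):

from tensorflow.keras.layers import MaxPooling2D

def apply_maxpool(X):
    # Assumed helper: halve the spatial resolution with a 2x2 max pool, stride 2
    return MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(X)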
Code example #2
    log_dir = 'logs/'
    #----------------------------------------------#
    #   Input shape size
    #   Use 416x416 if GPU memory is limited
    #   Use 608x608 if GPU memory is plentiful
    #----------------------------------------------#
    input_shape = (416, 416)
    mosaic = False
    Cosine_scheduler = False
    label_smoothing = 0

    # Clear the session
    K.clear_session()

    # The input image
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape

    # Create the YOLO model
    print('Create YOLOv3 model with {} anchors and {} classes.'.format(
        num_anchors, num_classes))
    model_body = yolo_body(image_input, num_anchors // 2, num_classes)

    model_body.summary()
    #-------------------------------------------#
    #   See the README for downloading the weights file
    #-------------------------------------------#
    print('Load weights {}.'.format(weights_path))
    model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)

    # y_true has shape 13,13,3,85
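    The snippet stops right after the y_true comment. In keras-yolo3 style training scripts this is usually followed by ground-truth Input placeholders, one per output scale; a hedged sketch assuming the two-scale (tiny) layout implied by num_anchors // 2:

    # Assumed continuation: one ground-truth placeholder per output scale
    # (grid strides 32 and 16 for the two tiny-YOLOv3 heads).
    y_true = [
        Input(shape=(h // stride, w // stride, num_anchors // 2, num_classes + 5))
        for stride in (32, 16)
    ]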
Code example #3
import numpy as np
from tensorflow.keras.layers import Input, SimpleRNN, Dense
from tensorflow.keras.models import Model

# N = number of samples
# T = sequence length
# D = number of input features
# M = number of hidden units
# K = number of output units

# Make some data
N = 1
T = 10
D = 3
K = 2
X = np.random.randn(N, T, D)

# Make an RNN
M = 5  # number of hidden units
i = Input(shape=(T, D))
x = SimpleRNN(M)(i)
x = Dense(K)(x)

model = Model(i, x)

# Get the output
Yhat = model.predict(X)
print(Yhat)

# See if we can replicate this output
# Get the weights first
model.summary()

# See what's returned
model.layers[1].get_weights()
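The example ends after printing the layer weights. A minimal sketch of the replication step the comments describe, recomputing the SimpleRNN output by hand with NumPy (an assumption about how the walkthrough continues):

# Assumed continuation: replicate model.predict(X) manually.
# SimpleRNN weights: Wx (D x M), Wh (M x M), bh (M,);
# Dense weights: Wo (M x K), bo (K,).
Wx, Wh, bh = model.layers[1].get_weights()
Wo, bo = model.layers[2].get_weights()

h = np.zeros(M)  # initial hidden state
for t in range(T):
    h = np.tanh(X[0, t].dot(Wx) + h.dot(Wh) + bh)

Yhat_manual = h.dot(Wo) + bo  # the Dense layer has no activation
print(Yhat_manual)  # should match Yhat above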
Code example #4
File: train.py Project: skritik098/mlops-workflow
# In[17]:

from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.datasets import mnist
from tensorflow.keras.activations import relu
from tensorflow.keras.utils import to_categorical

# In[2]:

(x_train, y_train), (x_test, y_test) = mnist.load_data()

# In[19]:

inp_layer = Input(shape=(784,))
x = Dense(64, activation=relu)(inp_layer)
out = Dense(10, activation='softmax')(x)
model = Model(inputs=[inp_layer], outputs=[out])

# In[20]:

model.summary()

# In[21]:

X_train = x_train.reshape(-1, 28 * 28)
X_test = x_test.reshape(-1, 28 * 28)

# In[22]:
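The notebook is cut off after the reshape cell; a hedged sketch of the usual continuation (pixel scaling, one-hot targets, compile and fit; the epoch and batch-size values are assumptions):

# Assumed continuation of the notebook cells above.
X_train = X_train.astype('float32') / 255.0
X_test = X_test.astype('float32') / 255.0
Y_train = to_categorical(y_train, num_classes=10)
Y_test = to_categorical(y_test, num_classes=10)

model.compile(optimizer=Adam(),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X_train, Y_train,
          epochs=5,
          batch_size=128,
          validation_data=(X_test, Y_test))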
Code example #5
def GCN(
    loss='MSE',
    num_objects=80,
    num_relation=3,
    embed_dim=64,
    Din=128,
    H=512,
    Dout=128,
    batch_size=1,
    mask_size=16,
    num_rooms=35,
    lr=1e-4,
):

    num_edges = int(num_rooms * (num_rooms - 1) / 2)

    input_o = Input(shape=(num_rooms, ), dtype=tf.int32, batch_size=batch_size)
    input_p = Input(shape=(num_edges, ), dtype=tf.float32, batch_size=batch_size)
    input_t = Input(shape=(num_edges, 2),
                    dtype=tf.int32,
                    batch_size=batch_size)

    box_gt = Input(shape=(num_rooms, 4),
                   dtype=tf.float32,
                   batch_size=batch_size)
    mask_gt = Input(shape=(num_rooms, mask_size, mask_size),
                    dtype=tf.int32,
                    batch_size=batch_size)

    #Embedding to dense vectors
    embedding_o = Embedding(input_dim=num_objects,
                            output_dim=embed_dim,
                            input_length=num_rooms,
                            mask_zero=True)(input_o)
    embedding_p = Embedding(input_dim=num_relation,
                            output_dim=embed_dim,
                            input_length=num_edges,
                            mask_zero=True)(input_p)

    #Graph Convolutions
    new_s_obj, new_p_obj = GraphTripleConvNet(input_dim=Din,
                                              hidden_dim=H,
                                              batch_size=batch_size)(
                                                  embedding_o, embedding_p,
                                                  input_t)

    #box and mask nets to get scene layout
    output_box = box_net(gconv_dim=Dout)(new_s_obj)
    output_mask = Mask_regression(num_chan=Dout,
                                  mask_size=mask_size)(new_s_obj)

    output_rel = rel_aux_net(gconv_out=Dout,
                             gconv_hidden_dim=H,
                             out_dim=num_relation,
                             batch_size=batch_size)(embedding_o, output_box,
                                                    input_t)

    model = Model([input_o, input_p, input_t, box_gt, mask_gt],
                  [output_box, output_mask, output_rel])

    model.add_loss(
        total_loss(box_gt, mask_gt, input_p, output_box, output_mask,
                   output_rel, loss))
    model.compile(optimizer=optimizers.Adam(learning_rate=lr))

    return model
Code example #6
x_train[0]


# # Model Specification

# In[9]:


from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, concatenate
from tensorflow.keras.layers import Input


# define two sets of inputs
low_rf  = Input(shape=(X.shape[2],))
high_rf = Input(shape=(X.shape[2],))

# the first branch operates on the first input
x1 = Dense(500 , activation="relu")(low_rf)
# x1 = Dense(500, activation="relu")(x1)
x1 = Model(inputs=low_rf, outputs=x1)

# the second branch operates on the second input
x2 = Dense(500 , activation="relu")(high_rf)
# x2 = Dense(500, activation="relu")(x2)
x2 = Model(inputs=high_rf, outputs=x2)

# combine the output of the two branches
combined = concatenate([x1.output, x2.output])
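The snippet ends at the concatenation. A minimal sketch of a possible head on top of the combined branches (layer sizes, loss and optimizer are assumptions, not taken from the original):

# Assumed continuation: a small head on the combined features and the
# two-input Model built from both branches.
z = Dense(64, activation="relu")(combined)
z = Dense(1, activation="linear")(z)

model = Model(inputs=[x1.input, x2.input], outputs=z)
model.compile(optimizer="adam", loss="mse")
model.summary()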
Code example #7
                                              target_size=(W, H),
                                              batch_size=TRAIN_BATCH,
                                              class_mode='raw')

#%%
test_generator = datagen.flow_from_directory(directory=TEST_PATH,
                                             target_size=(W, H),
                                             class_mode=None,
                                             batch_size=TEST_BATCH)

print('Done.')

#%% Define Model

# Build the model
inputs = Input((H, W, C))

c1 = Conv2D(8, (3, 3), activation='relu', padding='same') (inputs)
c1 = Conv2D(8, (3, 3), activation='relu', padding='same') (c1)
p1 = MaxPooling2D((2, 2)) (c1)

c2 = Conv2D(16, (3, 3), activation='relu', padding='same') (p1)
c2 = Conv2D(16, (3, 3), activation='relu', padding='same') (c2)
p2 = MaxPooling2D((2, 2)) (c2)

c3 = Conv2D(32, (3, 3), activation='relu', padding='same') (p2)
c3 = Conv2D(32, (3, 3), activation='relu', padding='same') (c3)
p3 = MaxPooling2D((2, 2)) (c3)

c4 = Conv2D(64, (3, 3), activation='relu', padding='same') (p3)
c4 = Conv2D(64, (3, 3), activation='relu', padding='same') (c4)
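The model definition is cut off after c4. A hedged sketch of how a U-Net style definition like this typically continues (bottleneck plus one decoder stage with a skip connection; the remaining stages would mirror c3, c2 and c1 in the same way, and UpSampling2D / concatenate are assumed to be imported):

# Assumed continuation (sketch only, not from the original script).
p4 = MaxPooling2D((2, 2))(c4)

c5 = Conv2D(128, (3, 3), activation='relu', padding='same')(p4)
c5 = Conv2D(128, (3, 3), activation='relu', padding='same')(c5)

u6 = concatenate([UpSampling2D((2, 2))(c5), c4])
c6 = Conv2D(64, (3, 3), activation='relu', padding='same')(u6)
c6 = Conv2D(64, (3, 3), activation='relu', padding='same')(c6)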
Code example #8
File: Model.py Project: gsaha009/HHbbWWAnalysis
def NeuralNetGeneratorModel(x_train, y_train, x_val, y_val, params):
    """
    Keras model for the Neural Network, used to scan the hyperparameter space by Talos
    Uses the generator rather than the input data (which are dummies)
    """
    # Scaler #
    with open(parameters.scaler_path,
              'rb') as handle:  # Import scaler that was created before
        scaler = pickle.load(handle)

    # Design network #

    # Left branch : classic inputs -> Preprocess -> onehot
    inputs_numeric = []
    means = []
    variances = []
    inputs_all = []
    encoded_all = []
    for idx in range(x_train.shape[1]):
        inpName = parameters.inputs[idx].replace('$', '').replace(' ',
                                                                  '').replace(
                                                                      '_', '')
        input_layer = tf.keras.Input(shape=(1, ), name=inpName)
        # Categorical inputs #
        if parameters.mask_op[idx]:
            operation = getattr(Operations, parameters.operations[idx])()
            encoded_all.append(operation(input_layer))
        # Numerical inputs #
        else:
            inputs_numeric.append(input_layer)
            means.append(scaler.mean_[idx])
            variances.append(scaler.var_[idx])
        inputs_all.append(input_layer)

    # Concatenate all numerical inputs #
    if int(tf_version[1]) < 4:
        normalizer = preprocessing.Normalization(name='Normalization')
        x_dummy = np.ones((10, len(means)))
        # Needs a dummy to call the adapt method before setting the weights
        normalizer.adapt(x_dummy)
        normalizer.set_weights([np.array(means), np.array(variances)])
    else:
        normalizer = preprocessing.Normalization(mean=means,
                                                 variance=variances,
                                                 name='Normalization')
    encoded_all.append(
        normalizer(tf.keras.layers.concatenate(inputs_numeric,
                                               name='Numerics')))

    if len(encoded_all) > 1:
        all_features = tf.keras.layers.concatenate(encoded_all,
                                                   axis=-1,
                                                   name="Features")
    else:
        all_features = encoded_all[0]

    # Right branch : LBN
    lbn_input_shape = (len(parameters.LBN_inputs) // 4, 4)
    input_lbn_Layer = Input(shape=lbn_input_shape, name='LBN_inputs')
    lbn_layer = LBNLayer(
        lbn_input_shape,
        n_particles=max(params['n_particles'],
                        1),  # Hack so that 0 does not trigger error
        boost_mode=LBN.PAIRS,
        features=["E", "px", "py", "pz", "pt", "p", "m", "pair_cos"],
        name='LBN')(input_lbn_Layer)
    batchnorm = tf.keras.layers.BatchNormalization(name='batchnorm')(lbn_layer)

    # Concatenation of left and right #
    concatenate = tf.keras.layers.Concatenate(axis=-1)(
        [all_features, batchnorm])
    L1 = Dense(params['first_neuron'],
               activation=params['activation'],
               kernel_regularizer=l2(params['l2']))(
                   concatenate if params['n_particles'] > 0 else all_features)
    hidden = hidden_layers(params, 1, batch_normalization=True).API(L1)
    out = Dense(y_train.shape[1],
                activation=params['output_activation'],
                name='out')(hidden)

    # Tensorboard logs #
    #    path_board = os.path.join(parameters.main_path,"TensorBoard")
    #    suffix = 0
    #    while(os.path.exists(os.path.join(path_board,"Run_"+str(suffix)))):
    #        suffix += 1
    #    path_board = os.path.join(path_board,"Run_"+str(suffix))
    #    os.makedirs(path_board)
    #    logging.info("TensorBoard log dir is at %s"%path_board)

    # Callbacks #
    # Early stopping to stop learning if val_loss plateaus for too long #
    early_stopping = EarlyStopping(**parameters.early_stopping_params)
    # Reduce learning rate in case of plateau #
    reduceLR = ReduceLROnPlateau(**parameters.reduceLR_params)
    # Custom loss function plot for debugging #
    loss_history = LossHistory()
    # Tensorboard for checking live the loss curve #
    #    board = TensorBoard(log_dir=path_board,
    #                        histogram_freq=1,
    #                        batch_size=params['batch_size'],
    #                        write_graph=True,
    #                        write_grads=True,
    #                        write_images=True)
    #    Callback_list = [loss_history,early_stopping,reduceLR,board]
    Callback_list = [loss_history, early_stopping, reduceLR]

    # Compile #
    if 'resume' not in params:  # Normal learning
        # Define model #
        model_inputs = [inputs_all]
        if params['n_particles'] > 0:
            model_inputs.append(input_lbn_Layer)
        model = Model(inputs=model_inputs, outputs=[out])
        initial_epoch = 0
    else:  # a model has to be imported and resumes training
        #custom_objects =  {'PreprocessLayer': PreprocessLayer,'OneHot': OneHot.OneHot}
        logging.info("Loaded model %s" % params['resume'])
        a = Restore(params['resume'],
                    custom_objects=custom_objects,
                    method='h5')
        model = a.model
        initial_epoch = params['initial_epoch']

    model.compile(optimizer=Adam(lr=params['lr']),
                  loss=params['loss_function'],
                  metrics=[
                      tf.keras.metrics.CategoricalAccuracy(),
                      tf.keras.metrics.AUC(multi_label=True),
                      tf.keras.metrics.Precision(),
                      tf.keras.metrics.Recall()
                  ])
    model.summary()

    # Generator #
    training_generator = DataGenerator(
        path=parameters.config,
        inputs=parameters.inputs,
        outputs=parameters.outputs,
        inputsLBN=parameters.LBN_inputs if params['n_particles'] > 0 else None,
        cut=parameters.cut,
        weight=parameters.weight,
        batch_size=params['batch_size'],
        state_set='training',
        model_idx=params['model_idx'] if parameters.crossvalidation else None)
    validation_generator = DataGenerator(
        path=parameters.config,
        inputs=parameters.inputs,
        outputs=parameters.outputs,
        inputsLBN=parameters.LBN_inputs if params['n_particles'] > 0 else None,
        cut=parameters.cut,
        weight=parameters.weight,
        batch_size=params['batch_size'],
        state_set='validation',
        model_idx=params['model_idx'] if parameters.crossvalidation else None)

    # Some verbose logging #
    logging.info("Will use %d workers" % parameters.workers)
    logging.warning("Tensorflow location " + tf.__file__)
    if len(tf.config.experimental.list_physical_devices('XLA_GPU')) > 0:
        logging.info("GPU detected")
    #logging.warning(K.tensorflow_backend._get_available_gpus())
    # Fit #
    history = model.fit_generator(
        generator=training_generator,  # Training data from generator instance
        validation_data=
        validation_generator,  # Validation data from generator instance
        epochs=params['epochs'],  # Number of epochs
        verbose=1,
        max_queue_size=parameters.workers * 2,  # Length of batch queue
        callbacks=Callback_list,  # Callbacks
        initial_epoch=
        initial_epoch,  # In case of resumed training will be different from 0
        workers=parameters.
        workers,  # Number of threads for batch generation (0 : all in same)
        shuffle=True,  # Shuffle order at each epoch
        use_multiprocessing=True)  # Needs to be turned on for queuing batches

    # Plot history #
    PlotHistory(loss_history)

    return history, model
Code example #9
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.utils import plot_model
from model_blocks import optimized_inception_module

input = Input(shape=(256, 256, 3))

# Add inception block
layer = optimized_inception_module(input, 64, 96, 128, 16, 32, 32)

# Add inception block
layer = optimized_inception_module(layer, 128, 128, 192, 32, 96, 64)

model = Model(inputs=input, outputs=layer)

model.summary()

plot_model(model,
           show_shapes=True,
           to_file="artifacts/multi_inception_model.png")
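optimized_inception_module is imported from a local model_blocks module that is not included here. A plausible sketch of such a module (the standard Inception block with 1x1 bottleneck convolutions) as an assumption about what the import provides:

from tensorflow.keras.layers import Conv2D, MaxPooling2D, concatenate

def optimized_inception_module(layer_in, f1, f2_in, f2_out, f3_in, f3_out, f4_out):
    # Assumed implementation: Inception module with dimensionality reduction.
    conv1 = Conv2D(f1, (1, 1), padding='same', activation='relu')(layer_in)
    conv3 = Conv2D(f2_in, (1, 1), padding='same', activation='relu')(layer_in)
    conv3 = Conv2D(f2_out, (3, 3), padding='same', activation='relu')(conv3)
    conv5 = Conv2D(f3_in, (1, 1), padding='same', activation='relu')(layer_in)
    conv5 = Conv2D(f3_out, (5, 5), padding='same', activation='relu')(conv5)
    pool = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(layer_in)
    pool = Conv2D(f4_out, (1, 1), padding='same', activation='relu')(pool)
    return concatenate([conv1, conv3, conv5, pool], axis=-1)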
Code example #10
def createResNetV1(inputShape=(128, 128, 3), numClasses=3):
    inputs = Input(shape=inputShape)
    v = resLyr(inputs, lyrName='Input')

    v = Dropout(0.2)(v)

    v = resBlkV1(inputs=v,
                 numFilters=16,
                 numBlocks=3,
                 downsampleOnFirst=False,
                 names='Stg1')

    v = Dropout(0.2)(v)

    v = resBlkV1(inputs=v,
                 numFilters=16,
                 numBlocks=3,
                 downsampleOnFirst=False,
                 names='Stg2')

    v = Dropout(0.2)(v)

    v = resBlkV1(inputs=v,
                 numFilters=32,
                 numBlocks=3,
                 downsampleOnFirst=True,
                 names='Stg3')

    v = Dropout(0.2)(v)

    v = resBlkV1(inputs=v,
                 numFilters=64,
                 numBlocks=3,
                 downsampleOnFirst=True,
                 names='Stg4')

    v = Dropout(0.2)(v)

    v = resBlkV1(inputs=v,
                 numFilters=128,
                 numBlocks=3,
                 downsampleOnFirst=True,
                 names='Stg5')

    v = Dropout(0.2)(v)

    v = resBlkV1(inputs=v,
                 numFilters=256,
                 numBlocks=3,
                 downsampleOnFirst=True,
                 names='Stg6')

    v = Dropout(0.2)(v)

    v = resBlkV1(inputs=v,
                 numFilters=256,
                 numBlocks=3,
                 downsampleOnFirst=False,
                 names='Stg7')

    v = Dropout(0.2)(v)

    v = AveragePooling2D(pool_size=8, name='AvgPool')(v)

    v = Dropout(0.2)(v)

    v = Flatten()(v)

    outputs = Dense(numClasses,
                    activation='softmax',
                    kernel_initializer=he_normal(33))(v)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optmz,
                  metrics=['accuracy'])

    return model
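resLyr, resBlkV1 and the optmz optimizer come from elsewhere in the original project. A hedged sketch of what the resLyr wrapper might look like (Conv2D followed by BatchNorm and an activation; all names and defaults here are assumptions):

from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation
from tensorflow.keras.initializers import he_normal

def resLyr(inputs, numFilters=16, kernelSz=3, strides=1,
           activation='relu', batchNorm=True, lyrName=None):
    # Assumed helper: Conv2D -> optional BatchNorm -> optional activation
    x = Conv2D(numFilters,
               kernel_size=kernelSz,
               strides=strides,
               padding='same',
               kernel_initializer=he_normal(33),
               name=lyrName + '_conv' if lyrName else None)(inputs)
    if batchNorm:
        x = BatchNormalization(name=lyrName + '_bn' if lyrName else None)(x)
    if activation is not None:
        x = Activation(activation, name=lyrName + '_act' if lyrName else None)(x)
    return x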
Code example #11
File: Model.py Project: gsaha009/HHbbWWAnalysis
def NeuralNetModel(x_train, y_train, x_val, y_val, params):
    """
    Keras model for the Neural Network, used to scan the hyperparameter space by Talos
    Uses the data provided as inputs
    """
    # Split y = [target,weight], Talos does not leave room for the weight so had to be included in one of the arrays
    w_train = y_train[:, -1]
    w_val = y_val[:, -1]
    y_train = y_train[:, :-1]
    y_val = y_val[:, :-1]

    x_train_lbn = x_train[:, -len(parameters.LBN_inputs):].reshape(
        -1, 4,
        len(parameters.LBN_inputs) // 4)
    x_train = x_train[:, :-len(parameters.LBN_inputs)]

    x_val_lbn = x_val[:, -len(parameters.LBN_inputs):].reshape(
        -1, 4,
        len(parameters.LBN_inputs) // 4)
    x_val = x_val[:, :-len(parameters.LBN_inputs)]

    # Scaler #
    with open(parameters.scaler_path,
              'rb') as handle:  # Import scaler that was created before
        scaler = pickle.load(handle)

    # Design network #

    # Left branch : classic inputs -> Preprocess -> onehot
    inputs_numeric = []
    means = []
    variances = []
    inputs_all = []
    encoded_all = []
    for idx in range(x_train.shape[1]):
        inpName = parameters.inputs[idx].replace('$', '')
        input_layer = tf.keras.Input(shape=(1, ), name=inpName)
        # Categorical inputs #
        if parameters.mask_op[idx]:
            operation = getattr(Operations, parameters.operations[idx])()
            encoded_all.append(operation(input_layer))
        # Numerical inputs #
        else:
            inputs_numeric.append(input_layer)
            means.append(scaler.mean_[idx])
            variances.append(scaler.var_[idx])
        inputs_all.append(input_layer)

    # Concatenate all numerical inputs #
    if int(tf_version[1]) < 4:
        normalizer = preprocessing.Normalization(name='Normalization')
        x_dummy = np.ones((10, len(means)))
        # Needs a dummy to call the adapt method before setting the weights
        normalizer.adapt(x_dummy)
        normalizer.set_weights([np.array(means), np.array(variances)])
    else:
        normalizer = preprocessing.Normalization(mean=means,
                                                 variance=variances,
                                                 name='Normalization')
    encoded_all.append(
        normalizer(tf.keras.layers.concatenate(inputs_numeric,
                                               name='Numerics')))

    if len(encoded_all) > 1:
        all_features = tf.keras.layers.concatenate(encoded_all,
                                                   axis=-1,
                                                   name="Features")
    else:
        all_features = encoded_all[0]

    # Right branch : LBN
    input_lbn_Layer = Input(shape=x_train_lbn.shape[1:], name='LBN_inputs')
    lbn_layer = LBNLayer(
        x_train_lbn.shape[1:],
        n_particles=max(params['n_particles'],
                        1),  # Hack so that 0 does not trigger error
        boost_mode=LBN.PAIRS,
        features=["E", "px", "py", "pz", "pt", "p", "m", "pair_cos"],
        name='LBN')(input_lbn_Layer)
    batchnorm = tf.keras.layers.BatchNormalization(name='batchnorm')(lbn_layer)

    # Concatenation of left and right #
    concatenate = tf.keras.layers.Concatenate(axis=-1)(
        [all_features, batchnorm])
    L1 = Dense(params['first_neuron'],
               activation=params['activation'],
               kernel_regularizer=l2(params['l2']))(
                   concatenate if params['n_particles'] > 0 else all_features)
    hidden = hidden_layers(params, 1, batch_normalization=True).API(L1)
    out = Dense(y_train.shape[1],
                activation=params['output_activation'],
                name='out')(hidden)

    # Check preprocessing #
    preprocess = Model(inputs=inputs_numeric, outputs=encoded_all[-1])
    x_numeric = x_train[:, [not m for m in parameters.mask_op]]
    out_preprocess = preprocess.predict(np.hsplit(x_numeric,
                                                  x_numeric.shape[1]),
                                        batch_size=params['batch_size'])
    mean_scale = np.mean(out_preprocess)
    std_scale = np.std(out_preprocess)
    if abs(mean_scale) > 0.01 or abs(
        (std_scale - 1) /
            std_scale) > 0.1:  # Check that scaling is correct to 1%
        logging.warning(
            "Something is wrong with the preprocessing layer (mean = %0.6f, std = %0.6f), maybe you loaded an incorrect scaler"
            % (mean_scale, std_scale))

    # Tensorboard logs #
    #path_board = os.path.join(parameters.main_path,"TensorBoard")
    #suffix = 0
    #while(os.path.exists(os.path.join(path_board,"Run_"+str(suffix)))):
    #    suffix += 1
    #path_board = os.path.join(path_board,"Run_"+str(suffix))
    #os.makedirs(path_board)
    #logging.info("TensorBoard log dir is at %s"%path_board)

    # Callbacks #
    # Early stopping to stop learning if val_loss plateaus for too long #
    early_stopping = EarlyStopping(**parameters.early_stopping_params)
    # Reduce learning rate in case of plateau #
    reduceLR = ReduceLROnPlateau(**parameters.reduceLR_params)
    # Custom loss function plot for debugging #
    loss_history = LossHistory()
    # Tensorboard for checking live the loss curve #
    #board = TensorBoard(log_dir=path_board,
    #                    histogram_freq=1,
    #                    batch_size=params['batch_size'],
    #                    write_graph=True,
    #                    write_grads=True,
    #                    write_images=True)
    Callback_list = [loss_history, early_stopping, reduceLR]

    # Compile #
    if 'resume' not in params:  # Normal learning
        # Define model #
        model_inputs = [inputs_all]
        if params['n_particles'] > 0:
            model_inputs.append(input_lbn_Layer)
        model = Model(inputs=model_inputs, outputs=[out])
        initial_epoch = 0
    else:  # a model has to be imported and resumes training
        #custom_objects =  {'PreprocessLayer': PreprocessLayer,'OneHot': OneHot.OneHot}
        logging.info("Loaded model %s" % params['resume'])
        a = Restore(params['resume'],
                    custom_objects=custom_objects,
                    method='h5')
        model = a.model
        initial_epoch = params['initial_epoch']

    model.compile(optimizer=Adam(lr=params['lr']),
                  loss=params['loss_function'],
                  metrics=[
                      tf.keras.metrics.CategoricalAccuracy(),
                      tf.keras.metrics.AUC(multi_label=True),
                      tf.keras.metrics.Precision(),
                      tf.keras.metrics.Recall()
                  ])
    model.summary()
    fit_inputs = np.hsplit(x_train, x_train.shape[1])
    fit_val = (np.hsplit(x_val, x_val.shape[1]), y_val, w_val)
    if params['n_particles'] > 0:
        fit_inputs.append(x_train_lbn)
        fit_val[0].append(x_val_lbn)
    # Fit #
    history = model.fit(x=fit_inputs,
                        y=y_train,
                        sample_weight=w_train,
                        epochs=params['epochs'],
                        batch_size=params['batch_size'],
                        verbose=1,
                        validation_data=fit_val,
                        callbacks=Callback_list)

    # Plot history #
    PlotHistory(loss_history, params)

    return history, model
Code example #12
def main():

    # Hyperparameters
    batch = 4
    learning_rate = 0.001
    patience = 5
    weights_path = './weights'
    epochs = 50
    load_pretrained = None
    input_size = (224, 224, 3)

    # Load dataset
    train_generator, valid_generator = HockeyFightDataset(
        batch=batch, size=input_size).dataset()

    # Modeling
    inputs = Input([None, *input_size])
    predictions, end_points = inceptionI3D(inputs,
                                           dropout_keep_prob=0.5,
                                           final_endpoint='Predictions')
    i3d_model = Model(inputs, predictions)
    i3d_model.compile(optimizer=Adam(lr=learning_rate),
                      loss='categorical_crossentropy',
                      metrics=['acc'])

    # Callbacks
    callbacks = []

    tensorboard = TensorBoard()

    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.5,
                                  patience=patience,
                                  verbose=1,
                                  mode='min',
                                  min_lr=1e-6)

    model_checkpoint = ModelCheckpoint(os.path.join(
        weights_path, f'I3D_{batch}batch_{epochs}epochs.h5'),
                                       monitor='val_loss',
                                       verbose=1,
                                       save_best_only=True,
                                       save_weights_only=True,
                                       mode='min')

    callbacks.append(tensorboard)
    callbacks.append(reduce_lr)
    callbacks.append(model_checkpoint)

    # Train!
    i3d_model.fit_generator(generator=train_generator,
                            steps_per_epoch=get_steps_hockey(900, batch),
                            epochs=epochs,
                            callbacks=callbacks,
                            validation_data=valid_generator,
                            validation_steps=get_steps_hockey(100, batch),
                            use_multiprocessing=True,
                            workers=-1)

    # Evaluate
    evaluation = i3d_model.evaluate_generator(generator=valid_generator)

    # evaluate_generator returns a list of scalars in metrics order
    print(f'Evaluation loss : {evaluation[0]} , acc : {evaluation[1]}')
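HockeyFightDataset, inceptionI3D and get_steps_hockey come from the surrounding project; the steps helper is presumably just a samples-per-batch computation, e.g. (an assumption):

import math

def get_steps_hockey(num_samples, batch_size):
    # Assumed helper: number of batches needed to cover the dataset once.
    return math.ceil(num_samples / batch_size)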
Code example #13
def res_unet(input_shape=(1024, 1024, 3), chan_num=3):
    inputs = Input(shape=input_shape)
    # 1024
    down0b = conv_layer(inputs, 16, 2, strides=2)
    down0b = conv_layer(down0b, 16, 2)
    down0b_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0b)
    # down0b_pool = conv_layer(inputs, 32, 4, strides = 4, shape= 5)

    # 256

    down0a = conv_layer(down0b_pool, 32, 4)
    down0a = conv_layer(down0a, 32, 4)
    down0a_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0a)
    down0a_pool = add_res_dwn(down0a_pool, down0b_pool, 32)  # res connect

    # 128

    down0 = conv_layer(down0a_pool, 64, 4)
    down0 = conv_layer(down0, 64, 4)
    down0_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0)
    down0_pool = add_res_dwn(down0_pool, down0a_pool, 64)  # res connect

    # 64

    down1 = conv_layer(down0_pool, 128, 8)
    down1 = conv_layer(down1, 128, 8)
    down1_pool = MaxPooling2D((2, 2), strides=(2, 2))(down1)
    down1_pool = add_res_dwn(down1_pool, down0_pool, 128)  # res connect

    # 32

    down2 = conv_layer(down1_pool, 256, 8)
    down2 = conv_layer(down2, 256, 8)
    down2_pool = MaxPooling2D((2, 2), strides=(2, 2))(down2)
    down2_pool = add_res_dwn(down2_pool, down1_pool, 256)  # res connect
    # 16

    down3 = conv_layer(down2_pool, 512, 16)
    down3 = conv_layer(down3, 512, 16)
    down3 = add_res(down3, down2_pool, 512)

    up2 = UpSampling2D((2, 2))(down3)
    up2_c = concatenate([down2, up2], axis=chan_num)
    up2 = conv_layer(up2_c, 256, 8)
    up2 = conv_layer(up2, 256, 8)
    up2 = add_res(up2, up2_c, 256)

    # 32

    up1 = UpSampling2D((2, 2))(up2)
    up1_c = concatenate([down1, up1], axis=chan_num)
    up1 = conv_layer(up1_c, 128, 8)
    up1 = conv_layer(up1, 128, 8)
    up1 = add_res(up1, up1_c, 128)

    # 64

    up0 = UpSampling2D((2, 2))(up1)
    up0_c = concatenate([down0, up0], axis=chan_num)
    up0 = conv_layer(up0_c, 64, 4)
    up0 = conv_layer(up0, 64, 4)
    up0 = add_res(up0, up0_c, 64)

    # 128

    up0a = UpSampling2D((2, 2))(up0)
    up0a_c = concatenate([down0a, up0a], axis=chan_num)
    up0a = conv_layer(up0a_c, 32, 4)
    up0a = conv_layer(up0a, 32, 4)
    up0a = add_res(up0a, up0a_c, 32)

    # 256

    # up0b = UpSampling2D((2, 2))(up0a)
    # up0b_c = concatenate([down0b, up0b], axis=chan_num)
    up0b_c = UpSampling2D((2, 2))(up0a)
    up0b = conv_layer(up0b_c, 8, 2)
    up0b = add_res(up0b, up0b_c, 8)

    # 512

    # Crypt predict
    crypt_fufi = Conv2D(1, (1, 1))(up0b)
    crypt_fufi = layers.Activation('sigmoid', dtype='float32',
                                   name='crypt')(crypt_fufi)

    # just unet
    just_unet = Model(inputs=inputs, outputs=[crypt_fufi, up0a])
    return just_unet
Code example #14
def construct_Resnet18(input_shape=(32, 32, 3), classes=10):
    """
    Implementation of the popular Resnet18 with the following architecture:
    CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
    -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER

    Arguments:
    input_shape -- shape of the images of the dataset
    classes -- integer, number of classes

    Returns:
    model -- a Model() instance in tensorflow.keras
    """
    # Define input as a tensor of input shape
    X_input = Input(input_shape)
    X = X_input
    # Zero Padding
    #X = ZeroPadding2D((3, 3))(X_input)

    # Stage 1
    X = Conv2D(64, (7, 7),
               strides=(2, 2),
               name='conv1',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name='bn_conv1')(X)
    X = Activation('relu')(X)
    #X = MaxPooling2D((3, 3), strides=(2, 2))(X)

    # Stage 2
    # X = convolutional_block(
    #     X_input, f=3, filters=[64, 64], stage=2, block='a', s=1)
    X = identity_block(X, 3, [64, 64], stage=2, block='b')
    X = identity_block(X, 3, [64, 64], stage=2, block='c')
    X = identity_block(X, 3, [64, 64], stage=2, block='d')
    # Stage 3
    X = convolutional_block(X, f=3, filters=[64, 128], stage=3, block='a', s=2)
    X = identity_block(X, 3, [64, 128], stage=3, block='b')
    X = identity_block(X, 3, [64, 128], stage=3, block='c')
    # Stage 4
    X = convolutional_block(X,
                            f=3,
                            filters=[128, 256],
                            stage=4,
                            block='a',
                            s=2)
    X = identity_block(X, 3, [128, 256], stage=4, block='b')
    X = identity_block(X, 3, [128, 256], stage=4, block='c')
    # Stage 5
    # X = convolutional_block(
    #     X, f=3, filters=[256, 512], stage=5, block='a', s=2)
    # X = identity_block(X, 3, [256, 512], stage=5, block='b')
    # X = identity_block(X, 3, [256, 512], stage=5, block='c')

    # AVGPOOL
    X = AveragePooling2D((1, 1))(X)

    # Output Layer
    X = Flatten()(X)
    # X = Dense(1000, activation='relu', name='fc10000',
    #           kernel_initializer=glorot_uniform(seed=0))(X)
    X = Dense(classes,
              activation='softmax',
              name='fc' + str(classes),
              kernel_initializer=glorot_uniform(seed=0))(X)

    # Create model
    model = Model(inputs=X_input, outputs=X, name='Resnet18')
    return model
Code example #15
print("MAX_ENCODER_SIZE: ", MAX_ENCODER_SIZE, "MAX_DECODER_SIZE", MAX_DECODER_SIZE )


encoder_input = pad_sequences( encoder_input, maxlen=MAX_ENCODER_SIZE, padding='post', value=0 )
decoder_input = pad_sequences( decoder_input, maxlen=MAX_DECODER_SIZE, padding="post", value=0 )
decoder_output = pad_sequences( decoder_output, maxlen=MAX_DECODER_SIZE, padding="post", value=0 )
print( "encoder input shape", encoder_input.shape, "decoder input shape", decoder_input.shape  )


#Encoder Part
#pass all input through embedding layer, to get embeddings
#pass all embeddings to Bi-LSTM to get all sequences of hidden states (h1...htx)
LATENT_DIM_EN = 50 #M1
LATENT_DIM_DE = 60 #M2

encoder_inp = Input( shape=(MAX_ENCODER_SIZE,) ) #(_,Tx)

encoder_embedding = Embedding( ENCODER_VOCAB_SIZE, ENCODER_EMBEDDING_DIM, weights=[embedding_matrix], trainable=False )
embeddings_en = encoder_embedding( encoder_inp ) #(_,Tx, ENCODER_EMBEDDING_DIM)

encoder_bilstm = Bidirectional( LSTM( LATENT_DIM_EN, return_sequences=True, dropout=0.1, recurrent_dropout=0.1 ) )
hidden_states = encoder_bilstm( embeddings_en ) #(_,Tx, 2*M1)

#Attention Part
#Repeat s(t-1) using RepeatVector
#concatenate s(t-1) with each hidden state h_t
#pass it through a neural network with a single output neuron
#apply softmax over the time axis, otherwise the alphas will all be one
#get weighted hidden states (multiply each alpha with its hidden state)
#sum all weighted hidden states; this is the context
#the last 2 steps can be achieved by a dot product over axis=1
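The comments describe the attention step but the code for it is not part of this excerpt. A minimal sketch following that description (layer names and the inner Dense size are assumptions):

# Assumed sketch of the attention block described above.
from tensorflow.keras.layers import RepeatVector, Concatenate, Dense, Dot, Softmax

attn_repeat = RepeatVector(MAX_ENCODER_SIZE)   # repeat s(t-1) Tx times
attn_concat = Concatenate(axis=-1)             # [h_t ; s(t-1)] for every t
attn_dense1 = Dense(10, activation='tanh')
attn_dense2 = Dense(1)                         # one neuron per time step
attn_softmax = Softmax(axis=1)                 # softmax over the time axis
attn_dot = Dot(axes=1)                         # weighted sum -> context

def one_step_attention(h, s_prev):
    # h: (_, Tx, 2*M1) encoder hidden states, s_prev: (_, M2) previous decoder state
    s_prev = attn_repeat(s_prev)               # (_, Tx, M2)
    x = attn_concat([h, s_prev])               # (_, Tx, 2*M1 + M2)
    x = attn_dense1(x)
    alphas = attn_softmax(attn_dense2(x))      # (_, Tx, 1), sums to 1 over time
    context = attn_dot([alphas, h])            # (_, 1, 2*M1)
    return context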
Code example #16
def QNN_model(n_classes):
    kern = 8
    n_layers = 9
    inputs = Input(shape=(98, 40))

    # First QConv1D layer
    x = QuaternionConv1D(kern,
                         2,
                         strides=1,
                         activation="relu",
                         padding="valid",
                         use_bias=True)(inputs)
    x = PReLU()(x)

    # Second QConv1D layer
    x = QuaternionConv1D(kern * 2,
                         2,
                         strides=1,
                         activation="relu",
                         padding="valid",
                         use_bias=True)(x)
    x = PReLU()(x)

    # Third QConv1D layer
    x = QuaternionConv1D(kern * 4,
                         2,
                         strides=1,
                         activation="relu",
                         padding="valid",
                         use_bias=True)(x)
    x = PReLU()(x)

    # Fourth QConv1D layer
    x = QuaternionConv1D(kern * 8,
                         2,
                         strides=1,
                         activation="relu",
                         padding="valid",
                         use_bias=True)(x)
    x = PReLU()(x)
    """
    # Conv 1D layers (1-3)
    for i in range(n_layers//3):
        x = QuaternionConv1D(kern*2, 2, strides=1, activation="relu", padding="valid", use_bias=True)(x)
        x = PReLU()(x)
        
    # Conv 1D layers (4-6)
    for i in range(n_layers//3):
        x = QuaternionConv1D(kern*4, 2, strides=1, activation="relu", padding="valid", use_bias=True)(x)
        x = PReLU()(x)
       
    # Conv 1D layers (7-9)
    for i in range(n_layers//3):
        x = QuaternionConv1D(kern*8, 2, strides=1, activation="relu", padding="valid", use_bias=True)(x)
        x = PReLU()(x)
    """

    # Flatten layer
    flat = Flatten()(x)

    # Dense layer 1
    dense = QuaternionDense(256, activation='relu')(flat)
    # Dense layer 2
    dense2 = QuaternionDense(256, activation='relu')(dense)
    # Dense layer 3
    dense3 = QuaternionDense(256, activation='relu')(dense2)

    outputs = Dense(n_classes, activation='softmax')(dense3)

    model = Model(inputs=inputs, outputs=outputs)
    model.summary()

    return model
Code example #17
def make_yolov3_model():
    input_image = Input(shape=(None, None, 3))

    # Layer  0 => 4
    x = _conv_block(input_image, [{
        'filter': 32,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 0
    }, {
        'filter': 64,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 1
    }, {
        'filter': 32,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 2
    }, {
        'filter': 64,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 3
    }])

    # Layer  5 => 8
    x = _conv_block(x, [{
        'filter': 128,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 5
    }, {
        'filter': 64,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 6
    }, {
        'filter': 128,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 7
    }])

    # Layer  9 => 11
    x = _conv_block(x, [{
        'filter': 64,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 9
    }, {
        'filter': 128,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 10
    }])

    # Layer 12 => 15
    x = _conv_block(x, [{
        'filter': 256,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 12
    }, {
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 13
    }, {
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 14
    }])

    # Layer 16 => 36
    for i in range(7):
        x = _conv_block(x, [{
            'filter': 128,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 16 + i * 3
        }, {
            'filter': 256,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 17 + i * 3
        }])

    skip_36 = x

    # Layer 37 => 40
    x = _conv_block(x, [{
        'filter': 512,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 37
    }, {
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 38
    }, {
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 39
    }])

    # Layer 41 => 61
    for i in range(7):
        x = _conv_block(x, [{
            'filter': 256,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 41 + i * 3
        }, {
            'filter': 512,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 42 + i * 3
        }])

    skip_61 = x

    # Layer 62 => 65
    x = _conv_block(x, [{
        'filter': 1024,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 62
    }, {
        'filter': 512,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 63
    }, {
        'filter': 1024,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 64
    }])

    # Layer 66 => 74
    for i in range(3):
        x = _conv_block(x, [{
            'filter': 512,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 66 + i * 3
        }, {
            'filter': 1024,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 67 + i * 3
        }])

    # Layer 75 => 79
    x = _conv_block(x, [{
        'filter': 512,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 75
    }, {
        'filter': 1024,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 76
    }, {
        'filter': 512,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 77
    }, {
        'filter': 1024,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 78
    }, {
        'filter': 512,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 79
    }],
                    skip=False)

    # Layer 80 => 82
    yolo_82 = _conv_block(x, [{
        'filter': 1024,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 80
    }, {
        'filter': 255,
        'kernel': 1,
        'stride': 1,
        'bnorm': False,
        'leaky': False,
        'layer_idx': 81
    }],
                          skip=False)

    # Layer 83 => 86
    x = _conv_block(x, [{
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 84
    }],
                    skip=False)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_61])

    # Layer 87 => 91
    x = _conv_block(x, [{
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 87
    }, {
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 88
    }, {
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 89
    }, {
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 90
    }, {
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 91
    }],
                    skip=False)

    # Layer 92 => 94
    yolo_94 = _conv_block(x, [{
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 92
    }, {
        'filter': 255,
        'kernel': 1,
        'stride': 1,
        'bnorm': False,
        'leaky': False,
        'layer_idx': 93
    }],
                          skip=False)

    # Layer 95 => 98
    x = _conv_block(x, [{
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 96
    }],
                    skip=False)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_36])

    # Layer 99 => 106
    yolo_106 = _conv_block(x, [{
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 99
    }, {
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 100
    }, {
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 101
    }, {
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 102
    }, {
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 103
    }, {
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 104
    }, {
        'filter': 255,
        'kernel': 1,
        'stride': 1,
        'bnorm': False,
        'leaky': False,
        'layer_idx': 105
    }],
                           skip=False)

    model = Model(input_image, [yolo_82, yolo_94, yolo_106])
    return model
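_conv_block is a helper from the original repository (experiencor-style YOLOv3) and is not shown here. A sketch of what it is assumed to do, based on how it is called above (apply a list of conv specs in order, with an optional residual skip connection):

from tensorflow.keras.layers import (Conv2D, BatchNormalization, LeakyReLU,
                                     ZeroPadding2D, add)

def _conv_block(inp, convs, skip=True):
    # Assumed helper: apply each conv spec in order, optionally adding a
    # residual connection from just before the last two convolutions.
    x = inp
    count = 0
    for conv in convs:
        if count == (len(convs) - 2) and skip:
            skip_connection = x
        count += 1
        if conv['stride'] > 1:
            # darknet-style padding for strided convolutions
            x = ZeroPadding2D(((1, 0), (1, 0)))(x)
        x = Conv2D(conv['filter'], conv['kernel'],
                   strides=conv['stride'],
                   padding='valid' if conv['stride'] > 1 else 'same',
                   name='conv_' + str(conv['layer_idx']),
                   use_bias=False if conv['bnorm'] else True)(x)
        if conv['bnorm']:
            x = BatchNormalization(epsilon=0.001,
                                   name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']:
            x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
    return add([skip_connection, x]) if skip else x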
Code example #18
def nba_ARMA(node2vec_dim):

    channels = 30

    node2vec_input = Input(shape=(62, node2vec_dim))
    node2vec_Veg_input = Input(shape=(31, node2vec_dim))
    A_input = Input(shape=(62, 62))
    A_Veg_input = Input(shape=(31, 31))

    team_inputs = Input(shape=(2, ), dtype=tf.int64)
    line_input = Input(shape=(1, ))
    last_5_input = Input(shape=(10, ))
    one_hot_input = Input(shape=(60, ))

    ARMA = spektral.layers.ARMAConv(
        channels,
        order=4,
        iterations=1,
        share_weights=False,
        gcn_activation='relu',
        dropout_rate=0.2,
        activation='elu',
        use_bias=True,
        kernel_initializer='glorot_uniform',
        bias_initializer='zeros',
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None)([node2vec_input, A_input])

    ARMA_Veg = spektral.layers.ARMAConv(
        channels,
        order=4,
        iterations=1,
        share_weights=False,
        gcn_activation='relu',
        dropout_rate=0.2,
        activation='elu',
        use_bias=True,
        kernel_initializer='glorot_uniform',
        bias_initializer='zeros',
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None)([node2vec_Veg_input, A_Veg_input])

    #extracts nodes for link prediction

    game_vec = extract_team_GAT.Game_Vec(channels)(
        [team_inputs, ARMA, ARMA_Veg])

    rshp = Reshape((int(np.floor(6 * channels)), ))(game_vec)
    cat = Concatenate()([rshp, one_hot_input])

    dense1 = Dense(int(np.floor(6.5 * channels)), activation='tanh')(cat)
    drop1 = Dropout(.05)(dense1)

    dense2 = Dense(int(np.floor(2 * channels)), activation='tanh')(drop1)
    drop2 = Dropout(.05)(dense2)

    drop2 = Concatenate()([drop2, last_5_input])

    dense3 = Dense(int(np.floor(channels / 2)))(drop2)
    drop3 = Dropout(.05)(dense3)

    add_line = Concatenate()([drop3, line_input])

    prediction = Dense(1)(add_line)

    model = Model(inputs=[
        team_inputs, line_input, node2vec_input, A_input, node2vec_Veg_input,
        A_Veg_input, last_5_input, one_hot_input
    ],
                  outputs=prediction)

    return model
Code example #19
File: rnnlm.py Project: forest1102/LSTM-Test
from tensorflow.keras.utils import plot_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.losses import sparse_categorical_crossentropy
from tensorflow.keras.layers import Input, Embedding, LSTM, Dense
from tensorflow.keras.models import Model

import pickle
import numpy as np
import os
from dataset import ptb
import math

# Hyperparameter settings
batch_size = 20
wordvec_size = 100
hidden_size = 100  # number of elements in the RNN hidden-state vector
time_size = 35  # number of time steps the RNN is unrolled for
lr = 20.0
max_epoch = 4
max_grad = 0.25

input = Input(batch_shape=(batch_size, None))
output = Embedding(vocab_size, wordvec_size)(input)
output = LSTM(hidden_size,
              return_sequences=True,
              stateful=True,
              )(output)
output = Dense(vocab_size)(output)

model = Model(input, output)
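The example ends once the Model is built. A hedged sketch of how it might be compiled, consistent with the imports and the lr defined above (the loss and optimizer choice are assumptions):

# Assumed continuation: the Dense output above has no softmax, so the loss
# is applied with from_logits=True; plain SGD with the lr defined above.
model.compile(
    optimizer=SGD(learning_rate=lr),
    loss=lambda y_true, y_pred: sparse_categorical_crossentropy(
        y_true, y_pred, from_logits=True))
model.summary()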
Code example #20
def discriminator(node2vec_dim):

    channels = 40

    feature_input = Input(shape=(62, node2vec_dim))
    feature_Veg_input = Input(shape=(31, node2vec_dim))
    feature_M_input = Input(shape=(31, node2vec_dim))
    A_input = Input(shape=(62, 62))
    A_Veg_input = Input(shape=(31, 31))
    M_Graph_input = Input(shape=(31, 31))

    team_inputs = Input(shape=(2, ), dtype=tf.int64)
    line_input = Input(shape=(1, ))
    model_input = Input(shape=(1, ))
    last_5_input = Input(shape=(10, ))
    one_hot_input = Input(shape=(60, ))

    A_input_sp = extract_team_GAT.To_Sparse()(A_input)
    A_Veg_input_sp = extract_team_GAT.To_Sparse()(A_Veg_input)
    M_Graph_input_sp = extract_team_GAT.To_Sparse()(M_Graph_input)

    GIN = spektral.layers.GINConv(
        channels,
        epsilon=None,
        mlp_hidden=[channels, channels],
        mlp_activation='relu',
        aggregate='sum',
        activation=None,
        use_bias=True,
        kernel_initializer='glorot_uniform',
        bias_initializer='zeros',
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None)([feature_input, A_input_sp])

    GIN_Veg = spektral.layers.GINConv(
        channels,
        epsilon=None,
        mlp_hidden=[channels, channels],
        mlp_activation='relu',
        aggregate='sum',
        activation=None,
        use_bias=True,
        kernel_initializer='glorot_uniform',
        bias_initializer='zeros',
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None)([feature_Veg_input, A_Veg_input_sp])

    GIN_M = spektral.layers.GINConv(
        channels,
        epsilon=None,
        mlp_hidden=[channels, channels],
        mlp_activation='relu',
        aggregate='sum',
        activation=None,
        use_bias=True,
        kernel_initializer='glorot_uniform',
        bias_initializer='zeros',
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None)([feature_M_input, M_Graph_input_sp])

    game_vec = extract_team_GAT.Game_Vec_D(channels)(
        [team_inputs, GIN, GIN_Veg, GIN_M])

    rshp = Reshape((int(np.floor(8 * channels)), ))(game_vec)
    cat = Concatenate()([rshp, one_hot_input, line_input, model_input])

    dense1 = Dense(int(np.floor(8.5 * channels)), activation='tanh')(cat)
    drop1 = Dropout(.01)(dense1)

    dense2 = Dense(int(np.floor(4 * channels)), activation='tanh')(drop1)
    drop2 = Dropout(.01)(dense2)

    drop2 = Concatenate()([drop2, last_5_input])

    dense3 = Dense(int(np.floor(channels / 2)))(drop2)
    drop3 = Dropout(.01)(dense3)

    prediction = Dense(2, activation='softmax')(drop3)

    #extracts nodes for link prediction

    model = Model(inputs=[
        team_inputs, line_input, model_input, feature_input, A_input,
        feature_Veg_input, A_Veg_input, feature_M_input, M_Graph_input,
        last_5_input, one_hot_input
    ],
                  outputs=prediction)

    return model
Code example #21
File: detection.py Project: sungbeom90/taxocrproject
 def _build(self, **kwargs):
     inputs = Input(shape=[1024, 1024, 1])
     outputs = self.call(inputs)
     super(Detection_model, self).__init__(inputs=inputs,
                                           outputs=outputs,
                                           **kwargs)
Code example #22
def nba_gen(node2vec_dim):

    channels = 40

    node2vec_input = Input(shape=(62, node2vec_dim))
    node2vec_Veg_input = Input(shape=(31, node2vec_dim))
    A_input = Input(shape=(62, 62))
    A_Veg_input = Input(shape=(31, 31))

    A_input_sp = extract_team_GAT.To_Sparse()(A_input)
    A_Veg_input_sp = extract_team_GAT.To_Sparse()(A_Veg_input)

    team_inputs = Input(shape=(2, ), dtype=tf.int64)
    line_input = Input(shape=(1, ))
    last_5_input = Input(shape=(10, ))
    one_hot_input = Input(shape=(60, ))

    conv = spektral.layers.GeneralConv(
        channels=channels,
        batch_norm=True,
        dropout=0.0,
        aggregate='sum',
        activation='relu',
        use_bias=True,
        kernel_initializer='glorot_uniform',
        bias_initializer='zeros',
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None)([node2vec_input, A_input_sp])

    conv_veg = spektral.layers.GeneralConv(
        channels=channels,
        batch_norm=True,
        dropout=0.0,
        aggregate='sum',
        activation='relu',
        use_bias=True,
        kernel_initializer='glorot_uniform',
        bias_initializer='zeros',
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None)([node2vec_Veg_input, A_Veg_input_sp])

    #extracts nodes for link prediction

    game_vec = extract_team_GAT.Game_Vec(channels)(
        [team_inputs, conv, conv_veg])

    rshp = Reshape((int(np.floor(6 * channels)), ))(game_vec)
    cat = Concatenate()([rshp, one_hot_input])

    dense1 = Dense(int(np.floor(6.5 * channels)), activation='tanh')(cat)
    drop1 = Dropout(.01)(dense1)

    dense2 = Dense(int(np.floor(2 * channels)), activation='tanh')(drop1)
    drop2 = Dropout(.01)(dense2)

    drop2 = Concatenate()([drop2, last_5_input])

    dense3 = Dense(int(np.floor(channels / 2)))(drop2)
    drop3 = Dropout(.01)(dense3)

    add_line = Concatenate()([drop3, line_input])

    prediction = Dense(1)(add_line)

    model = Model(inputs=[
        team_inputs, line_input, node2vec_input, A_input, node2vec_Veg_input,
        A_Veg_input, last_5_input, one_hot_input
    ],
                  outputs=prediction)

    return model
Code example #23
File: build.py Project: Symfomany/keras
#define the CNN
def CNN(input_layer):
  conv1 = Convolution2D(16, 2, padding = 'same', activation = 'relu')(input_layer)
  pool1 = MaxPool2D(pool_size = 2)(conv1)
  
  conv2 = Convolution2D(32, 2, padding = 'same', activation = 'relu')(pool1)
  pool2 = MaxPool2D(pool_size = 2)(conv2)
    
  flat = Flatten()(pool2)
  dense = Dense(128, activation = 'relu')(flat)
    
  output = Dense(10, activation  = 'softmax', name = "output_node")(dense)
  return output



#define input layer
inpt = Input(shape = (28,28,1), name = "input_node")

#call the model
logits = CNN(inpt)

#define model
model = Model(inpt,logits)

#compile the model
model.compile(optimizer = keras.optimizers.Adam(lr = 0.0001), \
              loss = 'categorical_crossentropy', metrics = ['accuracy'])

#convert to an Estimator
# the model_dir states where the graph and checkpoint files will be saved to
estimator_model = tf.keras.estimator.model_to_estimator(keras_model = model, \
                                                        model_dir = './models')
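To actually train the converted estimator an input_fn is needed; a hedged usage sketch (the features dict key must match the Keras input layer name "input_node"; the x_train / y_train_onehot variables are assumptions):

# Assumed usage sketch: x_train is assumed to have shape (N, 28, 28, 1) and
# y_train_onehot shape (N, 10); the feature key matches the input layer name.
def train_input_fn():
    ds = tf.data.Dataset.from_tensor_slices(({"input_node": x_train},
                                              y_train_onehot))
    return ds.shuffle(1024).batch(32).repeat()

estimator_model.train(input_fn=train_input_fn, steps=1000)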
Code example #24
def build_and_load_model(model_capacity):
    """
    Build the CNN model and load the weights

    Parameters
    ----------
    model_capacity : 'tiny', 'small', 'medium', 'large', or 'full'
        String specifying the model capacity, which determines the model's
        capacity multiplier to 4 (tiny), 8 (small), 16 (medium), 24 (large),
        or 32 (full). 'full' uses the model size specified in the paper,
        and the others use a reduced number of filters in each convolutional
        layer, resulting in a smaller model that is faster to evaluate at the
        cost of slightly reduced pitch estimation accuracy.

    Returns
    -------
    model : tensorflow.keras.models.Model
        The pre-trained keras model loaded in memory
    """
    from tensorflow.keras.layers import Input, Reshape, Conv2D, BatchNormalization
    from tensorflow.keras.layers import MaxPool2D, Dropout, Permute, Flatten, Dense
    from tensorflow.keras.models import Model

    if models[model_capacity] is None:
        capacity_multiplier = {
            'tiny': 4,
            'small': 8,
            'medium': 16,
            'large': 24,
            'full': 32
        }[model_capacity]

        layers = [1, 2, 3, 4, 5, 6]
        filters = [n * capacity_multiplier for n in [32, 4, 4, 4, 8, 16]]
        widths = [512, 64, 64, 64, 64, 64]
        strides = [(4, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)]

        x = Input(shape=(1024, ), name='input', dtype='float32')
        y = Reshape(target_shape=(1024, 1, 1), name='input-reshape')(x)

        for l, f, w, s in zip(layers, filters, widths, strides):
            y = Conv2D(f, (w, 1),
                       strides=s,
                       padding='same',
                       activation='relu',
                       name="conv%d" % l)(y)
            y = BatchNormalization(name="conv%d-BN" % l)(y)
            y = MaxPool2D(pool_size=(2, 1),
                          strides=None,
                          padding='valid',
                          name="conv%d-maxpool" % l)(y)
            y = Dropout(0.25, name="conv%d-dropout" % l)(y)

        y = Permute((2, 1, 3), name="transpose")(y)
        y = Flatten(name="flatten")(y)
        y = Dense(360, activation='sigmoid', name="classifier")(y)

        model = Model(inputs=x, outputs=y)

        package_dir = os.path.dirname(os.path.realpath(__file__))
        filename = "model-{}.h5".format(model_capacity)
        model.load_weights(os.path.join(package_dir, filename))
        model.compile('adam', 'binary_crossentropy')

        models[model_capacity] = model

    return models[model_capacity]
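As built above, the network maps a batch of 1024-sample audio frames to a 360-bin pitch activation per frame (sigmoid outputs in [0, 1]). A hypothetical usage sketch, assuming the corresponding model-<capacity>.h5 weight file is present next to the module:

import numpy as np

model = build_and_load_model('tiny')
frames = np.random.randn(8, 1024).astype(np.float32)  # 8 placeholder frames
activations = model.predict(frames)
print(activations.shape)  # (8, 360)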
Code example #25
0
def build_q_network(learning_rate: float = 0.00001,
                    input_shape: tuple = (84, 84),
                    history_length: int = 4) -> Model:
    """
    Builds a dueling DQN as a Keras model. For a good overview of dueling
    DQNs (and some motivation behind their use) see:
    https://towardsdatascience.com/dueling-deep-q-networks-81ffab672751
    Args:
        learning_rate: Learning rate
        input_shape: Shape of the preprocessed frame the model sees
        history_length: Number of historical frames the agent can see
    Returns:
        A compiled Keras model
    """
    # Dueling architecture requires a non-sequential step at the end, so
    # Keras's functional API is a natural choice
    model_input = Input(shape=(input_shape[0], input_shape[1], history_length))
    x = Lambda(lambda layer: layer / 255)(model_input)  # normalize by 255

    x = Conv2D(32, (8, 8),
               strides=4,
               kernel_initializer=VarianceScaling(scale=2.),
               activation='relu',
               use_bias=False)(x)
    x = Conv2D(64, (4, 4),
               strides=2,
               kernel_initializer=VarianceScaling(scale=2.),
               activation='relu',
               use_bias=False)(x)
    x = Conv2D(64, (3, 3),
               strides=1,
               kernel_initializer=VarianceScaling(scale=2.),
               activation='relu',
               use_bias=False)(x)
    x = Conv2D(1024, (7, 7),
               strides=1,
               kernel_initializer=VarianceScaling(scale=2.),
               activation='relu',
               use_bias=False)(x)

    # Split into value and advantage streams
    val_stream, adv_stream = Lambda(lambda w: tf.split(w, 2, 3))(
        x)  # custom splitting layer

    # State value estimator
    val_stream = Flatten()(val_stream)
    val = Dense(1, kernel_initializer=VarianceScaling(scale=2.))(val_stream)

    # Advantage value estimator
    # Each of the four actions has its own advantage value
    adv_stream = Flatten()(adv_stream)
    adv = Dense(4, kernel_initializer=VarianceScaling(scale=2.))(adv_stream)

    # Combine streams into Q-Values
    reduce_mean = Lambda(lambda w: tf.reduce_mean(w, axis=1, keepdims=True))
    q_vals = Add()([val, Subtract()([adv, reduce_mean(adv)])])

    # Build model
    model = Model(model_input, q_vals)
    model.compile(Adam(learning_rate), loss=tf.keras.losses.Huber())

    return model
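The Lambda/Subtract/Add layers at the end implement the standard dueling aggregation Q(s, a) = V(s) + (A(s, a) - mean over a' of A(s, a')). A toy NumPy check of that arithmetic with made-up numbers, not actual network outputs:

import numpy as np

val = np.array([[0.5]])                   # V(s), shape (batch, 1)
adv = np.array([[2.0, 0.0, -1.0, 3.0]])   # A(s, a), shape (batch, 4)
q_vals = val + (adv - adv.mean(axis=1, keepdims=True))
print(q_vals)  # [[ 1.5 -0.5 -1.5  2.5]]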
Code example #26
0
    pad = 'same'
    branch0 = conv2d(x, 192, 1, 1, pad, True, name=name + 'b0')
    branch1 = conv2d(x, 192, 1, 1, pad, True, name=name + 'b1_1')
    branch1 = conv2d(branch1, 224, [1, 3], 1, pad, True, name=name + 'b1_2')
    branch1 = conv2d(branch1, 256, [3, 1], 1, pad, True, name=name + 'b1_3')
    branches = [branch0, branch1]
    mixed = Concatenate(axis=3, name=name + '_mixed')(branches)
    filt_exp_1x1 = conv2d(mixed, 2048, 1, 1, pad, False, name=name + 'fin1x1')
    final_lay = Lambda(lambda inputs, scale: inputs[0] + inputs[1] * scale,
                       output_shape=backend.int_shape(x)[1:],
                       arguments={'scale': scale},
                       name=name + 'act_scaling')([x, filt_exp_1x1])
    return final_lay

#stem cell
img_input = Input(shape=(299, 299, 3))

x = conv2d(img_input, 32, 3, 2, 'valid', True, name='conv1')
x = conv2d(x, 32, 3, 1, 'valid', True, name='conv2')
x = conv2d(x, 64, 3, 1, 'valid', True, name='conv3')

x_11 = MaxPooling2D(3,
                    strides=2,
                    padding='valid',
                    name='stem_br_11' + '_maxpool_1')(x)
x_12 = conv2d(x, 96, 3, 2, 'valid', True, name='stem_br_12')

x = Concatenate(axis=3, name='stem_concat_1')([x_11, x_12])

x_21 = conv2d(x, 64, 1, 1, 'same', True, name='stem_br_211')
x_21 = conv2d(x_21, 64, [1, 7], 1, 'same', True, name='stem_br_212')
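This snippet relies on a conv2d helper defined elsewhere in the file. A plausible reconstruction, assuming the sixth positional argument toggles batch normalization plus ReLU; the actual upstream helper may differ:

from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation

def conv2d(x, filters, kernel_size, strides, padding, apply_act, name=None):
    # Hypothetical sketch: convolution, then optional BN + ReLU.
    x = Conv2D(filters, kernel_size, strides=strides, padding=padding,
               use_bias=False, name=name)(x)
    if apply_act:
        x = BatchNormalization(axis=3, name=name + '_bn' if name else None)(x)
        x = Activation('relu', name=name + '_relu' if name else None)(x)
    return x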
Code example #27
0
    def build(self):
        get_custom_objects().update({'mish': Mish(mish)})

        input_sig = Input(shape=self.input_shape)
        x = self._make_stem(input_sig, stem_width=self.stem_width, deep_stem=self.deep_stem)

        if self.preact is False:
            x = BatchNormalization(axis=self.channel_axis, epsilon=1.001e-5)(x)
            x = Activation(self.active)(x)
        if self.verbose:
            print("stem_out", x.shape)

        x = MaxPool2D(pool_size=3, strides=2, padding="same", data_format="channels_last")(x)
        if self.verbose:
            print("MaxPool2D out", x.shape)

        if self.preact is True:
            x = BatchNormalization(axis=self.channel_axis, epsilon=1.001e-5)(x)
            x = Activation(self.active)(x)

        if self.using_cb:
            second_x = x
            second_x = self._make_layer(x, blocks=self.blocks_set[0], filters=64, stride=1, is_first=False)
            second_x_tmp = self._make_Composite_layer(second_x, filters=x.shape[-1], upsample=False)
            if self.verbose: print('layer 0 db_com', second_x_tmp.shape)
            x = Add()([second_x_tmp, x])
        x = self._make_layer(x, blocks=self.blocks_set[0], filters=64, stride=1, is_first=False)
        if self.verbose:
            print("-" * 5, "layer 0 out", x.shape, "-" * 5)

        b1_b3_filters = [64, 128, 256, 512]
        for i in range(3):
            idx = i + 1
            if self.using_cb:
                second_x = self._make_layer(x, blocks=self.blocks_set[idx], filters=b1_b3_filters[idx], stride=2)
                second_x_tmp = self._make_Composite_layer(second_x, filters=x.shape[-1])
                if self.verbose: print('layer {} db_com out {}'.format(idx, second_x_tmp.shape))
                x = Add()([second_x_tmp, x])
            x = self._make_layer(x, blocks=self.blocks_set[idx], filters=b1_b3_filters[idx], stride=2)
            if self.verbose: print('----- layer {} out {} -----'.format(idx, x.shape))

        x = GlobalAveragePooling2D(name='avg_pool')(x)
        if self.verbose:
            print("pool_out:", x.shape)  # remove the concats var

        if self.dropout_rate > 0:
            x = Dropout(self.dropout_rate, noise_shape=None)(x)

        fc_out = Dense(self.n_classes, kernel_initializer="he_normal", use_bias=False, name="fc_NObias")(
            x)  # replace concats to x
        if self.verbose:
            print("fc_out:", fc_out.shape)

        if self.fc_activation:
            fc_out = Activation(self.fc_activation)(fc_out)

        model = models.Model(inputs=input_sig, outputs=fc_out)

        if self.verbose:
            print("Resnest builded with input {}, output{}".format(input_sig.shape, fc_out.shape))
            print("-------------------------------------------")
            print("")

        return model
Code example #28
0
model = Model(inputs=model_from_file.input, outputs=top_model(model_from_file.output))
model.summary()

for layer in model.layers[:-1]:
    layer.trainable = False

# for layer in model.layers:
#     layer.trainable = True


IMAGE_SIZE = 150
NUM_EPOCHS = 30
STEPS_PER_EPOCH = 10
input_shape = (IMAGE_SIZE, IMAGE_SIZE, 3)
A = Input(shape=input_shape, name = 'anchor')
P = Input(shape=input_shape, name = 'anchorPositive')
N = Input(shape=input_shape, name = 'anchorNegative')

enc_A = model(A)
enc_P = model(P)
enc_N = model(N)

opt = SGD(lr=0.0001, momentum=0.9)

# opt = Adam(learning_rate=0.0001)

tripletModel = Model(inputs=[A, P, N], outputs=[enc_A, enc_P, enc_N])
tripletModel.compile(optimizer=opt, loss=triplet_loss)

gen = batch_generator(10)
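triplet_loss is defined elsewhere in the project. A common standalone formulation is sketched below as an assumption, using a hinge on squared embedding distances; it is not necessarily the author's version, which has to be wired to the three-output model above:

import tensorflow as tf

def triplet_loss_sketch(anchor, positive, negative, margin=0.2):
    # Pull the anchor toward the positive and push it away from the
    # negative by at least `margin` in squared Euclidean distance.
    pos_dist = tf.reduce_sum(tf.square(anchor - positive), axis=-1)
    neg_dist = tf.reduce_sum(tf.square(anchor - negative), axis=-1)
    return tf.reduce_mean(tf.maximum(pos_dist - neg_dist + margin, 0.0))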
Code example #29
0
File: main.py Project: omnious/graph_2D_CNN
def main():

    my_date_time = "_".join(
        datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S").split())

    parameters = {
        "path_root": path_root,
        "dataset": dataset,
        "p": p,
        "q": q,
        "definition": definition,
        "n_channels": n_channels,
        "n_folds": n_folds,
        "n_repeats": n_repeats,
        "batch_size": batch_size,
        "epochs": epochs,
        "my_patience": my_patience,
        "drop_rate": drop_rate,
        "data_format": data_format,
        "my_optimizer": my_optimizer,
    }

    name_save = path_root + "/results/" + dataset + "_augmentation_" + my_date_time

    with open(name_save + "_parameters.json", "w") as my_file:
        json.dump(parameters, my_file, sort_keys=True, indent=4)

    print("========== parameters defined and saved to disk ==========")

    regexp_p = re.compile("p=" + p)
    regexp_q = re.compile("q=" + q)

    print("========== loading labels ==========")

    with open(
            path_root + "classes/" + dataset + "/" + dataset + "_classes.txt",
            "r") as f:
        ys = f.read().splitlines()
        ys = [int(elt) for elt in ys]

    num_classes = len(list(set(ys)))

    print("classes:", list(set(ys)))

    print("converting to 0-based index")

    if 0 not in list(set(ys)):
        if -1 not in list(set(ys)):
            ys = [y - 1 for y in ys]
        else:
            ys = [1 if y == 1 else 0 for y in ys]

    print("classes:", list(set(ys)))

    print("========== loading tensors ==========")

    path_read = path_root + "tensors/" + dataset + "/node2vec_hist/"
    file_names = [
        elt for elt in os.listdir(path_read)
        if (str(definition) + ":1" in elt and regexp_p.search(elt)
            and regexp_q.search(elt) and elt.count("p=") == 1
            and elt.count("q=") == 1 and elt.split("_")[-1:][0][0].isdigit())
    ]  # make sure the right files are selected
    file_names.sort(key=natural_keys)
    print(len(file_names))
    print(file_names[:5])
    print(file_names[-5:])

    print("ensuring tensor-label matching")
    kept_idxs = [int(elt.split("_")[-1].split(".")[0]) for elt in file_names]
    print(len(kept_idxs))
    print(kept_idxs[:5])
    print(kept_idxs[-5:])
    print("removing", len(ys) - len(kept_idxs), "labels")
    ys = [y for idx, y in enumerate(ys) if idx in kept_idxs]

    print(len(file_names) == len(ys))

    print("converting labels to array")
    ys = np.array(ys)

    print("transforming integer labels into one-hot vectors")
    ys = to_categorical(ys, num_classes)

    tensors = []
    for name in file_names:
        tensor = np.load(path_read + name)
        tensors.append(tensor[:n_channels, :, :])

    tensors = np.array(tensors)
    tensors = tensors.astype("float32")

    print("tensors shape:", tensors.shape)

    print("========== getting image dimensions ==========")

    img_rows, img_cols = int(tensors.shape[2]), int(tensors.shape[3])
    input_shape = (int(tensors.shape[1]), img_rows, img_cols)

    print("input shape:", input_shape)

    print("========== shuffling data ==========")

    shuffled_idxs = random.sample(range(tensors.shape[0]),
                                  int(tensors.shape[0]))  # sample without replacement
    tensors = tensors[shuffled_idxs]
    ys = ys[shuffled_idxs]

    print("========== conducting", n_folds, "fold cross validation ==========")
    print("repeating each fold:", n_repeats, "times")

    folds = np.array_split(tensors, n_folds, axis=0)

    print("fold sizes:", [len(fold) for fold in folds])

    folds_labels = np.array_split(ys, n_folds, axis=0)

    outputs = []
    histories = []

    for i in range(n_folds):

        t = time.time()

        x_train = np.concatenate(
            [fold for j, fold in enumerate(folds) if j != i], axis=0)
        x_test = [fold for j, fold in enumerate(folds) if j == i]

        y_train = np.concatenate(
            [y for j, y in enumerate(folds_labels) if j != i], axis=0)
        y_test = [y for j, y in enumerate(folds_labels) if j == i]

        for repeating in range(n_repeats):

            print("clearing Keras session")
            clear_session()

            my_input = Input(shape=input_shape, dtype="float32")

            conv_1 = Conv2D(
                64,
                kernel_size=(3, 3),
                padding="valid",
                activation="relu",
                data_format=data_format,
            )(my_input)

            pooled_conv_1 = MaxPooling2D(pool_size=(2, 2),
                                         data_format=data_format)(conv_1)

            pooled_conv_1_dropped = Dropout(drop_rate)(pooled_conv_1)

            conv_11 = Conv2D(
                96,
                kernel_size=(3, 3),
                padding="valid",
                activation="relu",
                data_format=data_format,
            )(pooled_conv_1_dropped)

            pooled_conv_11 = MaxPooling2D(pool_size=(2, 2),
                                          data_format=data_format)(conv_11)

            pooled_conv_11_dropped = Dropout(drop_rate)(pooled_conv_11)
            pooled_conv_11_dropped_flat = Flatten()(pooled_conv_11_dropped)

            conv_2 = Conv2D(
                64,
                kernel_size=(4, 4),
                padding="valid",
                activation="relu",
                data_format=data_format,
            )(my_input)

            pooled_conv_2 = MaxPooling2D(pool_size=(2, 2),
                                         data_format=data_format)(conv_2)
            pooled_conv_2_dropped = Dropout(drop_rate)(pooled_conv_2)

            conv_22 = Conv2D(
                96,
                kernel_size=(4, 4),
                padding="valid",
                activation="relu",
                data_format=data_format,
            )(pooled_conv_2_dropped)

            pooled_conv_22 = MaxPooling2D(pool_size=(2, 2),
                                          data_format=data_format)(conv_22)
            pooled_conv_22_dropped = Dropout(drop_rate)(pooled_conv_22)
            pooled_conv_22_dropped_flat = Flatten()(pooled_conv_22_dropped)

            conv_3 = Conv2D(
                64,
                kernel_size=(5, 5),
                padding="valid",
                activation="relu",
                data_format=data_format,
            )(my_input)

            pooled_conv_3 = MaxPooling2D(pool_size=(2, 2),
                                         data_format=data_format)(conv_3)
            pooled_conv_3_dropped = Dropout(drop_rate)(pooled_conv_3)

            conv_33 = Conv2D(
                96,
                kernel_size=(5, 5),
                padding="valid",
                activation="relu",
                data_format=data_format,
            )(pooled_conv_3_dropped)

            pooled_conv_33 = MaxPooling2D(pool_size=(2, 2),
                                          data_format=data_format)(conv_33)
            pooled_conv_33_dropped = Dropout(drop_rate)(pooled_conv_33)
            pooled_conv_33_dropped_flat = Flatten()(pooled_conv_33_dropped)

            conv_4 = Conv2D(
                64,
                kernel_size=(6, 6),
                padding="valid",
                activation="relu",
                data_format=data_format,
            )(my_input)

            pooled_conv_4 = MaxPooling2D(pool_size=(2, 2),
                                         data_format=data_format)(conv_4)
            pooled_conv_4_dropped = Dropout(drop_rate)(pooled_conv_4)

            conv_44 = Conv2D(
                96,
                kernel_size=(6, 6),
                padding="valid",
                activation="relu",
                data_format=data_format,
            )(pooled_conv_4_dropped)

            pooled_conv_44 = MaxPooling2D(pool_size=(2, 2),
                                          data_format=data_format)(conv_44)
            pooled_conv_44_dropped = Dropout(drop_rate)(pooled_conv_44)
            pooled_conv_44_dropped_flat = Flatten()(pooled_conv_44_dropped)

            merge = Concatenate()([
                pooled_conv_11_dropped_flat,
                pooled_conv_22_dropped_flat,
                pooled_conv_33_dropped_flat,
                pooled_conv_44_dropped_flat,
            ])

            merge_dropped = Dropout(drop_rate)(merge)

            dense = Dense(128, activation="relu")(merge_dropped)

            dense_dropped = Dropout(drop_rate)(dense)

            prob = Dense(num_classes, activation="softmax")(dense_dropped)

            # instantiate model
            model = Model(my_input, prob)

            # configure model for training
            model.compile(
                loss="categorical_crossentropy",
                optimizer=my_optimizer,
                metrics=["accuracy"],
            )

            print("model compiled")
            plot_model(model, to_file="model.png")
            print(model.summary())

            early_stopping = EarlyStopping(
                # keep training as long as accuracy on the validation set improves
                monitor="val_accuracy",
                patience=my_patience,
                mode="max",
            )

            model.fit(
                x_train,
                y_train,
                batch_size=batch_size,
                epochs=epochs,
                validation_data=(x_test, y_test),
                callbacks=[early_stopping],
            )

            # save [min loss,max acc] on test set
            max_acc = max(model.history.history["val_accuracy"])
            max_idx = model.history.history["val_accuracy"].index(max_acc)
            output = [model.history.history["val_loss"][max_idx], max_acc]
            # convert outputs from float32 to float for json serialization
            outputs.append([float(x) for x in output])

            # also save full history for sanity checking
            float_dict = dict()
            for key, value in model.history.history.items():
                float_dict[key] = [float(x) for x in value]
            histories.append(float_dict)

        print(
            "**** fold",
            i + 1,
            "done in " + str(math.ceil(time.time() - t)) + " second(s) ****",
        )

    # save results to disk
    with open(name_save + "_results.json", "w") as my_file:
        json.dump(
            {
                "outputs": outputs,
                "histories": histories
            },
            my_file,
            sort_keys=False,
            indent=4,
        )

    print("========== results saved to disk ==========")
Code example #30
0
def bayesian_vnet(
    n_classes=1,
    input_shape=(256, 256, 256, 1),
    kernel_size=3,
    prior_fn=prior_fn_for_bayesian(),
    kernel_posterior_fn=default_mean_field_normal_fn(),
    kld=None,
    activation="relu",
    padding="SAME",
):

    inputs = Input(input_shape)

    conv1, pool1 = down_stage(inputs,
                              16,
                              kernel_size=kernel_size,
                              activation=activation,
                              padding=padding)
    conv2, pool2 = down_stage(pool1,
                              32,
                              kernel_size=kernel_size,
                              activation=activation,
                              padding=padding)
    conv3, pool3 = down_stage(pool2,
                              64,
                              kernel_size=kernel_size,
                              activation=activation,
                              padding=padding)
    conv4, _ = down_stage(pool3,
                          128,
                          kernel_size=kernel_size,
                          activation=activation,
                          padding=padding)

    conv5 = up_stage(
        conv4,
        conv3,
        64,
        prior_fn,
        kernel_posterior_fn,
        kld,
        kernel_size=kernel_size,
        activation=activation,
        padding=padding,
    )
    conv6 = up_stage(
        conv5,
        conv2,
        32,
        prior_fn,
        kernel_posterior_fn,
        kld,
        kernel_size=kernel_size,
        activation=activation,
        padding=padding,
    )
    conv7 = up_stage(
        conv6,
        conv1,
        16,
        prior_fn,
        kernel_posterior_fn,
        kld,
        kernel_size=kernel_size,
        activation=activation,
        padding=padding,
    )

    conv8 = end_stage(
        conv7,
        prior_fn,
        kernel_posterior_fn,
        kld,
        n_classes=n_classes,
        kernel_size=kernel_size,
        activation=activation,
        padding=padding,
    )

    return Model(inputs=inputs, outputs=conv8)