コード例 #1
0
ファイル: yolov3.py プロジェクト: tienthienhd/yolov3
def yolov3_net(cfg_file, num_classes):
    """
    Build a YOLOv3 Keras model from a Darknet config file.

    :param cfg_file: path to the Darknet ``.cfg`` network description
    :param num_classes: number of object classes per detection
    :return: a Keras ``Model`` mapping an input image to the concatenated
        per-scale predictions of shape ``(batch, n_boxes, 5 + num_classes)``
    """
    blocks = parse_cfg(cfg_file)

    # blocks[0] is the [net] section carrying the expected input geometry.
    model_size = int(blocks[0]['width']), int(blocks[0]['height']), int(
        blocks[0]['channels'])

    outputs = {}          # layer index -> output tensor (route/shortcut lookups)
    output_filters = []   # channel count produced by each layer, in order
    filters = []
    out_pred = []
    scale = 0

    inputs = input_image = Input(shape=model_size)
    inputs = inputs / 255.0  # scale raw pixels to [0, 1]
    for i, block in enumerate(blocks[1:]):
        if block['type'] == 'convolutional':
            activation = block['activation']
            filters = int(block['filters'])
            kernel_size = int(block['size'])
            strides = int(block['stride'])

            if strides > 1:
                # Downsampling: pad top/left so the 'valid' conv below
                # reproduces Darknet's downsampling geometry.
                inputs = ZeroPadding2D(((1, 0), (1, 0)))(inputs)

            inputs = Conv2D(
                filters,
                kernel_size,
                strides=strides,
                padding='valid' if strides > 1 else 'same',
                name='conv_' + str(i),
                use_bias=False if "batch_normalize" in block else True)(inputs)
            if "batch_normalize" in block:
                inputs = BatchNormalization(name="batch_normalize_" +
                                            str(i))(inputs)
            if activation == 'leaky':
                inputs = LeakyReLU(alpha=0.1, name="leaky_" + str(i))(inputs)

        elif block['type'] == 'upsample':
            stride = int(block['stride'])
            inputs = UpSampling2D(stride)(inputs)

        elif block['type'] == 'route':
            block['layers'] = block['layers'].split(',')
            start = int(block['layers'][0])

            if len(block['layers']) > 1:
                # The second index may be absolute; make it relative to i.
                end = int(block['layers'][1]) - i
                # Fixed: the filter count must come from the same layer whose
                # tensor is concatenated below (i + end); the original read
                # output_filters[end], which indexes a different layer.
                filters = output_filters[i + start] + output_filters[i + end]
                inputs = tf.concat([outputs[i + start], outputs[i + end]],
                                   axis=-1)
            else:
                filters = output_filters[i + start]
                inputs = outputs[i + start]

        elif block['type'] == 'shortcut':
            from_ = int(block['from'])
            inputs = outputs[i - 1] + outputs[i + from_]

        elif block['type'] == 'yolo':
            mask = block['mask'].split(',')
            mask = [int(x) for x in mask]
            anchors = block['anchors'].split(',')
            anchors = [int(x) for x in anchors]
            anchors = [(anchors[i], anchors[i + 1])
                       for i in range(0, len(anchors), 2)]
            anchors = [anchors[i] for i in mask]
            n_anchors = len(anchors)

            out_shape = inputs.get_shape().as_list()

            # Flatten the detection grid so each anchor box is one row.
            inputs = tf.reshape(
                inputs,
                [-1, n_anchors * out_shape[1] * out_shape[2], 5 + num_classes])
            box_centers = inputs[:, :, 0:2]
            box_shapes = inputs[:, :, 2:4]
            confidence = inputs[:, :, 4:5]
            classes = inputs[:, :, 5:5 + num_classes]

            # Refine bounding boxes: sigmoid squashes offsets and scores.
            box_centers = tf.sigmoid(box_centers)
            confidence = tf.sigmoid(confidence)
            classes = tf.sigmoid(classes)

            anchors = tf.tile(anchors, [out_shape[1] * out_shape[2], 1])
            box_shapes = tf.exp(box_shapes) * tf.cast(anchors,
                                                      dtype=tf.float32)

            # Per-cell grid offsets, repeated for every anchor.
            x = tf.range(out_shape[1], dtype=tf.float32)
            y = tf.range(out_shape[2], dtype=tf.float32)

            cx, cy = tf.meshgrid(x, y)
            cx = tf.reshape(cx, (-1, 1))
            cy = tf.reshape(cy, (-1, 1))
            cxy = tf.concat([cx, cy], axis=-1)
            cxy = tf.tile(cxy, [1, n_anchors])
            cxy = tf.reshape(cxy, [1, -1, 2])

            # Fixed: the second stride must divide input dim 2 by output
            # dim 2; the original divided input dim 1 by out_shape[2],
            # which is wrong whenever width != height.
            strides = (input_image.get_shape().as_list()[1] // out_shape[1],
                       input_image.get_shape().as_list()[2] // out_shape[2])

            box_centers = (box_centers + cxy) * strides

            prediction = tf.concat(
                [box_centers, box_shapes, confidence, classes], axis=-1)
            if scale:
                # Later detection scales are appended along the box axis.
                out_pred = tf.concat([out_pred, prediction], axis=1)
                scale += 1
            else:
                out_pred = prediction
                scale = 1
        outputs[i] = inputs
        output_filters.append(filters)
    model = Model(input_image, out_pred)
    print(model.outputs)
    return model
コード例 #2
0
mixed_precision.set_policy(policy)

datagen = ImageDataGenerator(rescale=1. / 255, validation_split=0.2, horizontal_flip=True)
train_csv = pd.read_csv(r"/content/train.csv")
train_csv["label"] = train_csv["label"].astype(str)

base_model = tf.keras.applications.ResNet50(weights='imagenet', input_shape=(512, 512, 3), include_top=True)

base_model.trainable = True

model = tf.keras.Sequential([

	tf.keras.layers.Input((512, 512, 3)),
	tf.keras.layers.BatchNormalization(renorm=True),
	base_model,
	BatchNormalization(),
	tf.keras.layers.LeakyReLU(),
	tf.keras.layers.Flatten(),
	tf.keras.layers.Dense(512),
	BatchNormalization(),

	tf.keras.layers.LeakyReLU(),
	tf.keras.layers.Dense(256),
	BatchNormalization(),

	tf.keras.layers.LeakyReLU(),

	tf.keras.layers.Dense(128),
	BatchNormalization(),

	tf.keras.layers.LeakyReLU(),
コード例 #3
0
def lrelu_bn(inputs):
    """Apply a LeakyReLU activation followed by batch normalization."""
    activated = LeakyReLU()(inputs)
    normalized = BatchNormalization()(activated)
    return normalized
コード例 #4
0
def predict_model(input_size, epochs=200, lr=1e-1):
    """Build the demand-forecasting model.

    Numeric features enter through ``inputs``; every categorical
    calendar/id feature gets its own small embedding. Everything is
    concatenated and passed through a dense tower with one additive
    (residual-style) skip connection back to the concatenated input.

    :param input_size: shape of the numeric feature vector
    :param epochs: unused here; kept for interface compatibility
    :param lr: unused here; kept for interface compatibility
    :return: an uncompiled Keras ``Model`` taking a dict of named inputs
    """
    inputs = Input(shape=input_size, name='inputs')

    # One scalar categorical input per embedded feature.
    wday_input = Input(shape=(1,), name='wday')
    month_input = Input(shape=(1,), name='month')
    year_input = Input(shape=(1,), name='year')
    mday_input = Input(shape=(1,), name='mday')
    quarter_input = Input(shape=(1,), name='quarter')
    event_name_1_input = Input(shape=(1,), name='event_name_1')
    event_type_1_input = Input(shape=(1,), name='event_type_1')
    event_name_2_input = Input(shape=(1,), name='event_name_2')
    event_type_2_input = Input(shape=(1,), name='event_type_2')
    item_id_input = Input(shape=(1,), name='item_id')
    dept_id_input = Input(shape=(1,), name='dept_id')
    store_id_input = Input(shape=(1,), name='store_id')
    cat_id_input = Input(shape=(1,), name='cat_id')
    state_id_input = Input(shape=(1,), name='state_id')
    snap_CA_input = Input(shape=(1,), name='snap_CA')
    snap_TX_input = Input(shape=(1,), name='snap_TX')
    snap_WI_input = Input(shape=(1,), name='snap_WI')

    # Embedding sizes are (cardinality, dim); Flatten drops the length-1 axis.
    wday_emb = Flatten()(Embedding(7, 1)(wday_input))
    month_emb = Flatten()(Embedding(12, 2)(month_input))
    year_emb = Flatten()(Embedding(6, 1)(year_input))
    mday_emb = Flatten()(Embedding(31, 2)(mday_input))
    quarter_emb = Flatten()(Embedding(4, 1)(quarter_input))
    event_name_1_emb = Flatten()(Embedding(31, 2)(event_name_1_input))
    event_type_1_emb = Flatten()(Embedding(5, 1)(event_type_1_input))
    event_name_2_emb = Flatten()(Embedding(5, 1)(event_name_2_input))
    event_type_2_emb = Flatten()(Embedding(5, 1)(event_type_2_input))

    item_id_emb = Flatten()(Embedding(3049, 4)(item_id_input))
    dept_id_emb = Flatten()(Embedding(7, 1)(dept_id_input))
    store_id_emb = Flatten()(Embedding(10, 1)(store_id_input))
    cat_id_emb = Flatten()(Embedding(6, 1)(cat_id_input))
    state_id_emb = Flatten()(Embedding(3, 1)(state_id_input))

    # Fixed: the original listed `month_emb` twice in this concatenation;
    # the duplicate is dropped so each feature enters the network once.
    input_data = Concatenate(-1)([inputs, wday_emb, month_emb, year_emb,
                                  mday_emb, quarter_emb, event_name_1_emb,
                                  event_type_1_emb, event_name_2_emb,
                                  event_type_2_emb, item_id_emb, dept_id_emb,
                                  store_id_emb, cat_id_emb, state_id_emb])

    # Main dense tower.
    x = Dense(512, activation='relu')(input_data)
    x = BatchNormalization()(x)
    x = Dropout(0.3)(x)
    x = Dense(256, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dropout(0.3)(x)
    x = Dense(128, activation='relu')(x)

    # Deep branch projected back to the input width for the residual add.
    x_deep = Dense(64, activation='relu')(x)
    x_deep = BatchNormalization()(x_deep)
    x_deep = Dropout(0.3)(x_deep)
    x_deep = Dense(input_data.shape[1], activation='relu')(x_deep)

    # Residual-style skip connection.
    x = input_data + x_deep

    x = Dense(32, activation='relu')(x)
    x = BatchNormalization()(x)
    # Fixed: the original applied this Dropout to the stale `x_deep`
    # tensor, which was a no-op on the final output path.
    x = Dropout(0.3)(x)
    x = Dense(16, activation='relu')(x)

    outputs = Dense(1, activation='relu')(x)

    # NOTE(review): snap_CA/snap_TX/snap_WI inputs are created above but
    # intentionally left out of this dict, matching the original code.
    input_dic = {
        'inputs': inputs, 'wday': wday_input, 'month': month_input, 'year': year_input,
        'mday': mday_input, 'quarter': quarter_input, 'event_name_1': event_name_1_input,
        'event_type_1': event_type_1_input, 'event_name_2': event_name_2_input,
        'event_type_2': event_type_2_input, 'item_id': item_id_input, 'dept_id': dept_id_input,
        'store_id': store_id_input, 'cat_id': cat_id_input, 'state_id': state_id_input,
    }
    model = Model(input_dic, outputs)

    return model
コード例 #5
0
def Conv_BN_ReLU(planes, kernel_size, strides=(1, 1, 1), padding='same', use_bias=False):
    """Return a Conv3D -> BatchNormalization -> ReLU sequential block."""
    layers = [
        Conv3D(planes, kernel_size, strides=strides, padding=padding, use_bias=use_bias),
        BatchNormalization(),
        ReLU(),
    ]
    return Sequential(layers)
コード例 #6
0
def _create_se_resnet(classes, img_input, include_top, initial_conv_filters,
                      filters, depth, width, bottleneck, weight_decay,
                      pooling):
    """Creates a SE ResNet model with specified parameters.

    Args:
        classes: number of output classes (used when `include_top` is True)
        img_input: input tensor the graph is built on
        include_top: flag to include the final dense classification layer
        initial_conv_filters: number of features for the initial convolution
        filters: number of filters per block, defined as a list,
            e.g. filters = [64, 128, 256, 512]
        depth: number of layers in each block, defined as a list.
            ResNet-50  = [3, 4, 6, 3]
            ResNet-101 = [3, 6, 23, 3]
            ResNet-152 = [3, 8, 36, 3]
        width: width multiplier for the network (for Wide ResNet)
        bottleneck: adds a bottleneck conv to reduce computation
        weight_decay: weight decay (l2 norm)
        pooling: optional pooling mode for feature extraction when
            `include_top` is False: None keeps the 4D conv output,
            'avg' applies global average pooling, 'max' global max pooling.

    Returns: the output tensor of the assembled graph
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    block_depths = list(depth)
    # Both residual-unit helpers share a signature; pick one up front.
    block_fn = _resnet_bottleneck_block if bottleneck else _resnet_block

    # Block 1: initial strided 7x7 convolution plus max pooling.
    x = Conv2D(initial_conv_filters, (7, 7),
               padding='same',
               use_bias=False,
               strides=(2, 2),
               kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay))(img_input)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    # Block 2: projection group, no further downsampling.
    for _ in range(block_depths[0]):
        x = block_fn(x, filters[0], width)

    # Blocks 3..N: each group opens with a strided (downsampling) unit,
    # followed by depth-1 unstrided units.
    for group in range(1, len(block_depths)):
        x = block_fn(x, filters[group], width, strides=(2, 2))
        for _ in range(block_depths[group] - 1):
            x = block_fn(x, filters[group], width)

    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    if include_top:
        x = GlobalAveragePooling2D()(x)
        return Dense(classes,
                     use_bias=False,
                     kernel_regularizer=l2(weight_decay),
                     activation='softmax')(x)

    if pooling == 'avg':
        x = GlobalAveragePooling2D()(x)
    elif pooling == 'max':
        x = GlobalMaxPooling2D()(x)
    return x
def build_model_hpconfig(args):
    """Build and compile the hybrid CNN/RNN protein-structure model from
    argparse hyperparameters.

    :param args: argparse namespace carrying every hyperparameter
        (filter counts, window size, dropout rates, recurrent layer type
        and sizes, optimizer choice, learning rate, dense layer sizes)
    :return: a compiled Keras ``Model`` with two inputs (sequence +
        profile features) and one softmax output over 8 classes, or
        ``None`` when an unknown recurrent layer type is requested
    """
    # Parse and cast hyperparameter variables from argparse.
    conv1_filters = int(args.conv1_filters)
    conv2_filters = int(args.conv2_filters)
    conv3_filters = int(args.conv3_filters)
    window_size = int(args.window_size)
    kernel_regularizer = args.kernel_regularizer
    pool_size = int(args.pool_size)
    conv2d_activation = args.conv2d_activation
    conv2d_dropout = float(args.conv2d_dropout)
    recurrent_layer1 = int(args.recurrent_layer1)
    recurrent_layer2 = int(args.recurrent_layer2)
    recurrent_dropout = float(args.recurrent_dropout)
    after_recurrent_dropout = float(args.after_recurrent_dropout)
    recurrent_recurrent_dropout = float(args.recurrent_recurrent_dropout)
    optimizer = args.optimizer
    learning_rate = float(args.learning_rate)
    bidirection = args.bidirection
    recurrent_layer = args.recurrent_layer
    dense_dropout = float(args.dense_dropout)
    dense_1 = int(args.dense_1)
    dense_2 = int(args.dense_2)
    dense_3 = int(args.dense_3)
    dense_4 = int(args.dense_4)

    # Main input is the length of the amino acid protein sequence (700,).
    main_input = Input(shape=(700,), dtype='float32', name='main_input')

    # Embedding layer used as input to the neural network.
    embed = Embedding(output_dim=21, input_dim=21, input_length=700)(main_input)

    # Secondary input is the protein profile features.
    auxiliary_input = Input(shape=(700, 21), name='aux_input')

    # Concatenate input layers.
    concat = Concatenate(axis=-1)([embed, auxiliary_input])

    # --- Three parallel convolutional towers ---
    # Fixed: the original rebound `conv_dropout` (a float rate) to the
    # Dropout *output tensor*, so towers 2 and 3 passed a tensor as the
    # dropout rate. Use the parsed `conv2d_dropout` rate everywhere and
    # give the tensors distinct names.
    conv_layer1 = Convolution1D(conv1_filters, window_size, kernel_regularizer="l2", padding='same')(concat)
    batch_norm1 = BatchNormalization()(conv_layer1)
    conv_act1 = activations.relu(batch_norm1)
    conv_drop1 = Dropout(conv2d_dropout)(conv_act1)
    max_pool_1D_1 = MaxPooling1D(pool_size=pool_size, strides=1, padding='same')(conv_drop1)

    conv_layer2 = Convolution1D(conv2_filters, window_size, padding='same')(concat)
    batch_norm2 = BatchNormalization()(conv_layer2)
    conv_act2 = activations.relu(batch_norm2)
    conv_drop2 = Dropout(conv2d_dropout)(conv_act2)
    max_pool_1D_2 = MaxPooling1D(pool_size=pool_size, strides=1, padding='same')(conv_drop2)

    conv_layer3 = Convolution1D(conv3_filters, window_size, kernel_regularizer="l2", padding='same')(concat)
    batch_norm3 = BatchNormalization()(conv_layer3)
    conv_act3 = activations.relu(batch_norm3)
    conv_drop3 = Dropout(conv2d_dropout)(conv_act3)
    max_pool_1D_3 = MaxPooling1D(pool_size=pool_size, strides=1, padding='same')(conv_drop3)

    # Concatenate the pooled convolutional features.
    conv_features = Concatenate(axis=-1)([max_pool_1D_1, max_pool_1D_2, max_pool_1D_3])

    ######## Recurrent Layers ########
    if (recurrent_layer == 'lstm'):
        if (bidirection):
            # Bidirectional LSTM layers.
            lstm_f1 = Bidirectional(LSTM(recurrent_layer1, return_sequences=True, activation='tanh', recurrent_activation='sigmoid', dropout=recurrent_dropout, recurrent_dropout=recurrent_recurrent_dropout))(conv_features)
            lstm_f2 = Bidirectional(LSTM(recurrent_layer2, return_sequences=True, activation='tanh', recurrent_activation='sigmoid', dropout=recurrent_dropout, recurrent_dropout=recurrent_recurrent_dropout))(lstm_f1)

            # Concatenate LSTM with convolutional layers.
            # Fixed: the original referenced the undefined name
            # `conv2_features` here, raising NameError in this branch.
            concat_features = Concatenate(axis=-1)([lstm_f1, lstm_f2, conv_features])
            concat_features = Dropout(after_recurrent_dropout)(concat_features)

        else:
            # Unidirectional LSTM layers.
            lstm_f1 = LSTM(recurrent_layer1, return_sequences=True, activation='tanh', recurrent_activation='sigmoid', dropout=recurrent_dropout, recurrent_dropout=recurrent_recurrent_dropout)(conv_features)
            lstm_f2 = LSTM(recurrent_layer2, return_sequences=True, activation='tanh', recurrent_activation='sigmoid', dropout=recurrent_dropout, recurrent_dropout=recurrent_recurrent_dropout)(lstm_f1)

            # Concatenate LSTM with convolutional layers.
            concat_features = Concatenate(axis=-1)([lstm_f1, lstm_f2, conv_features])
            concat_features = Dropout(after_recurrent_dropout)(concat_features)

    elif (recurrent_layer == 'gru'):
        if (bidirection):
            # Bidirectional GRU layers.
            gru_f1 = Bidirectional(GRU(recurrent_layer1, return_sequences=True, activation='tanh', recurrent_activation='sigmoid', dropout=recurrent_dropout, recurrent_dropout=recurrent_recurrent_dropout))(conv_features)
            gru_f2 = Bidirectional(GRU(recurrent_layer2, return_sequences=True, activation='tanh', recurrent_activation='sigmoid', dropout=recurrent_dropout, recurrent_dropout=recurrent_recurrent_dropout))(gru_f1)

            # Concatenate GRU with convolutional layers.
            concat_features = Concatenate(axis=-1)([gru_f1, gru_f2, conv_features])
            concat_features = Dropout(after_recurrent_dropout)(concat_features)

        else:
            # Unidirectional GRU layers.
            gru_f1 = GRU(recurrent_layer1, return_sequences=True, activation='tanh', recurrent_activation='sigmoid', dropout=recurrent_dropout, recurrent_dropout=recurrent_recurrent_dropout)(conv_features)
            # Fixed: the second GRU used `recurrent_layer1`; every other
            # branch sizes its second layer with `recurrent_layer2`.
            gru_f2 = GRU(recurrent_layer2, return_sequences=True, activation='tanh', recurrent_activation='sigmoid', dropout=recurrent_dropout, recurrent_dropout=recurrent_recurrent_dropout)(gru_f1)

            # Concatenate GRU with convolutional layers.
            concat_features = Concatenate(axis=-1)([gru_f1, gru_f2, conv_features])
            concat_features = Dropout(after_recurrent_dropout)(concat_features)
    else:
        print('Only LSTM and GRU recurrent layers are used in this model')
        return

    # Dense fully-connected DNN layers.
    fc_dense1 = Dense(dense_1, activation='relu')(concat_features)
    fc_dense1_dropout = Dropout(dense_dropout)(fc_dense1)
    fc_dense2 = Dense(dense_2, activation='relu')(fc_dense1_dropout)
    fc_dense2_dropout = Dropout(dense_dropout)(fc_dense2)
    fc_dense3 = Dense(dense_3, activation='relu')(fc_dense2_dropout)
    fc_dense3_dropout = Dropout(dense_dropout)(fc_dense3)

    # Final output layer with 8 nodes for the 8 output classifications.
    main_output = Dense(8, activation='softmax', name='main_output')(fc_dense3_dropout)

    # Create model from inputs and outputs.
    model = Model(inputs=[main_input, auxiliary_input], outputs=[main_output])

    # Set the optimizer to be used with the model; the default is Adam.
    if optimizer == 'adam':
        optimizer = Adam(lr=learning_rate, name='adam')
    elif optimizer == 'sgd':
        # Fixed: the keyword was misspelled `nestero`, which raises a
        # TypeError when this branch runs.
        optimizer = SGD(lr=0.01, momentum=0.0, nesterov=False, name='SGD')
    elif optimizer == 'rmsprop':
        optimizer = RMSprop(learning_rate=learning_rate, centered=True, name='RMSprop')
    elif optimizer == 'adagrad':
        optimizer = Adagrad(learning_rate=learning_rate, name='Adagrad')
    elif optimizer == 'adamax':
        optimizer = Adamax(learning_rate=learning_rate, name='Adamax')
    else:
        optimizer = Adam(lr=learning_rate, name='adam')

    # Compile the model using the optimizer and categorical crossentropy loss.
    model.compile(optimizer=optimizer, loss={'main_output': 'categorical_crossentropy'}, metrics=['accuracy', MeanSquaredError(), FalseNegatives(), FalsePositives(), TrueNegatives(), TruePositives(), MeanAbsoluteError(), Recall(), Precision()])

    # Get a summary of the model, including its layers and parameter counts.
    model.summary()

    # Set early stopping and checkpoints for the model.
    # NOTE(review): these callbacks are created but never returned or
    # registered, matching the original code — confirm whether the caller
    # expects them.
    earlyStopping = EarlyStopping(monitor='val_loss', patience=5, verbose=1, mode='min')
    checkpoint_path = BUCKET_PATH + "/checkpoints/" + str(datetime.date(datetime.now())) +\
        '_' + str((datetime.now().strftime('%H:%M'))) + ".h5"
    checkpointer = ModelCheckpoint(filepath=checkpoint_path, verbose=1, save_best_only=True, monitor='val_acc', mode='max')

    return model
コード例 #8
0
    def __init_model__(self):
        """Build the stacked DenseNet train model.

        Validates ``self.n_transitions`` against the image size and the
        generator's downsample factor, builds the front end (ImageNet
        pretrained or custom), then stacks ``self.n_stacks`` DenseNet
        stages, collecting one output head per stage into
        ``self.train_model``.

        Raises:
            ValueError: if ``n_transitions`` is zero or out of range, or
                incompatible with ``downsample_factor``.
            TypeError: if ``n_transitions`` is not an integer.
        """
        # Maximum number of 2x downsamples the input resolution supports.
        max_transitions = np.min([
            image_utils.n_downsample(self.train_generator.height),
            image_utils.n_downsample(self.train_generator.width),
        ])

        n_transitions = self.n_transitions
        if isinstance(n_transitions, (int, np.integer)):
            if n_transitions == 0:
                raise ValueError("n_transitions cannot equal zero")
            if n_transitions < 0:
                # Negative values count back from the maximum (-1 => max).
                n_transitions += 1
                n_transitions = max_transitions - np.abs(n_transitions)
                self.n_transitions = n_transitions
            elif 0 < n_transitions <= max_transitions:
                self.n_transitions = n_transitions
            else:
                raise ValueError("n_transitions must be in range {0} "
                                 "< n_transitions <= "
                                 "{1}".format(-max_transitions + 1,
                                              max_transitions))
        else:
            raise TypeError("n_transitions must be integer in range "
                            "{0} < n_transitions <= "
                            "{1}".format(-max_transitions + 1,
                                         max_transitions))

        if self.train_generator.downsample_factor < 2:
            raise ValueError(
                "StackedDenseNet is only compatible with `downsample_factor` >= 2."
                "Adjust the TrainingGenerator or choose a different model.")
        if n_transitions <= self.train_generator.downsample_factor:
            raise ValueError(
                "`n_transitions` <= `downsample_factor`. Increase `n_transitions` or decrease `downsample_factor`."
                " If `n_transitions` is -1 (the default), check that your image resolutions can be repeatedly downsampled (are divisible by 2 repeatedly)."
            )
        if self.pretrained:
            # Fixed: `is 1` compared ints by identity (and is a
            # SyntaxWarning on modern Python); use equality instead.
            if self.input_shape[-1] == 1:
                # Grayscale input: replicate the channel to fake RGB for
                # the ImageNet-pretrained front end.
                inputs = Concatenate()([self.inputs] * 3)
                input_shape = self.input_shape[:-1] + (3, )
            else:
                inputs = self.inputs
                input_shape = self.input_shape
            normalized = ImageNetPreprocess("densenet121")(inputs)
            front_outputs = ImageNetFrontEnd(
                input_shape=input_shape,
                n_downsample=self.train_generator.downsample_factor,
                compression_factor=self.compression_factor,
            )(normalized)
        else:
            normalized = ImageNormalization()(self.inputs)
            front_outputs = FrontEnd(
                growth_rate=self.growth_rate,
                n_downsample=self.train_generator.downsample_factor,
                compression_factor=self.compression_factor,
                bottleneck_factor=self.bottleneck_factor,
            )(normalized)
        n_downsample = self.n_transitions - self.train_generator.downsample_factor
        # NOTE(review): `outputs` appears to be a list of tensors produced
        # by the front end; each stage appends normalized heads to it —
        # confirm against FrontEnd/DenseNet return types.
        outputs = front_outputs
        model_outputs = OutputChannels(self.train_generator.n_output_channels,
                                       name="output_0")(outputs)

        model_outputs_list = [model_outputs]
        outputs.append(BatchNormalization()(model_outputs))
        for idx in range(self.n_stacks):
            outputs = DenseNet(
                growth_rate=self.growth_rate,
                n_downsample=self.n_transitions -
                self.train_generator.downsample_factor,
                downsample_factor=self.train_generator.downsample_factor,
                compression_factor=self.compression_factor,
                bottleneck_factor=self.bottleneck_factor,
            )(outputs)
            outputs.append(Concatenate()(front_outputs))
            outputs.append(BatchNormalization()(model_outputs))
            model_outputs = OutputChannels(
                self.train_generator.n_output_channels,
                name="output_" + str(idx + 1))(outputs)
            model_outputs_list.append(model_outputs)

        # One output head per stack for intermediate supervision.
        self.train_model = Model(self.inputs,
                                 model_outputs_list,
                                 name=self.__class__.__name__)
コード例 #9
0
def build_net(optim):
    """
    This is a Deep Convolutional Neural Network (DCNN). For generalization purpose I used dropouts in regular intervals.
    I used `ELU` as the activation because it avoids dying relu problem but also performed well as compared to LeakyRelu
    at-least in this case. `he_normal` kernel initializer is used as it suits ELU. BatchNormalization is also used for
    better results.
    """
    def conv(filters, size, name, **extra):
        # Every conv layer shares the same activation/padding/initializer.
        return Conv2D(filters=filters,
                      kernel_size=size,
                      activation='elu',
                      padding='same',
                      kernel_initializer='he_normal',
                      name=name,
                      **extra)

    net = Sequential(name='DCNN')

    # Stage 1: two 5x5 conv blocks.
    net.add(conv(64, (5, 5), 'conv2d_1',
                 input_shape=(img_width, img_height, img_depth)))
    net.add(BatchNormalization(name='batchnorm_1'))
    net.add(conv(64, (5, 5), 'conv2d_2'))
    net.add(BatchNormalization(name='batchnorm_2'))
    net.add(MaxPooling2D(pool_size=(2, 2), name='maxpool2d_1'))
    net.add(Dropout(0.4, name='dropout_1'))

    # Stage 2: two 3x3 conv blocks.
    net.add(conv(128, (3, 3), 'conv2d_3'))
    net.add(BatchNormalization(name='batchnorm_3'))
    net.add(conv(128, (3, 3), 'conv2d_4'))
    net.add(BatchNormalization(name='batchnorm_4'))
    net.add(MaxPooling2D(pool_size=(2, 2), name='maxpool2d_2'))
    net.add(Dropout(0.4, name='dropout_2'))

    # Stage 3: two 3x3 conv blocks with more filters.
    net.add(conv(256, (3, 3), 'conv2d_5'))
    net.add(BatchNormalization(name='batchnorm_5'))
    net.add(conv(256, (3, 3), 'conv2d_6'))
    net.add(BatchNormalization(name='batchnorm_6'))
    net.add(MaxPooling2D(pool_size=(2, 2), name='maxpool2d_3'))
    net.add(Dropout(0.5, name='dropout_3'))

    # Classifier head.
    net.add(Flatten(name='flatten'))
    net.add(Dense(128,
                  activation='elu',
                  kernel_initializer='he_normal',
                  name='dense_1'))
    net.add(BatchNormalization(name='batchnorm_7'))
    net.add(Dropout(0.6, name='dropout_4'))
    net.add(Dense(num_classes, activation='softmax', name='out_layer'))

    net.compile(loss='categorical_crossentropy',
                optimizer=optim,
                metrics=['accuracy'])
    net.summary()
    return net
コード例 #10
0
ファイル: models.py プロジェクト: j-abc/autoencoda
def deep_logistic_keras(X,
                        nodes_per_layer=None,
                        loss_type='binary_crossentropy',
                        optimizer=None,
                        metrics_list=None,
                        do_batch_norm=True,
                        do_dropout=None,
                        activation_type='relu',
                        initializer=None):
    """Build a deep NN classifier in Keras.

    Args:
        X (np.ndarray): Array with shape [n_examples, n_features]
                        containing data examples.
        nodes_per_layer (list of int): Number of nodes in each layer.
                        Defaults to [50, 20, 1].
        loss_type (str): The loss function to minimize.
        optimizer (Keras optimizer): Keras optimizer with which to compile
                        the model. Defaults to Adam(lr=0.001).
        metrics_list (list of str): Metrics to calculate during training.
                        Defaults to ['accuracy'].
        do_batch_norm (bool): Whether to perform batch normalization after
                        each hidden layer.
        do_dropout (float/None): Dropout fraction to use.
        activation_type (str): Type of activation function to apply to
                        hidden layer outputs.
        initializer (Keras initializer): Keras initializer to use for dense
                        layers. Defaults to RandomNormal(mean=0.0, stddev=0.05).

    Returns:
        model (Keras model): Compiled Keras model.
    """
    # Fixed: the defaults were mutable objects / Keras instances created
    # at definition time — a single shared Adam optimizer (with its state)
    # and shared lists across every call. Resolve them at call time instead.
    if nodes_per_layer is None:
        nodes_per_layer = [50, 20, 1]
    if optimizer is None:
        optimizer = k.optimizers.Adam(lr=0.001)
    if metrics_list is None:
        metrics_list = ['accuracy']
    if initializer is None:
        initializer = k.initializers.RandomNormal(mean=0.0, stddev=0.05)

    # Initialize model
    model = Sequential()
    N_layers = len(nodes_per_layer)

    for ilayer in range(N_layers):
        nodes = nodes_per_layer[ilayer]
        last_layer = ilayer == (N_layers - 1)
        # Handles each kind of layer (input, output, hidden) appropriately
        if ilayer == 0:
            model.add(
                Dense(nodes,
                      input_dim=X.shape[1],
                      kernel_initializer=initializer))
        elif last_layer:
            assert nodes == 1, 'Output layer should have 1 node.'
            model.add(
                Dense(nodes,
                      activation='sigmoid',
                      kernel_initializer=initializer))
        else:
            model.add(Dense(nodes, kernel_initializer=initializer))
        # Optional batch norm and dropout on every non-final layer.
        if not last_layer:
            if do_dropout is not None:
                assert do_dropout < 1.0 and do_dropout >= 0.0, \
                       'Dropout must be fraction between 0.0 and 1.0.'
                model.add(Dropout(do_dropout))
            if do_batch_norm:
                model.add(BatchNormalization())
            # Add activation function
            model.add(Activation(activation_type))
    # Compile
    model.compile(loss=loss_type, optimizer=optimizer, metrics=metrics_list)
    return model
コード例 #11
0
def _bn_relu(input):
    """Helper to build a BN -> ReLU block."""
    normalized = BatchNormalization(axis=CHANNEL_AXIS)(input)
    activated = Activation("relu")(normalized)
    return activated
コード例 #12
0
ファイル: UAV2SEN_FuzzyCNN.py プロジェクト: PCdurham/UAV2Sen
#Estimator.add(Conv2D(Nfilters,3,  activation=NAF))
#Estimator.add(Conv2D(Nfilters,3, activation=NAF))
# Flatten the preceding feature maps into a vector for the dense head.
# NOTE(review): `Estimator`, `NAF` (activation) and `regularizers` are
# defined earlier in the original script -- not visible in this chunk.
Estimator.add(Flatten())
# Dense head, 64 units: L2 weight penalty (1e-3), normal initializer.
Estimator.add(
    Dense(64,
          kernel_regularizer=regularizers.l2(0.001),
          kernel_initializer='normal',
          activation=NAF))
# Batch-normalize over the last (feature) axis.  Every argument below is
# the Keras default, merely spelled out explicitly.
Estimator.add(
    BatchNormalization(axis=-1,
                       momentum=0.99,
                       epsilon=0.001,
                       center=True,
                       scale=True,
                       beta_initializer='zeros',
                       gamma_initializer='ones',
                       moving_mean_initializer='zeros',
                       moving_variance_initializer='ones',
                       beta_regularizer=None,
                       gamma_regularizer=None,
                       beta_constraint=None,
                       gamma_constraint=None))
# Second dense layer of the head (32 units), same regularization scheme.
Estimator.add(
    Dense(32,
          kernel_regularizer=regularizers.l2(0.001),
          kernel_initializer='normal',
          activation=NAF))
Estimator.add(
    Dense(16,
          kernel_regularizer=regularizers.l2(0.001),
          kernel_initializer='normal',
コード例 #13
0
def wide_residual_network(x, is_training, params):
    """Build a wide residual network (WRN) on top of the input tensor.

    Args:
        x: Input image tensor.
        is_training: Bool (or bool tensor) forwarded to batch-norm layers.
        params (dict): Configuration; must contain:
            depth (int): total depth, ``(depth - 4)`` divisible by 6.
            width (int): widening factor applied to the base filter counts.
            drop_prob (float): dropout probability inside residual blocks.
            out_units (int): units of the final fully-connected layer.
            use_conv (bool, optional): if True, a 1*1 conv2d is used for
                downsampling between groups (default False).

    Returns:
        L2-normalized output tensor of shape (batch, out_units).
    """
    for key in ('depth', 'width', 'drop_prob', 'out_units'):
        assert key in params, '%s must in params' % key

    depth = params['depth']
    width = params['width']
    drop_prob = params['drop_prob']
    # if use_conv, a 1*1 conv2d will be used for downsampling between groups
    use_conv = params.get('use_conv', False)

    assert (depth - 4) % 6 == 0
    num_residual_units = (depth - 4) // 6
    # Avoid shadowing the input tensor `x` in the comprehension variable.
    nb_filters = [w * width for w in [16, 32, 64, 128]]
    prefix = 'main/'

    x = Conv2D(16, 3, strides=(1, 1), padding='same', name=prefix + 'conv')(x)
    in_nb_filters = 16

    # Four residual groups.  Groups 2-4 downsample (stride 2) at their first
    # unit and may use the 1x1-conv shortcut; the first group never does.
    for group_idx, group_filters in enumerate(nb_filters):
        first_group = group_idx == 0
        for unit_idx in range(num_residual_units):
            stride = 2 if (unit_idx == 0 and not first_group) else 1
            x = residual_block(x,
                               is_training=is_training,
                               in_nb_filters=in_nb_filters,
                               nb_filters=group_filters,
                               block_id=group_idx + 1,
                               stride=stride,
                               drop_prob=drop_prob,
                               use_conv=False if first_group else use_conv)
            in_nb_filters = group_filters

    # Head: BN -> ReLU -> global 8x8 average pool -> FC -> BN -> L2 normalize.
    x = BatchNormalization(name=prefix + 'bn')(x, training=is_training)
    x = tf.nn.relu(x, name=prefix + 'relu')
    x = AveragePooling2D(pool_size=(8, 8),
                         strides=(1, 1),
                         padding='valid',
                         name=prefix + 'pool')(x)
    x = Flatten(name=prefix + 'flatten')(x)
    x = Dense(params['out_units'], name=prefix + 'fc')(x)
    x = BatchNormalization(name=prefix + 'bn_1')(x, training=is_training)
    out = tf.math.l2_normalize(x)
    return out
コード例 #14
0
    def build_generator(self, number_of_filters_per_layer=(128, 64), kernel_size=4):
        """Build the generator as the mirror image of a convolutional encoder.

        An autoencoder with the reversed filter schedule is instantiated and
        its encoder half is walked backwards so that each generator stage
        upsamples to the spatial size of the matching encoder layer.

        Arguments
        ---------
        number_of_filters_per_layer : tuple
            Number of filters for each generator convolution stage.

        kernel_size : int or tuple
            Kernel size used by the generator convolutions.

        Returns
        -------
        Keras model mapping a latent vector of size ``self.latent_dimension``
        to a tanh-activated image.
        """
        model = Sequential()

        # To build the generator, we create the reverse encoder model
        # and simply build the reverse model

        encoder = None
        if self.dimensionality == 2:
             autoencoder, encoder = create_convolutional_autoencoder_model_2d(
                          input_image_size=self.input_image_size,
                          number_of_filters_per_layer=(*(number_of_filters_per_layer[::-1]), self.latent_dimension),
                          convolution_kernel_size=(5, 5),
                          deconvolution_kernel_size=(5, 5))
        else:
             autoencoder, encoder = create_convolutional_autoencoder_model_3d(
                          input_image_size=self.input_image_size,
                          number_of_filters_per_layer=(*(number_of_filters_per_layer[::-1]), self.latent_dimension),
                          convolution_kernel_size=(5, 5, 5),
                          deconvolution_kernel_size=(5, 5, 5))

        encoder_layers = encoder.layers

        # Dense layer sized to the encoder's penultimate (flattened) output,
        # then reshape to the last conv layer's spatial output shape.
        penultimate_layer = encoder_layers[len(encoder_layers) - 2]

        model.add(Dense(units=penultimate_layer.output_shape[1],
                        input_dim=self.latent_dimension,
                        activation="relu"))

        conv_layer = encoder_layers[len(encoder_layers) - 3]
        resampled_size = conv_layer.output_shape[1:(self.dimensionality + 2)]
        model.add(Reshape(resampled_size))

        # Walk the encoder's conv layers in reverse: resample up to each
        # layer's spatial size, convolve, batch-norm, ReLU.
        count = 0
        for i in range(len(encoder_layers) - 3, 1, -1):
            conv_layer = encoder_layers[i]
            resampled_size = conv_layer.output_shape[1:(self.dimensionality + 1)]

            if self.dimensionality == 2:
                model.add(ResampleTensorLayer2D(shape=resampled_size,
                                                interpolation_type='linear'))
                model.add(Conv2D(filters=number_of_filters_per_layer[count],
                                 kernel_size=kernel_size,
                                 padding='same'))
            else:
                model.add(ResampleTensorLayer3D(shape=resampled_size,
                                                interpolation_type='linear'))
                model.add(Conv3D(filters=number_of_filters_per_layer[count],
                                 kernel_size=kernel_size,
                                 padding='same'))
            model.add(BatchNormalization(momentum=0.8))
            model.add(Activation(activation='relu'))
            count += 1

        # Final stage: resample to the full image size and project to the
        # input's channel count.
        number_of_channels = self.input_image_size[-1]
        spatial_dimensions = self.input_image_size[:self.dimensionality]

        if self.dimensionality == 2:
            model.add(ResampleTensorLayer2D(shape=spatial_dimensions,
                                            interpolation_type='linear'))
            model.add(Conv2D(filters=number_of_channels,
                             kernel_size=kernel_size,
                             padding='same'))
        else:
            model.add(ResampleTensorLayer3D(shape=spatial_dimensions,
                                            interpolation_type='linear'))
            model.add(Conv3D(filters=number_of_channels,
                             kernel_size=kernel_size,
                             padding='same'))

        # tanh output, i.e. images are expected scaled to [-1, 1].
        model.add(Activation(activation="tanh"))

        noise = Input(shape=(self.latent_dimension,))
        image = model(noise)

        generator = Model(inputs=noise, outputs=image)
        return(generator)
コード例 #15
0
def Conv2d_BN(x, nb_filter, kernel_size, strides = (1,1), padding = 'same'):
    """ReLU-activated 2D convolution followed by channel-axis batch norm."""
    conv_out = Conv2D(nb_filter,
                      kernel_size,
                      padding=padding,
                      strides=strides,
                      activation='relu')(x)
    return BatchNormalization(axis=3)(conv_out)
コード例 #16
0
def train_VGG19(xdim,
                ydim,
                classes,
                trainGen,
                valGen,
                steps,
                NUM_EPOCHS,
                bs,
                save=False,
                name="Default"):
    """Fine-tune a VGG19 backbone (ImageNet weights) with a custom dense head.

    Args:
        xdim, ydim: Spatial dimensions of the input images.
        classes: Number of output classes (softmax head).
        trainGen, valGen: Training / validation data generators.
        steps: Steps per epoch (also used as validation steps).
        NUM_EPOCHS: Number of training epochs.
        bs: Batch size (currently unused; the generators fix the batch size).
        save: If truthy, persist weights, full model, architecture JSON and
              the training history, all prefixed with `name`.
        name: Prefix for saved artefacts and log messages.

    Returns:
        (H, model_final): Keras History object and the trained model.
    """
    print("[" + name + "] training w/ generator...")

    # Fix seeds for reproducibility.
    SEED = 50
    np.random.seed(SEED)
    tf.random.set_seed(SEED)

    # NOTE(review): `lr`/`decay` are the legacy Keras optimizer kwargs
    # (newer TF expects `learning_rate`); kept for compatibility with the
    # version this script targets.
    opt = optm.Adam(lr=0.001,
                    beta_1=0.9,
                    beta_2=0.999,
                    epsilon=10E-8,
                    decay=0.001,
                    amsgrad=False)
    model = applications.VGG19(weights="imagenet",
                               include_top=False,
                               input_shape=(xdim, ydim, 3))

    # Custom head: two 512-unit BN+ReLU blocks, then a softmax classifier.
    # Dense layers drop their bias because BatchNormalization re-centers.
    x = model.output
    x = Flatten()(x)
    x = Dense(512,
              use_bias=False,
              kernel_initializer=initializers.he_normal(seed=SEED))(x)
    x = BatchNormalization()(x)
    x = relu(x)
    x = Dense(512,
              use_bias=False,
              kernel_initializer=initializers.he_normal(seed=SEED))(x)
    x = BatchNormalization()(x)
    x = relu(x)
    predictions = Dense(classes, activation="softmax")(x)

    # creating the final model
    model_final = Model(inputs=model.input, outputs=predictions)
    print(model_final.summary())

    # Callbacks are built but not passed to fit (the callbacks= line below
    # is intentionally disabled); kept so they can be re-enabled easily.
    checkpointer = ModelCheckpoint(
        filepath=name + '_best_weights.h5',
        verbose=1,
        monitor='val_loss',
        mode='auto',
        save_best_only=True)  #save at each epoch if the validation decreased
    tbr = TensorBoard(log_dir='./logs',
                      histogram_freq=0,
                      write_graph=True,
                      write_images=True,
                      embeddings_freq=0,
                      embeddings_layer_names=None,
                      embeddings_metadata=None,
                      embeddings_data=None,
                      update_freq='epoch')

    # compile the model
    model_final.compile(loss="categorical_crossentropy",
                        optimizer=opt,
                        metrics=["accuracy"])

    # NOTE(review): fit_generator is deprecated in TF >= 2.1 (Model.fit
    # accepts generators directly); kept for the targeted Keras version.
    H = model_final.fit_generator(trainGen,
                                  steps_per_epoch=steps,
                                  validation_data=valGen,
                                  validation_steps=steps,
                                  epochs=NUM_EPOCHS,
                                  use_multiprocessing=MP,
                                  verbose=1)  #,
    #callbacks=[tbr])

    if save:
        print("\nSaving model: " + name)
        model_final.save_weights(name + '_modelWeight.h5')

        model_final.save(name + '_fullModel.h5')

        with open(name + '_architecture.json', 'w') as f:
            f.write(model_final.to_json())

        with open(name + '_hist', 'wb') as file_pi:
            pickle.dump(H.history, file_pi)

        print('\nModel saved!\n')
    else:
        print('\nModel not saved!\n')

    return H, model_final
コード例 #17
0
ファイル: model.py プロジェクト: xzm2004260/malaya-speech
    def __init__(
        self,
        inputs,
        training = True,
        ksize = 5,
        n_layers = 12,
        channels_interval = 24,
        logging = True,
    ):
        """Build a Wave-U-Net-style 1D encoder/decoder graph over `inputs`.

        The encoder is a stack of `n_layers` residual conv blocks, each
        followed by a stride-2 decimation; the decoder mirrors it with
        nearest-neighbour upsampling plus skip-connection concatenation.
        The final logits are stored on `self.logits`.

        Args:
            inputs: Input tensor of shape (batch, time, channels).
            training: Whether batch-norm layers run in training mode.
            ksize: Nominal kernel size.  NOTE(review): not actually used --
                   the residual blocks hard-code kernel size 5; confirm.
            n_layers: Number of encoder (and decoder) levels.
            channels_interval: Channel increment per encoder level.
            logging: If True, print intermediate tensors while building.
        """
        conv_activation_layer = _get_conv_activation_layer({})
        kernel_initializer = he_uniform(seed = 50)

        # Factory for stride-2 'same' conv1d layers sharing the initializer.
        conv1d_factory = partial(
            Conv1D,
            strides = (2),
            padding = 'same',
            kernel_initializer = kernel_initializer,
        )

        def resnet_block(input_tensor, filter_size):
            # Residual block: 1x1 projection shortcut + two conv/BN stages,
            # summed and passed through the activation.
            res = conv1d_factory(
                filter_size, (1), strides = (1), use_bias = False
            )(input_tensor)
            conv1 = conv1d_factory(filter_size, (5), strides = (1))(
                input_tensor
            )
            batch1 = BatchNormalization(axis = -1)(conv1, training = training)
            rel1 = conv_activation_layer(batch1)
            conv2 = conv1d_factory(filter_size, (5), strides = (1))(rel1)
            batch2 = BatchNormalization(axis = -1)(conv2, training = training)
            resconnection = Add()([res, batch2])
            rel2 = conv_activation_layer(resconnection)
            return rel2

        self.n_layers = n_layers
        self.channels_interval = channels_interval
        # Encoder channel schedule: interval, 2*interval, ..., n*interval.
        out_channels = [
            i * self.channels_interval for i in range(1, self.n_layers + 1)
        ]
        # Bottleneck between encoder and decoder.
        self.middle = tf.keras.Sequential()
        self.middle.add(
            tf.keras.layers.Conv1D(
                self.n_layers * self.channels_interval,
                kernel_size = 15,
                strides = 1,
                padding = 'SAME',
                dilation_rate = 1,
            )
        )
        self.middle.add(BatchNormalization(axis = -1))
        self.middle.add(LeakyReLU(0.2))

        # Decoder channel schedule is the encoder's reversed.
        decoder_out_channels_list = out_channels[::-1]

        self.decoder = []
        for i in range(self.n_layers):
            self.decoder.append(
                UpSamplingLayer(channel_out = decoder_out_channels_list[i])
            )
        # Output head: 1-channel conv + tanh.
        self.out = tf.keras.Sequential()
        self.out.add(
            tf.keras.layers.Conv1D(
                1,
                kernel_size = 1,
                strides = 1,
                padding = 'SAME',
                dilation_rate = 1,
            )
        )
        self.out.add(Activation('tanh'))

        # ---- Wire up the graph ----
        tmp = []  # skip-connection tensors, one per encoder level
        o = inputs

        for i in range(self.n_layers):
            o = resnet_block(o, out_channels[i])
            tmp.append(o)
            # Decimate the time axis by 2 (strided slice downsampling).
            o = o[:, ::2]
            if logging:
                print(o)

        o = self.middle(o, training = training)
        if logging:
            print(o)

        for i in range(self.n_layers):
            # Nearest-neighbour upsample time axis by 2, then concatenate
            # the matching encoder skip connection on the channel axis.
            o = tf.image.resize(
                o, [tf.shape(o)[0], tf.shape(o)[1] * 2], method = 'nearest'
            )
            o = tf.concat([o, tmp[self.n_layers - i - 1]], axis = 2)
            o = self.decoder[i](o, training = training)
            if logging:
                print(o)

        if logging:
            print(o, inputs)
        # Final skip from the raw input before the output head.
        o = tf.concat([o, inputs], axis = 2)
        o = self.out(o, training = training)
        self.logits = o
コード例 #18
0
    def __init__(self, input_shape, mr1_input_shape, mr2_input_shape):
        """Declare the layers of a small U-Net with two multi-resolution inputs.

        Only layer objects are created here; the forward wiring happens
        elsewhere (not visible in this chunk).

        Args:
            input_shape: Shape of the main spectrogram-like input.
            mr1_input_shape: Shape of the first multi-resolution input.
            mr2_input_shape: Shape of the second multi-resolution input.
        """
        with tf.variable_scope("U-Net"):
            self.kernel_size = (5, 5)  # (5,5)
            self.stride = (2, 2)
            self.leakiness = 0.2      # LeakyReLU slope used by the forward pass
            self.dropout_rate = 0.5
            # stride for mr1: downsample time only / frequency only
            self.t_mr1_stride = (2, 1)
            self.f_mr1_stride = (1, 2)
            # stride for mr2: aggressive frequency downsampling
            self.f_mr2_stride = (1, 8)

            #mr1: t 512 to 256
            self.mr1_conv1 = Conv2D(4,
                                    self.kernel_size,
                                    self.t_mr1_stride,
                                    input_shape=mr1_input_shape,
                                    padding='same')
            self.mr1_Bnorm1 = BatchNormalization()

            #mr2:
            self.mr2_conv1 = Conv2D(8,
                                    self.kernel_size,
                                    self.f_mr2_stride,
                                    input_shape=mr2_input_shape,
                                    padding='same')
            self.mr2_Bnorm1 = BatchNormalization()

            # endcoder: three conv+BN stages (4 -> 8 -> 32 filters)
            self.conv1 = Conv2D(4,
                                self.kernel_size,
                                self.f_mr1_stride,
                                input_shape=input_shape,
                                padding='same')
            self.Bnorm1 = BatchNormalization()
            self.conv2 = Conv2D(8,
                                self.kernel_size,
                                self.stride,
                                padding='same')
            self.Bnorm2 = BatchNormalization()
            self.conv3 = Conv2D(32,
                                self.kernel_size,
                                self.stride,
                                padding='same')
            self.Bnorm3 = BatchNormalization()
            # decoder: mirrored transposed convs (16 -> 8 -> 1 filters)
            self.deconv1 = Conv2DTranspose(16,
                                           self.kernel_size,
                                           self.stride,
                                           padding='same')
            self.deBnorm1 = BatchNormalization()
            self.Dropout1 = Dropout(rate=self.dropout_rate)
            self.deconv2 = Conv2DTranspose(8,
                                           self.kernel_size,
                                           self.stride,
                                           padding='same')
            self.deBnorm2 = BatchNormalization()
            self.deconv3 = Conv2DTranspose(1,
                                           self.kernel_size,
                                           self.f_mr1_stride,
                                           padding='same')
            self.deBnorm3 = BatchNormalization()
コード例 #19
0
                                                  patience=10)
'''LR_reduction = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                    factor=0.1, 
                                                    patience=10,
                                                    mode='auto', 
                                                    cooldown=0,
                                                    min_lr=0
                                                   )'''

# In[40]:

#Multi-layred ANN

model = tf.keras.models.Sequential()

# Normalize the 5000 raw input features before the dense stack.
model.add(BatchNormalization(input_shape=(5000, ), axis=1))

#model.add(Flatten())

# Shrinking dense stack (500 -> 200 -> 100) with dropout after each
# hidden layer to reduce overfitting.
model.add(Dense(500, activation='relu'))

model.add(Dropout(0.5))

model.add(Dense(200, activation='relu'))

model.add(Dropout(0.5))

model.add(Dense(100, activation='relu'))

model.add(Dropout(0.4))
コード例 #20
0
# RGB branch: three CONV => RELU => BN => POOL stages, then a small
# dense head.  Operates on 128x128 RGB images, channels-last.
inputShapeRGB = (128, 128, 3)
inputRGB = Input(shape=inputShapeRGB)
chanDim = -1
regress = False

filters = (16, 32, 64)

x = inputRGB
for i, f in enumerate(filters):
    print("i: {} f: {}".format(i, f))
    x = Conv2D(f, (3, 3), padding="same")(x)
    x = Activation("relu")(x)
    # normalize each batch by both mean and variance reference
    x = BatchNormalization(axis=chanDim)(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

# Dense head: 16 -> 4 units with BN and dropout after the first layer.
x = Flatten()(x)
x = Dense(16)(x)
x = Activation("relu")(x)
x = BatchNormalization(axis=chanDim)(x)
# helps prevent overfitting by randomly setting a fraction 0.5 of input units to 0
x = Dropout(0.5)(x)
x = Dense(4)(x)
x = Activation("relu")(x)

# Optional linear regression output (disabled by the flag above).
if regress:
    x = Dense(1, activation="linear")(x)

rgbModel = Model(inputRGB, x)
print(rgbModel.summary())
コード例 #21
0
# Min-max scale the test targets using the *training* targets' range so
# no test statistics leak into preprocessing.  NOTE(review): y_train /
# y_test are defined earlier in the original script.
y_test_std = (y_test - np.min(y_train)) / (np.max(y_train) - np.min(y_train))

#-------------------------------------------------------------------------------
# Creating convolutional Model
#-------------------------------------------------------------------------------

model = Sequential()

# Three CONV => LeakyReLU => BN => Dropout => AvgPool stages
# (64 -> 128 -> 512 filters) on tiny 4x4 single-channel inputs.
model.add(
    Conv2D(filters=64,
           kernel_size=(3, 3),
           padding='same',
           input_shape=(4, 4, 1)))
#model.add(ReLU())
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization(momentum=0.8))
model.add(Dropout(0.1))
model.add(AveragePooling2D(pool_size=(2, 2), padding='same'))

model.add(Conv2D(filters=128, kernel_size=(2, 2), padding='same'))
#model.add(ReLU())
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization(momentum=0.8))
model.add(Dropout(0.1))
model.add(AveragePooling2D(pool_size=(2, 2), padding='same'))

model.add(Conv2D(filters=512, kernel_size=(2, 2), padding='same'))
#model.add(ReLU())
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization(momentum=0.8))
model.add(Dropout(0.1))
コード例 #22
0
# One-hot encode the labels.  NOTE(review): trainY / testY come from
# earlier in the original script.
numLabels = len(np.unique(trainY))
trainY = to_categorical(trainY, numLabels)
testY = to_categorical(testY, numLabels)

# account for skew in the labeled data
# (inverse-frequency class weights, normalized to the largest class)
classTotals = trainY.sum(axis=0)
classWeight = classTotals.max() / classTotals

model = Sequential()
inputShape = (32, 32, 3)
chanDim = -1  # channels-last input, so BN normalizes the last axis
classes = numLabels
# CONV => RELU => BN => POOL
model.add(Conv2D(8, (5, 5), padding="same", input_shape=inputShape))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
# first set of (CONV => RELU => CONV => RELU) * 2 => POOL
model.add(Conv2D(16, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(16, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
# second set of (CONV => RELU => CONV => RELU) * 2 => POOL
model.add(Conv2D(32, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(32, (3, 3), padding="same"))
model.add(Activation("relu"))
コード例 #23
0
    def _small_conv_block_(self,
                           _input_,
                           filters: int,
                           kernel_size: Union[int, tuple] = (3, 3),
                           strides: Union[tuple, int] = (1, 1),
                           padding: str = 'same',
                           position: str = 'non_first_block'):
        '''
        Build a small residual module of 2 consecutive conv blocks.
        Params:
        :_input_:        input tensor of the module
        :filters:        number of filters in current blocks
        :kernel_size:    size of the kernel in conv_layer
                         (NOTE(review): currently not forwarded to
                         _conv_batchnorm_dropout_, which uses its own
                         default -- confirm before relying on it)
        :stride:         stride of conv_kernel
        :padding:        default padding for conv_layer
        :position:       'very_first_block' (ResNet stem first),
                         'first_block' (downsamples with stride (2,2))
                         or 'non_first_block'
        Return:
        Output of the 2 conv_blocks (None for the unimplemented '3in1'
        mode, matching previous behaviour)
        '''
        # Resolve the stride of the module's first conv from its position.
        if position == 'very_first_block':
            # Very first block of all Resnet: 7x7/2 conv + 3x3/2 max-pool.
            _input_ = self._conv_batchnorm_dropout_(_input_,
                                                    filters=64,
                                                    kernel_size=(7, 7),
                                                    strides=(2, 2),
                                                    padding='same')
            _input_ = Activation('relu')(_input_)
            _input_ = MaxPooling2D(pool_size=(3, 3),
                                   strides=(2, 2),
                                   padding="same")(_input_)
            # Just for the second conv_block of every Resnet, strides = (1,1)
            stride_first_layer = strides
        elif position == 'first_block':
            # For every first conv_block of a larger conv_chain.
            stride_first_layer = (2, 2)
        elif position == 'non_first_block':
            stride_first_layer = strides
        else:
            # Previously an unknown position crashed later with an unbound
            # local variable; fail fast with a clear message instead.
            raise ValueError('Unknown position: {!r}'.format(position))

        input_shape = _input_.shape
        # A module of 2 conv_blocks.
        if self.mode == '2in1':
            output = self._conv_batchnorm_dropout_(_input_,
                                                   filters=filters,
                                                   strides=stride_first_layer)
            output = Activation('relu')(output)
            output_shape = output.shape
            output = self._conv_batchnorm_dropout_(output,
                                                   filters=filters,
                                                   strides=strides)
        elif self.mode == '3in1':
            # TODO: bottleneck (3-conv) variant not implemented; keep the
            # original behaviour of returning None explicitly.
            return None
        else:
            # Previously an unknown mode crashed later with an unbound
            # `output`; fail fast instead.
            raise ValueError('Unknown mode: {!r}'.format(self.mode))

        # Identity shortcut when spatial size and channels match, otherwise
        # a projection shortcut (1x1 conv + BN + dropout).
        # NOTE(review): the projection hard-codes strides=(2,2); this only
        # matches when the mismatch came from a stride-2 first conv.
        if (input_shape[1] == output_shape[1]
                and input_shape[-1] == output_shape[-1]):
            #if(input_shape == output_shape): Don't know why this doesn't work
            shortcut = _input_
        else:
            shortcut = Conv2D(filters,
                              kernel_size=(1, 1),
                              strides=(2, 2),
                              padding='same',
                              kernel_initializer='he_normal')(_input_)
            shortcut = BatchNormalization()(shortcut)
            shortcut = Dropout(self.dropout_rate)(shortcut)

        output = Add()([output, shortcut])  #Shortcut is merged
        output = Activation('relu')(output)

        return output
コード例 #24
0
    def __init__(self):
        """Declare all layers of a three-resolution RANet-style classifier.

        Three sub-networks (small / medium / large input resolution) share
        the same layer vocabulary; each ends in its own softmax head so the
        model can exit early once a prediction clears `self.threshold`.
        Only layers are declared here -- wiring happens in the (unseen)
        forward pass.
        """
        super(FullRanet, self).__init__()

        # Define Classification Threshold (early-exit confidence cutoff)
        self.threshold = 0.7

        # Define the layers here
        # Downsampled views of the input: /4 for the small branch, /2 for
        # the medium branch; the large branch sees the full resolution.
        self.small_input = tf.keras.layers.AveragePooling2D(pool_size=(4, 4))
        self.med_input = tf.keras.layers.AveragePooling2D(pool_size=(2, 2))

        # The first conv2d layer in (small, med and large) models
        self.conv_in1 = Conv2D(filters=64,
                               kernel_size=(3, 3),
                               padding='same',
                               activation=None)
        self.conv_in2 = Conv2D(filters=64,
                               kernel_size=(3, 3),
                               padding='same',
                               activation=None)
        self.conv_in3 = Conv2D(filters=64,
                               kernel_size=(3, 3),
                               padding='same',
                               activation=None)
        # Remaining conv2d layers
        self.small_conv2 = Conv2D(filters=64,
                                  kernel_size=(3, 3),
                                  padding='same',
                                  activation=None)
        self.med_conv2 = Conv2D(filters=64,
                                kernel_size=(3, 3),
                                padding='same',
                                activation=None)
        self.med_conv3 = Conv2D(filters=64,
                                kernel_size=(3, 3),
                                padding='same',
                                activation=None)
        self.med_conv4 = Conv2D(filters=64,
                                kernel_size=(3, 3),
                                padding='same',
                                activation=None)
        self.large_conv2 = Conv2D(filters=64,
                                  kernel_size=(3, 3),
                                  padding='same',
                                  activation=None)
        self.large_conv3 = Conv2D(filters=64,
                                  kernel_size=(3, 3),
                                  padding='same',
                                  activation=None)
        self.large_conv4 = Conv2D(filters=64,
                                  kernel_size=(3, 3),
                                  padding='same',
                                  activation=None)
        self.large_conv5 = Conv2D(filters=64,
                                  kernel_size=(3, 3),
                                  padding='same',
                                  activation=None)
        self.large_conv6 = Conv2D(filters=64,
                                  kernel_size=(3, 3),
                                  padding='same',
                                  activation=None)

        # Batch normalization and relu layers
        # (declared individually so each conv gets its own BN statistics)
        self.bn1 = BatchNormalization()
        self.relu1 = ReLU()
        self.bn2 = BatchNormalization()
        self.relu2 = ReLU()
        self.bn3 = BatchNormalization()
        self.relu3 = ReLU()
        self.bn4 = BatchNormalization()
        self.relu4 = ReLU()
        self.bn5 = BatchNormalization()
        self.relu5 = ReLU()
        self.bn6 = BatchNormalization()
        self.relu6 = ReLU()
        self.bn7 = BatchNormalization()
        self.relu7 = ReLU()
        self.bn8 = BatchNormalization()
        self.relu8 = ReLU()
        self.bn9 = BatchNormalization()
        self.relu9 = ReLU()
        self.bn10 = BatchNormalization()
        self.relu10 = ReLU()
        self.bn11 = BatchNormalization()
        self.relu11 = ReLU()
        self.bn12 = BatchNormalization()
        self.relu12 = ReLU()

        # Classification Conv2d layers
        # (two stride-2 128-filter convs per branch ahead of each head)
        self.class_small_conv1 = Conv2D(filters=128,
                                        kernel_size=3,
                                        padding='same',
                                        strides=2,
                                        activation='relu')
        self.class_small_conv2 = Conv2D(filters=128,
                                        kernel_size=3,
                                        padding='same',
                                        strides=2,
                                        activation='relu')
        self.class_med_conv1 = Conv2D(filters=128,
                                      kernel_size=3,
                                      padding='same',
                                      strides=2,
                                      activation='relu')
        self.class_med_conv2 = Conv2D(filters=128,
                                      kernel_size=3,
                                      padding='same',
                                      strides=2,
                                      activation='relu')
        self.class_large_conv1 = Conv2D(filters=128,
                                        kernel_size=3,
                                        padding='same',
                                        strides=2,
                                        activation='relu')
        self.class_large_conv2 = Conv2D(filters=128,
                                        kernel_size=3,
                                        padding='same',
                                        strides=2,
                                        activation='relu')

        # reduction layers (1x1 convs that squeeze channels after merges)
        self.conv_red1 = Conv2D(filters=64,
                                kernel_size=1,
                                padding='same',
                                activation=None)
        self.conv_red2 = Conv2D(filters=64,
                                kernel_size=1,
                                padding='same',
                                activation=None)
        self.conv_red3 = Conv2D(filters=64,
                                kernel_size=1,
                                padding='same',
                                activation=None)
        self.conv_red4 = Conv2D(filters=64,
                                kernel_size=1,
                                padding='same',
                                activation=None)
        self.conv_red5 = Conv2D(filters=64,
                                kernel_size=1,
                                padding='same',
                                activation=None)
        self.conv_red6 = Conv2D(filters=64,
                                kernel_size=1,
                                padding='same',
                                activation=None)

        # Flatten, dropout and dense layers (one 10-way softmax per branch)
        self.flatten1 = Flatten()
        self.flatten2 = Flatten()
        self.flatten3 = Flatten()
        self.dropout1 = Dropout(0.2)
        self.dropout2 = Dropout(0.2)
        self.dropout3 = Dropout(0.2)
        self.dropout4 = Dropout(0.2)
        self.dropout5 = Dropout(0.2)
        self.dropout6 = Dropout(0.2)
        self.dense_1 = Dense(units=10, activation='softmax', name="output_1")
        self.dense_2 = Dense(units=10, activation='softmax', name="output_2")
        self.dense_3 = Dense(units=10, activation='softmax', name="output_3")

        # Merge Layers (upsample2D and concatenate)
        # Bilinear x2 upsampling so coarser-branch features can be fused
        # with the next-finer branch by channel concatenation.
        self.upsamp1 = UpSampling2D(size=(2, 2), interpolation='bilinear')
        self.concat1 = Concatenate(axis=-1)
        self.upsamp2 = UpSampling2D(size=(2, 2), interpolation='bilinear')
        self.concat2 = Concatenate(axis=-1)
        self.upsamp3 = UpSampling2D(size=(2, 2), interpolation='bilinear')
        self.concat3 = Concatenate(axis=-1)
        self.upsamp4 = UpSampling2D(size=(2, 2), interpolation='bilinear')
        self.concat4 = Concatenate(axis=-1)
        self.upsamp5 = UpSampling2D(size=(2, 2), interpolation='bilinear')
        self.concat5 = Concatenate(axis=-1)
        self.upsamp6 = UpSampling2D(size=(2, 2), interpolation='bilinear')
        self.concat6 = Concatenate(axis=-1)
コード例 #25
0
ファイル: mobilenet.py プロジェクト: Casuy/snake_training
def mobilenet(input_tensor):
    """Build the MobileNet v1 backbone plus the extra SSD feature layers.

    :param input_tensor: input image tensor, expected 300x300x3.
    :return: dict mapping feature-map names ('conv4_3', 'fc7', 'conv6_1',
             'conv6_2', 'conv7_1', 'conv7_2', 'conv8_1', 'conv8_2',
             'conv9_1', 'conv9_2') to their tensors, for the SSD head.
    """
    #---------------------- backbone feature extraction: start ---------------------#
    # SSD structure: `net` collects the named feature maps.
    net = {}
    # Block 1
    # 300,300,3 -> 150,150,64
    x = Conv2D(32, (3, 3),
               padding='same',
               use_bias=False,
               strides=(2, 2),
               name='conv1')(input_tensor)
    x = BatchNormalization(name='conv1_bn')(x)
    x = Activation(relu6, name='conv1_relu')(x)
    x = _depthwise_conv_block(x, 64, 1, block_id=1)

    # Block 2
    # 150,150,64 -> 75,75,128
    x = _depthwise_conv_block(x, 128, 1, strides=(2, 2), block_id=2)
    x = _depthwise_conv_block(x, 128, 1, block_id=3)

    # Block 3
    # 75,75,128 -> 38,38,256
    x = _depthwise_conv_block(x, 256, 1, strides=(2, 2), block_id=4)

    x = _depthwise_conv_block(x, 256, 1, block_id=5)
    net['conv4_3'] = x

    # Block 4
    # 38,38,256 -> 19,19,512
    x = _depthwise_conv_block(x, 512, 1, strides=(2, 2), block_id=6)
    x = _depthwise_conv_block(x, 512, 1, block_id=7)
    x = _depthwise_conv_block(x, 512, 1, block_id=8)
    x = _depthwise_conv_block(x, 512, 1, block_id=9)
    x = _depthwise_conv_block(x, 512, 1, block_id=10)
    x = _depthwise_conv_block(x, 512, 1, block_id=11)

    # Block 5
    # 19,19,512 -> 19,19,1024
    x = _depthwise_conv_block(x, 1024, 1, strides=(1, 1), block_id=12)
    x = _depthwise_conv_block(x, 1024, 1, block_id=13)
    net['fc7'] = x

    # x = Dropout(0.5, name='drop7')(x)
    # Block 6
    # 19,19,1024 -> 10,10,512
    net['conv6_1'] = Conv2D(256,
                            kernel_size=(1, 1),
                            activation='relu',
                            padding='same',
                            name='conv6_1')(net['fc7'])
    net['conv6_2'] = ZeroPadding2D(padding=((1, 1), (1, 1)),
                                   name='conv6_padding')(net['conv6_1'])
    # padding='valid' was the implicit default here; made explicit to match
    # conv7_2 below (the manual ZeroPadding2D above supplies the border).
    net['conv6_2'] = Conv2D(512,
                            kernel_size=(3, 3),
                            strides=(2, 2),
                            activation='relu',
                            padding='valid',
                            name='conv6_2')(net['conv6_2'])

    # Block 7
    # 10,10,512 -> 5,5,256
    net['conv7_1'] = Conv2D(128,
                            kernel_size=(1, 1),
                            activation='relu',
                            padding='same',
                            name='conv7_1')(net['conv6_2'])
    net['conv7_2'] = ZeroPadding2D(padding=((1, 1), (1, 1)),
                                   name='conv7_padding')(net['conv7_1'])
    net['conv7_2'] = Conv2D(256,
                            kernel_size=(3, 3),
                            strides=(2, 2),
                            activation='relu',
                            padding='valid',
                            name='conv7_2')(net['conv7_2'])
    # Block 8
    # 5,5,256 -> 3,3,256
    net['conv8_1'] = Conv2D(128,
                            kernel_size=(1, 1),
                            activation='relu',
                            padding='same',
                            name='conv8_1')(net['conv7_2'])
    net['conv8_2'] = Conv2D(256,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            activation='relu',
                            padding='valid',
                            name='conv8_2')(net['conv8_1'])

    # Block 9
    # 3,3,256 -> 1,1,256
    net['conv9_1'] = Conv2D(128,
                            kernel_size=(1, 1),
                            activation='relu',
                            padding='same',
                            name='conv9_1')(net['conv8_2'])
    net['conv9_2'] = Conv2D(256,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            activation='relu',
                            padding='valid',
                            name='conv9_2')(net['conv9_1'])
    #---------------------- backbone feature extraction: end ---------------------#
    return net
Code example #26 (votes: 0)
    def build_v2(input_shape, alpha, depth_multiplier, dropout, include_top,
                 input_tensor, pooling, classes, output_names):
        """Build a MobileNetV2-style classification model.

        Stacks the standard MobileNetV2 inverted-residual blocks on top of a
        strided 3x3 stem conv, then either a softmax classification head
        (``include_top``) or an optional global pooling.

        NOTE(review): ``Input(shape=input_shape)`` below is created *before*
        the ``input_shape is None`` handling, so passing ``input_shape=None``
        would fail at that line — confirm callers always supply a shape.
        NOTE(review): ``default_size`` is computed in every branch but never
        used afterwards; ``depth_multiplier`` and ``class_name`` are likewise
        unused here. This looks like partially-ported keras_applications code.
        """
        # Channel axis depends on the backend's image data format.
        channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

        img_input = Input(shape=input_shape)
        # Stem width is scaled by alpha and rounded to a multiple of 8.
        first_block_filters = make_divisible(32 * alpha, 8)
        class_name = output_names  # NOTE(review): assigned but never used.
        # If input_shape is None, infer shape from input_tensor
        if input_shape is None and input_tensor is not None:

            try:
                backend.is_keras_tensor(input_tensor)
            except ValueError:
                raise ValueError('input_tensor: ', input_tensor, 'is type: ',
                                 type(input_tensor),
                                 'which is not a valid type')

            if input_shape is None and not backend.is_keras_tensor(
                    input_tensor):
                default_size = 224
            elif input_shape is None and backend.is_keras_tensor(input_tensor):
                if backend.image_data_format() == 'channels_first':
                    rows = backend.int_shape(input_tensor)[2]
                    cols = backend.int_shape(input_tensor)[3]
                else:
                    rows = backend.int_shape(input_tensor)[1]
                    cols = backend.int_shape(input_tensor)[2]

                # Only the canonical square MobileNetV2 sizes are recognized.
                if rows == cols and rows in [96, 128, 160, 192, 224]:
                    default_size = rows
                else:
                    default_size = 224

        # If input_shape is None and no input_tensor
        elif input_shape is None:
            default_size = 224

        # If input_shape is not None, assume default size
        else:
            # NOTE(review): this branch mixes `K` while the one above uses
            # `backend` — presumably aliases of the same module; confirm.
            if K.image_data_format() == 'channels_first':
                rows = input_shape[1]
                cols = input_shape[2]
            else:
                rows = input_shape[0]
                cols = input_shape[1]

            if rows == cols and rows in [96, 128, 160, 192, 224]:
                default_size = rows
            else:
                default_size = 224

        # Stem: explicit asymmetric padding so the strided conv downsamples
        # the same way as the reference implementation.
        x = ZeroPadding2D(padding=correct_pad(K, img_input, 3),
                          name='Conv1_pad')(img_input)

        x = Conv2D(first_block_filters,
                   kernel_size=3,
                   strides=(2, 2),
                   padding='valid',
                   use_bias=False,
                   name='Conv1')(x)

        x = BatchNormalization(axis=channel_axis,
                               epsilon=1e-3,
                               momentum=0.999,
                               name='bn_Conv1')(x)

        x = Activation('relu', name='Conv1_relu')(x)

        # Inverted residual stages; strides of 2 mark resolution reductions.
        x = inverted_res_block(x,
                               filters=16,
                               alpha=alpha,
                               stride=1,
                               expansion=1,
                               block_id=0)

        x = inverted_res_block(x,
                               filters=24,
                               alpha=alpha,
                               stride=2,
                               expansion=6,
                               block_id=1)
        x = inverted_res_block(x,
                               filters=24,
                               alpha=alpha,
                               stride=1,
                               expansion=6,
                               block_id=2)

        x = inverted_res_block(x,
                               filters=32,
                               alpha=alpha,
                               stride=2,
                               expansion=6,
                               block_id=3)
        x = inverted_res_block(x,
                               filters=32,
                               alpha=alpha,
                               stride=1,
                               expansion=6,
                               block_id=4)
        x = inverted_res_block(x,
                               filters=32,
                               alpha=alpha,
                               stride=1,
                               expansion=6,
                               block_id=5)

        x = inverted_res_block(x,
                               filters=64,
                               alpha=alpha,
                               stride=2,
                               expansion=6,
                               block_id=6)
        x = inverted_res_block(x,
                               filters=64,
                               alpha=alpha,
                               stride=1,
                               expansion=6,
                               block_id=7)
        x = inverted_res_block(x,
                               filters=64,
                               alpha=alpha,
                               stride=1,
                               expansion=6,
                               block_id=8)
        x = inverted_res_block(x,
                               filters=64,
                               alpha=alpha,
                               stride=1,
                               expansion=6,
                               block_id=9)

        x = inverted_res_block(x,
                               filters=96,
                               alpha=alpha,
                               stride=1,
                               expansion=6,
                               block_id=10)
        x = inverted_res_block(x,
                               filters=96,
                               alpha=alpha,
                               stride=1,
                               expansion=6,
                               block_id=11)
        x = inverted_res_block(x,
                               filters=96,
                               alpha=alpha,
                               stride=1,
                               expansion=6,
                               block_id=12)

        x = inverted_res_block(x,
                               filters=160,
                               alpha=alpha,
                               stride=2,
                               expansion=6,
                               block_id=13)
        x = inverted_res_block(x,
                               filters=160,
                               alpha=alpha,
                               stride=1,
                               expansion=6,
                               block_id=14)
        x = inverted_res_block(x,
                               filters=160,
                               alpha=alpha,
                               stride=1,
                               expansion=6,
                               block_id=15)

        x = inverted_res_block(x,
                               filters=320,
                               alpha=alpha,
                               stride=1,
                               expansion=6,
                               block_id=16)

        # no alpha applied to last conv as stated in the paper:
        # if the width multiplier is greater than 1 we
        # increase the number of output channels
        if alpha > 1.0:
            last_block_filters = make_divisible(1280 * alpha, 8)
        else:
            last_block_filters = 1280

        x = Conv2D(last_block_filters,
                   kernel_size=1,
                   use_bias=False,
                   name='Conv_1')(x)
        x = BatchNormalization(axis=channel_axis,
                               epsilon=1e-3,
                               momentum=0.999,
                               name='Conv_1_bn')(x)
        x = Activation('relu', name='out_relu')(x)

        if include_top:
            # Classification head: global average pool + softmax logits.
            x = GlobalAveragePooling2D()(x)
            x = Dense(classes,
                      activation='softmax',
                      use_bias=True,
                      name='Logits')(x)
        else:
            # Headless: optional pooling selected by the `pooling` argument.
            if pooling == 'avg':
                x = GlobalAveragePooling2D()(x)
            elif pooling == 'max':
                x = GlobalMaxPooling2D()(x)

        # Tie the model to the original source input when one was given.
        if input_tensor is not None:
            inputs = keras_utils.get_source_inputs(input_tensor)
        else:
            inputs = img_input

        # Create model.
        model = Model(inputs, outputs=x)

        return model
Code example #27 (votes: 0)
# Number of target classes; `categories` is defined earlier in the file.
nb_classes = len(categories)


# Normalization: apply the ResNet preprocessing to train/test images.
x_train = preprocess_input(x_train)
x_test = preprocess_input(x_test)


# Frozen ImageNet-pretrained ResNet101 backbone (no classification head).
resnet101 = ResNet101(include_top=False,weights='imagenet',input_shape=x_train.shape[1:])
resnet101.trainable = False
x = resnet101.output
x = MaxPooling2D(pool_size=(2,2)) (x)
x = Flatten() (x)

# Small trainable classification head on top of the frozen backbone.
x = Dense(128, activation= 'relu') (x)
x = BatchNormalization() (x)
x = Dense(64, activation= 'relu') (x)
x = BatchNormalization() (x)
x = Dense(10, activation= 'softmax') (x)

model = Model(inputs = resnet101.input, outputs = x)
model.summary()
# Low learning rate (1e-5) since only the head is being trained.
model.compile(loss='categorical_crossentropy', optimizer=Adam(1e-5), metrics=['acc'])


# model_path = '../data/modelcheckpoint/Pproject152.hdf5'
# checkpoint = ModelCheckpoint(filepath=model_path , monitor='val_loss', verbose=1, save_best_only=True)
# early_stopping = EarlyStopping(monitor='val_loss', patience=10)
# # lr = ReduceLROnPlateau(patience=30, factor=0.5,verbose=1)

# history = model.fit(x_train, y_train, batch_size=16, epochs=50, validation_data=(x_test, y_test),callbacks=[early_stopping,
Code example #28 (votes: 0)
def train(batch_size=500, n=50, data=1):
    """Train a 6-character captcha recognizer on dataset ``data0{data}_train``.

    Builds a VGG-style CNN whose pooled features are repeated six times and
    fed through a GRU, producing one softmax head per character position.
    Trains with checkpointing / TensorBoard / early stopping and writes the
    best validation metrics to ``{version}.txt``.

    :param batch_size: mini-batch size for the train/validation generators.
    :param n: run label, used only in the version string and file names.
    :param data: dataset index; also selects the early-stopping warm-up
        (``min_epoch`` 5 for dataset 1, otherwise 10).
    """
    dataset = f"train/data0{data}_train"
    version = f"data0{data}_{n}"
    checkpoint_path = f'checkpoint_{version}.hdf5'
    log_dir = f'logs/{version}'
    epochs = 100
    img_width = 200
    img_height = 60
    alphabet = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
    char_to_int = dict((c, i) for i, c in enumerate(alphabet))
    df = pd.read_csv(f'{dataset}.csv', delimiter=',')
    # Split the 6-character code into one column per position, then one-hot
    # encode each character over the 36-symbol alphabet.
    df['code'] = df['code'].apply(lambda el: list(el))
    df[[f'code{i}' for i in range(1, 7)]] = pd.DataFrame(df['code'].to_list(),
                                                         index=df.index)
    for i in range(1, 7):
        df[f'code{i}'] = df[f'code{i}'].apply(
            lambda el: to_categorical(char_to_int[el], len(alphabet)))
    datagen = ImageDataGenerator(rescale=1. / 255, validation_split=0.2)
    train_generator = datagen.flow_from_dataframe(
        dataframe=df,
        directory=dataset,
        subset='training',
        x_col="filename",
        y_col=[f'code{i}' for i in range(1, 7)],
        class_mode="multi_output",
        target_size=(img_height, img_width),
        batch_size=batch_size)
    valid_generator = datagen.flow_from_dataframe(
        dataframe=df,
        directory=dataset,
        subset='validation',
        x_col="filename",
        y_col=[f'code{i}' for i in range(1, 7)],
        class_mode="multi_output",
        target_size=(img_height, img_width),
        batch_size=batch_size)
    input_shape = (img_height, img_width, 3)
    main_input = Input(shape=input_shape)
    x = main_input
    # Conv stack: 64 -> 128 -> 256 -> 512 filters, BN+ReLU after each conv.
    x = Conv2D(filters=64, kernel_size=(3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation(activation='relu')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation(activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2), padding='same')(x)
    x = Dropout(0.2)(x)
    x = Conv2D(filters=128, kernel_size=(3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation(activation='relu')(x)
    x = Conv2D(filters=128, kernel_size=(3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation(activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2), padding='same')(x)
    x = Dropout(0.2)(x)
    x = Conv2D(filters=256, kernel_size=(3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation(activation='relu')(x)
    x = Conv2D(filters=256, kernel_size=(3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation(activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2), padding='same')(x)
    x = Conv2D(filters=512, kernel_size=(3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation(activation='relu')(x)
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.2)(x)
    # Repeat the pooled feature vector once per character, decode with a GRU.
    x = RepeatVector(6)(x)
    x = GRU(128, return_sequences=True)(x)
    # One softmax head per character position. The loop index is bound as a
    # default argument (idx=i): a plain closure would late-bind `i`, so all
    # six Lambdas could slice the same timestep if re-traced/deserialized.
    # NOTE(review): output_shape evaluates to (1, 3) here, which does not
    # match the (128,) slice the lambda produces; TF2 typically ignores this
    # argument, but confirm before reusing it.
    out = [
        Dense(len(alphabet), name=f'digit{i + 1}', activation='softmax')(
            Lambda(lambda z, idx=i: z[:, idx, :],
                   output_shape=(1, ) + input_shape[2:])(x)) for i in range(6)
    ]
    model = Model(main_input, out)
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(0.0001),
                  metrics=['accuracy'])
    checkpoint = ModelCheckpoint(checkpoint_path,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 save_weights_only=False,
                                 mode='auto')
    # The two datasets differ only in the early-stopping warm-up length.
    earlystop = MinimumEpochEarlyStopping(monitor='val_loss',
                                          patience=10,
                                          verbose=1,
                                          mode='auto',
                                          min_epoch=5 if data == 1 else 10)
    tensorBoard = TensorBoard(log_dir=log_dir, histogram_freq=1)
    callbacks_list = [tensorBoard, earlystop, checkpoint]
    # callbacks_list = [tensorBoard]

    model.summary()
    train_history = model.fit(
        train_generator,
        steps_per_epoch=train_generator.n // train_generator.batch_size,
        epochs=epochs,
        validation_data=valid_generator,
        validation_steps=valid_generator.n // valid_generator.batch_size,
        verbose=1,
        callbacks=callbacks_list)
    # Report val loss + all six per-digit accuracies, first at the epoch with
    # the best val loss, then (after a separator) at the epoch with the best
    # digit-6 accuracy.
    with open(f"{version}.txt", "w") as file:
        loss_idx = np.argmin(train_history.history['val_loss'])
        digit6_idx = np.argmax(train_history.history['val_digit6_accuracy'])
        for section, idx in enumerate((loss_idx, digit6_idx)):
            if section:
                file.write(f"{'-'*20}\n")
            file.write(f"{train_history.history['val_loss'][idx]}\n")
            for d in range(1, 7):
                file.write(
                    f"{train_history.history[f'val_digit{d}_accuracy'][idx]}\n")
    K.clear_session()
Code example #29 (votes: 0)
# Test-set generator: 150x150 RGB images, one-hot (categorical) labels.
test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size=(150, 150),
                                            batch_size=32,
                                            class_mode='categorical')

# Sequential CNN: alternating Conv+MaxPool blocks with BatchNorm after pooling.
model = models.Sequential()
model.add(
    layers.Conv2D(32, (3, 3),
                  strides=(1, 1),
                  padding='valid',
                  activation='relu',
                  kernel_initializer='glorot_uniform',
                  input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D(pool_size=(2, 2), padding='valid'))
model.add(BatchNormalization())
model.add(
    layers.Conv2D(
        32,
        (3, 3),
        strides=(1, 1),
        padding='valid',
        activation='relu',
        kernel_initializer='glorot_uniform',
    ))
model.add(layers.MaxPooling2D(pool_size=(2, 2), padding='valid'))
model.add(BatchNormalization())
model.add(
    layers.Conv2D(
        64,
        (3, 3),
Code example #30 (votes: 0)
File: graph_models.py  Project: zmsunnyday/deepchem
    def __init__(self,
                 n_tasks,
                 graph_conv_layers,
                 dense_layer_size=128,
                 dropout=0.0,
                 mode="classification",
                 number_atom_features=75,
                 n_classes=2,
                 batch_normalize=True,
                 uncertainty=False,
                 batch_size=100):
        """An internal keras model class.

    The graph convolutions use a nonstandard control flow so the
    standard Keras functional API can't support them. We instead
    use the imperative "subclassing" API to implement the graph
    convolutions.

    All arguments have the same meaning as in GraphConvModel.
    """
        super(_GraphConvKerasModel, self).__init__()
        if mode not in ['classification', 'regression']:
            raise ValueError(
                "mode must be either 'classification' or 'regression'")

        self.mode = mode
        self.uncertainty = uncertainty

        if not isinstance(dropout, collections.Sequence):
            dropout = [dropout] * (len(graph_conv_layers) + 1)
        if len(dropout) != len(graph_conv_layers) + 1:
            raise ValueError('Wrong number of dropout probabilities provided')
        if uncertainty:
            if mode != "regression":
                raise ValueError(
                    "Uncertainty is only supported in regression mode")
            if any(d == 0.0 for d in dropout):
                raise ValueError(
                    'Dropout must be included in every layer to predict uncertainty'
                )

        self.graph_convs = [
            layers.GraphConv(layer_size, activation_fn=tf.nn.relu)
            for layer_size in graph_conv_layers
        ]
        self.batch_norms = [
            BatchNormalization(fused=False) if batch_normalize else None
            for _ in range(len(graph_conv_layers) + 1)
        ]
        self.dropouts = [
            Dropout(rate=rate) if rate > 0.0 else None for rate in dropout
        ]
        self.graph_pools = [layers.GraphPool() for _ in graph_conv_layers]
        self.dense = Dense(dense_layer_size, activation=tf.nn.relu)
        self.graph_gather = layers.GraphGather(batch_size=batch_size,
                                               activation_fn=tf.nn.tanh)
        self.trim = TrimGraphOutput()
        if self.mode == 'classification':
            self.reshape_dense = Dense(n_tasks * n_classes)
            self.reshape = Reshape((n_tasks, n_classes))
            self.softmax = Softmax()
        else:
            self.regression_dense = Dense(n_tasks)
            if self.uncertainty:
                self.uncertainty_dense = Dense(n_tasks)
                self.uncertainty_trim = TrimGraphOutput()
                self.uncertainty_activation = Activation(tf.exp)