Example #1
def ResModel(input_shape, n_channels=3):
    """
    residual model structure:
    input +
    [conv 3*3*n_channels + conv 3*3*n_channels] * num_res_blocks +
    flatten +
    sigmoid
    """
    our_model = Sequential()
    num_res_blocks = 3
    kernel_size = (3, 3)
    # build first layer
    our_model.add(building_residual_block(input_shape, n_channels,
                                          kernel_size))
    for i in range(num_res_blocks - 1):
        our_model.add(
            building_residual_block(
                (input_shape[0], input_shape[1], n_channels), n_channels,
                kernel_size))
    our_model.add(Flatten())
    our_model.add(Dense(24, activation='relu'))
    our_model.add(Dropout(0.4))
    our_model.add(Dense(1, activation='sigmoid'))
    our_model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
    plot(our_model, to_file='res_model.png')  # save an architecture diagram of the model
    return our_model
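A minimal usage sketch for the function above; the 32x32x3 input shape is an assumption, not taken from the source, and building_residual_block must come from the residual-block helper module this example relies on (Keras 1.x API):

our_model = ResModel(input_shape=(32, 32, 3), n_channels=3)  # hypothetical input shape
our_model.summary()  # residual blocks -> Flatten -> Dense -> Dense(1, sigmoid)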
Example #2
def design_for_residual_blocks(num_channel_input=1):
    ''''''
    model = Sequential()  # it's a CONTAINER, not MODEL
    # set numbers
    num_big_blocks = 3
    image_patch_sizes = [[3, 3]] * num_big_blocks
    pool_sizes = [(2, 2)] * num_big_blocks
    n_features = [128, 256, 512, 512, 1024]
    n_features_next = [256, 512, 512, 512, 1024]
    height_input = 32
    width_input = 32
    for conv_idx in range(num_big_blocks):
        n_feat_here = n_features[conv_idx]
        # residual block 0
        model.add(
            residual_blocks.building_residual_block(
                (num_channel_input, height_input, width_input),
                n_feat_here,
                kernel_sizes=image_patch_sizes[conv_idx]))

        # residual block 1 (optional; add it if your resources allow)
        if False:  # disabled by default
            model.add(
                residual_blocks.building_residual_block(
                    (n_feat_here, height_input, width_input),
                    n_feat_here,
                    kernel_sizes=image_patch_sizes[conv_idx]))

        # the last residual block (N-1):
        # pad zeros, subsample, and increase the number of channels
        pad_height = compute_padding_length(height_input,
                                            pool_sizes[conv_idx][0],
                                            image_patch_sizes[conv_idx][0])
        pad_width = compute_padding_length(width_input,
                                           pool_sizes[conv_idx][1],
                                           image_patch_sizes[conv_idx][1])
        model.add(ZeroPadding2D(padding=(pad_height, pad_width)))
        height_input += 2 * pad_height
        width_input += 2 * pad_width
        n_feat_next = n_features_next[conv_idx]
        model.add(
            residual_blocks.building_residual_block(
                (n_feat_here, height_input, width_input),
                n_feat_next,
                kernel_sizes=image_patch_sizes[conv_idx],
                is_subsample=True,
                subsample=pool_sizes[conv_idx]))

        height_input, width_input = model.output_shape[2:]
        # width_input  = int(width_input/pool_sizes[conv_idx][1])
        num_channel_input = n_feat_next

    # Add average pooling at the end:
    print('Average pooling, from (%d,%d) to (1,1)' %
          (height_input, width_input))
    model.add(AveragePooling2D(pool_size=(height_input, width_input)))

    return model
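The function above returns a feature-extractor container ("a CONTAINER, not MODEL", per the comment), so a classifier head still has to be stacked on top before compiling. A hedged sketch of that step, assuming Keras 1.x with channels-first (Theano) ordering to match the (channels, height, width) shapes above; the 10-class softmax head is a hypothetical addition, not taken from the source:

clf = Sequential()
clf.add(design_for_residual_blocks(num_channel_input=1))  # spatial dims are pooled to (1, 1) above
clf.add(Flatten())
clf.add(Dense(10, activation='softmax'))  # hypothetical 10-class head
clf.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])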
Example #3
def design_for_residual_blocks(num_channel_input=1):

    model = Sequential()  # it's a CONTAINER, not MODEL
    # set numbers
    num_big_blocks = 3
    height_input = 56
    width_input = 300
    image_patch_sizes = [[3, width_input]] * num_big_blocks
    pool_sizes = [(height_input - 3 + 1, 1)] * num_big_blocks
    n_features = [128, 256, 512, 512, 1024]
    n_features_next = [256, 512, 512, 512, 1024]
    for conv_idx in range(num_big_blocks):
        n_feat_here = n_features[conv_idx]
        # residual block 0
        model.add(
            residual_blocks.building_residual_block(
                (num_channel_input, height_input, width_input),
                n_feat_here,
                kernel_sizes=image_patch_sizes[conv_idx]))

        # residual block 1 (optional; add it if your resources allow)
        if False:  # disabled by default
            model.add(
                residual_blocks.building_residual_block(
                    (n_feat_here, height_input, width_input),
                    n_feat_here,
                    kernel_sizes=image_patch_sizes[conv_idx]))

        n_feat_next = n_features_next[conv_idx]
        model.add(
            residual_blocks.building_residual_block(
                (n_feat_here, height_input, width_input),
                n_feat_next,
                kernel_sizes=image_patch_sizes[conv_idx],
                is_subsample=True,
                subsample=pool_sizes[conv_idx]))

        height_input, width_input = model.output_shape[2:]
        num_channel_input = n_feat_next

    print('Average pooling, from (%d,%d) to (1,1)' %
          (height_input, width_input))
    model.add(AveragePooling2D(pool_size=(height_input, width_input)))

    return model
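Unlike the 32x32 variant above, this one takes 56x300 inputs and uses kernels that span the full 300-wide feature axis, so the convolution slides only along the 56-row axis. A small worked-numbers sketch for the constants above (a reading aid under the same channels-first assumption, not additional behaviour):

# image_patch_sizes[i] = [3, 300]         -> kernel covers the full feature width
# pool_sizes[i]        = (56 - 3 + 1, 1)  -> (54, 1), passed as the subsample step of the last block
feature_extractor = design_for_residual_blocks(num_channel_input=1)  # expects (N, 1, 56, 300) inputs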
def model(X_train, y_train, X_test, y_test, X_valid, y_valid):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    file_name = sys.argv[1]
    model_file_name = "./model/" + file_name + ".02.bestmodel.hdf5"

    NUM_FILTER1 = 80
    INPUT_LENGTH = 10000
    FILTER_LENGTH1 = 100
    INIT_VALUE = 'glorot_normal'
    POOL1 = 16
    POOL2 = 4
    POOL3 = 2

    print('building model')
    model = Sequential()
    model.add(
        Convolution1D(input_dim=4,
                      input_length=INPUT_LENGTH,
                      nb_filter=NUM_FILTER1,
                      filter_length=FILTER_LENGTH1,
                      border_mode="valid",
                      subsample_length=1,
                      init=INIT_VALUE))
    model.add(MaxPooling1D(pool_length=POOL1))

    input_length, input_dim = model.output_shape[1:]
    nb_filter = 128
    filter_length = 10
    subsample = 1
    OUTPUT_DIM1 = {{choice([70, 75, 80, 85, 90])}}

    model.add(
        building_residual_block(r_input_length=input_length,
                                r_input_dim=input_dim,
                                r_nb_filter=nb_filter,
                                r_filter_length=filter_length,
                                is_subsample=True,
                                n_skip=2,
                                r_subsample=subsample))

    model.add(AveragePooling1D(pool_length=POOL2))
    model.add(Dropout(0.6))

    input_length, input_dim = model.output_shape[1:]
    model.add(
        building_residual_block(r_input_length=input_length,
                                r_input_dim=input_dim,
                                r_nb_filter=nb_filter,
                                r_filter_length=filter_length,
                                is_subsample=True,
                                n_skip=2,
                                r_subsample=subsample))

    model.add(AveragePooling1D(pool_length=POOL3))

    model.add(Flatten())
    model.add(Dense(output_dim=OUTPUT_DIM1, init=INIT_VALUE))

    model.add(Activation('tanh'))
    model.add(Dropout(0.5))

    model.add(Dense(output_dim=1))
    model.add(Activation('sigmoid'))

    print('compiling model')
    sgd = SGD(lr=0.01, momentum=0.9, decay=1e-5, nesterov=True)
    model.compile(loss='binary_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])

    checkpoint = ModelCheckpoint(filepath=model_file_name,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')
    callbacks_list = [checkpoint]

    model.summary()
    model.fit(X_train,
              y_train,
              batch_size=100,
              nb_epoch=10,
              shuffle=True,
              validation_data=(X_valid, y_valid),
              callbacks=callbacks_list)

    model.layers[1].get_weights()
    score, acc1 = model.evaluate(X_test, y_test, verbose=0)
    print('predicting on test sequences')
    predret = model.predict(X_test, verbose=1)

    auc = roc_auc_score(y_test, predret)
    predret_class = model.predict_classes(X_test, verbose=1)
    mcc = matthews_corrcoef(y_test, predret_class)
    acc2 = accuracy_score(y_test, predret_class)

    print('auc: %s' % auc)
    print('mcc: %s' % mcc)
    print('acc1: %s' % acc1)
    print('acc2: %s' % acc2)

    return {'loss': -auc, 'status': STATUS_OK, 'model': model}
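The function above follows the hyperas model-providing convention described in its docstring; the {{choice([...])}} template is expanded by hyperas before the function runs. A hedged sketch of how such a function is typically driven (the data() loader and the evaluation budget are assumptions, not taken from the source):

from hyperopt import Trials, tpe
from hyperas import optim

best_run, best_model = optim.minimize(model=model,
                                      data=data,        # assumed data() returning the six arrays
                                      algo=tpe.suggest,
                                      max_evals=10,     # assumed search budget
                                      trials=Trials())
print('best hyperparameters: %s' % best_run)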