Example #1
def build_model(config: BEGANConfig):
    K.set_image_data_format('channels_last')

    autoencoder = build_autoencoder(config)
    generator = build_generator(config)
    discriminator = build_discriminator(config, autoencoder)

    return autoencoder, generator, discriminator
Example #2
def set_img_format():
    try:
        if K.backend() == 'theano':
            K.set_image_data_format('channels_first')
        else:
            K.set_image_data_format('channels_last')
    except AttributeError:
        if K._BACKEND == 'theano':
            K.set_image_dim_ordering('th')
        else:
            K.set_image_dim_ordering('tf')
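The try/except above bridges the Keras 1.x and 2.x backend APIs: `set_image_data_format` exists from Keras 2 onward, while Keras 1 exposed `set_image_dim_ordering` with the 'th'/'tf' shorthand. A minimal usage sketch, assuming a Keras 2 install on the TensorFlow backend:

from keras import backend as K

set_img_format()
print(K.image_data_format())  # 'channels_last' on the TensorFlow backend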
Example #3
def test_get_img_shape_on_2d_image():
    n = 5
    channels = 4
    dim1 = 1
    dim2 = 2

    K.set_image_data_format('channels_first')
    assert (n, channels, dim1, dim2) == utils.get_img_shape(K.ones(shape=(n, channels, dim1, dim2)))

    K.set_image_data_format('channels_last')
    assert (n, channels, dim1, dim2) == utils.get_img_shape(K.ones(shape=(n, dim1, dim2, channels)))
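For context: `utils.get_img_shape` is assumed here to normalize a tensor's shape to `(samples, channels, dim1, dim2)` order regardless of the active data format. A minimal sketch of such a helper, matching the two assertions above:

from keras import backend as K

def get_img_shape(img):
    # Hypothetical helper: always report (samples, channels, dim1, dim2),
    # moving the trailing channel axis forward under channels_last.
    shape = K.int_shape(img)
    if K.image_data_format() == 'channels_last':
        shape = (shape[0], shape[-1]) + shape[1:-1]
    return shape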
Example #4
def set_args_and_get_model_from_semantics(args, semantics_json, weights_hd5=None):
    """Recreate a model from a json file specifying model semantics.

    Update the args namespace from the semantics file values.
    Assert that the serialized tensor map and the recreated one are the same.

    Arguments:
        args.tensor_name: String which indicates tensor map to use or None
        args.window_size: sites included in the tensor map
        args.read_limit: Maximum reads included in the tensor map
        args.annotations: List of annotations or None
        semantics_json: Semantics json file (created with serialize_model_semantics())

    Returns:
        The Keras model
    """
    with open(semantics_json, 'r') as infile:
        semantics = json.load(infile)

    if 'model_version' in semantics:
        assert(args.model_version == semantics['model_version'])

    if 'input_tensor_map' in semantics:
        args.tensor_name = semantics['input_tensor_map_name']
        args.window_size = semantics['window_size']
        args.read_limit = semantics['read_limit']
        tm = tensor_maps.get_tensor_channel_map_from_args(args)
        assert(len(tm) == len(semantics['input_tensor_map']))
        for key in tm:
            assert(tm[key] == semantics['input_tensor_map'][key])

    if 'input_annotations' in semantics:
        args.annotations = semantics['input_annotations']
        args.annotation_set = semantics['input_annotation_set']

    args.input_symbols = semantics['input_symbols']
    args.labels = semantics['output_labels']

    if 'channels_last' in semantics:
        args.channels_last = semantics['channels_last']
        if args.channels_last:
            K.set_image_data_format('channels_last')
        else:
            K.set_image_data_format('channels_first')

    if weights_hd5 is None:
        weights_hd5 = os.path.join(os.path.dirname(semantics_json), semantics['architecture'])

    print('Updated arguments:', args, '\nWeight file from:', weights_hd5)
    model = load_model(weights_hd5, custom_objects=get_metric_dict(args.labels))
    model.summary()
    return model
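A hedged usage sketch (the JSON path is a placeholder): the semantics file written by `serialize_model_semantics()` sits next to the architecture HD5, so recreating a model only needs the parsed args and that path:

args = parse_args()  # e.g. the argument parser shown in Example #11
model = set_args_and_get_model_from_semantics(args, 'weights/my_model.json')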
Example #5
def test_inceptionresnetv2_notop():
    def target(queue):
        model = applications.InceptionResNetV2(weights=None, include_top=False)
        queue.put(model.output_shape)

    global_image_data_format = K.image_data_format()
    queue = Queue()

    K.set_image_data_format('channels_first')
    p = Process(target=target, args=(queue,))
    p.start()
    p.join()
    K.set_image_data_format(global_image_data_format)
    assert not queue.empty(), 'Model creation failed.'
    model_output_shape = queue.get_nowait()
    assert model_output_shape == (None, 1536, None, None)

    K.set_image_data_format('channels_last')
    p = Process(target=target, args=(queue,))
    p.start()
    p.join()
    K.set_image_data_format(global_image_data_format)
    assert not queue.empty(), 'Model creation failed.'
    model_output_shape = queue.get_nowait()
    assert model_output_shape == (None, None, None, 1536)
Example #6
def test_segment_3d():
    from keras import backend as K
    K.set_image_data_format("channels_last")  # Set at channels_first in test_deepseg_lesion.test_segment()

    contrast_test = 't2'
    model_path = os.path.join(sct.__sct_dir__, 'data', 'deepseg_sc_models', '{}_sc_3D.h5'.format(contrast_test))   

    fname_t2 = os.path.join(sct.__sct_dir__, 'sct_testing_data/t2/t2.nii.gz')  # install: sct_download_data -d sct_testing_data
    fname_t2_seg = os.path.join(sct.__sct_dir__, 'sct_testing_data/t2/t2_seg.nii.gz')  # install: sct_download_data -d sct_testing_data

    img, gt = _preprocess_segment(fname_t2, fname_t2_seg, contrast_test, dim_3=True)

    seg = deepseg_sc.segment_3d(model_fname=model_path, contrast_type=contrast_test, im_in=img)
    seg_im = img.copy()
    seg_im.data = seg

    assert msct_image.compute_dice(seg_im, gt) > 0.80
Example #7
def test_vgg19():
    for data_format in ['channels_first', 'channels_last']:
        K.set_image_data_format(data_format)
        if K.image_data_format() == 'channels_first':
            x = Input(shape=(3, 500, 500))
            pool1_shape = (None, 64, 250, 250)
            pool2_shape = (None, 128, 125, 125)
            pool3_shape = (None, 256, 63, 63)
            pool4_shape = (None, 512, 32, 32)
            drop7_shape = (None, 4096, 16, 16)
            conv1_weight = -0.35009676
        else:
            x = Input(shape=(500, 500, 3))
            pool1_shape = (None, 250, 250, 64)
            pool2_shape = (None, 125, 125, 128)
            pool3_shape = (None, 63, 63, 256)
            pool4_shape = (None, 32, 32, 512)
            drop7_shape = (None, 16, 16, 4096)
            conv1_weight = 0.429471

        encoder = VGG19(x, weights='imagenet', trainable=False)
        feat_pyramid = encoder.outputs

        assert len(feat_pyramid) == 5

        assert K.int_shape(feat_pyramid[0]) == drop7_shape
        assert K.int_shape(feat_pyramid[1]) == pool4_shape
        assert K.int_shape(feat_pyramid[2]) == pool3_shape
        assert K.int_shape(feat_pyramid[3]) == pool2_shape
        assert K.int_shape(feat_pyramid[4]) == pool1_shape

        for layer in encoder.layers:
            if layer.name == 'block1_conv1':
                assert layer.trainable is False
                weights = K.eval(layer.weights[0])
                assert np.allclose(weights[0, 0, 0, 0], conv1_weight)

        encoder_from_scratch = VGG19(x, weights=None, trainable=True)
        for layer in encoder_from_scratch.layers:
            if layer.name == 'block1_conv1':
                assert layer.trainable is True
                weights = K.eval(layer.weights[0])
                assert not np.allclose(weights[0, 0, 0, 0], conv1_weight)
Example #8
def test_inceptionresnetv2_notop():
    global_image_data_format = K.image_data_format()

    K.set_image_data_format('channels_first')
    model = applications.InceptionResNetV2(weights=None, include_top=False)
    assert model.output_shape == (None, 1536, None, None)

    K.set_image_data_format('channels_last')
    model = applications.InceptionResNetV2(weights=None, include_top=False)
    assert model.output_shape == (None, None, None, 1536)

    K.set_image_data_format(global_image_data_format)
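Examples #5 and #8 both save and restore the process-global data format by hand; a small context manager (an assumption, not part of the Keras API) makes the same pattern exception-safe:

from contextlib import contextmanager
from keras import backend as K

@contextmanager
def image_data_format(data_format):
    # Temporarily switch the global image data format, restoring the
    # previous value even if the body raises.
    old_format = K.image_data_format()
    K.set_image_data_format(data_format)
    try:
        yield
    finally:
        K.set_image_data_format(old_format)

# with image_data_format('channels_first'):
#     model = applications.InceptionResNetV2(weights=None, include_top=False)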
Example #9
def test_smoke_channels_last():
    K.set_image_data_format('channels_last')
    _test_smoke('channels_last')
Example #10
def MobileNet(input_shape=None,
              alpha=1.0,
              depth_multiplier=1,
              dropout=1e-3,
              include_top=True,
              weights='imagenet',
              input_tensor=None,
              pooling=None,
              classes=1000):
    """Instantiates the MobileNet architecture.

    Note that only TensorFlow is supported for now,
    therefore it only works with the data format
    `image_data_format='channels_last'` in your Keras config
    at `~/.keras/keras.json`.

    To load a MobileNet model via `load_model`, import the custom
    objects `relu6` and `DepthwiseConv2D` and pass them to the
    `custom_objects` parameter.
    E.g.
    model = load_model('mobilenet.h5', custom_objects={
                       'relu6': mobilenet.relu6,
                       'DepthwiseConv2D': mobilenet.DepthwiseConv2D})

    # Arguments
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
            E.g. `(200, 200, 3)` would be one valid value.
        alpha: controls the width of the network.
            - If `alpha` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `alpha` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `alpha` = 1, default number of filters from the paper
                 are used at each layer.
        depth_multiplier: depth multiplier for depthwise convolution
            (also called the resolution multiplier)
        dropout: dropout rate
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: `None` (random initialization) or
            `imagenet` (ImageNet weights)
        input_tensor: optional Keras tensor (i.e. output of
            `layers.Input()`)
            to use as image input for the model.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model
                will be the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a
                2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
        RuntimeError: If attempting to run this model with a
            backend that does not support separable convolutions.
    """

    if K.backend() != 'tensorflow':
        raise RuntimeError('Only TensorFlow backend is currently supported, '
                           'as other backends do not support '
                           'depthwise convolution.')

    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as ImageNet with `include_top` '
                         'as true, `classes` should be 1000')

    # Determine proper input shape.
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=32,
                                      data_format=K.image_data_format(),
                                      include_top=include_top or weights)
    if K.image_data_format() == 'channels_last':
        row_axis, col_axis = (0, 1)
    else:
        row_axis, col_axis = (1, 2)
    rows = input_shape[row_axis]
    cols = input_shape[col_axis]

    if weights == 'imagenet':
        if depth_multiplier != 1:
            raise ValueError('If imagenet weights are being loaded, '
                             'depth multiplier must be 1')

        if alpha not in [0.25, 0.50, 0.75, 1.0]:
            raise ValueError('If imagenet weights are being loaded, '
                             'alpha can be one of'
                             '`0.25`, `0.50`, `0.75` or `1.0` only.')

        if rows != cols or rows not in [128, 160, 192, 224]:
            raise ValueError('If imagenet weights are being loaded, '
                             'input must have a static square shape (one of '
                             '(128,128), (160,160), (192,192), or (224, 224)).'
                             ' Input shape provided = %s' % (input_shape,))

    if K.image_data_format() != 'channels_last':
        warnings.warn('The MobileNet family of models is only available '
                      'for the input data format "channels_last" '
                      '(width, height, channels). '
                      'However your settings specify the default '
                      'data format "channels_first" (channels, width, height).'
                      ' You should set `image_data_format="channels_last"` '
                      'in your Keras config located at ~/.keras/keras.json. '
                      'The model being returned right now will expect inputs '
                      'to follow the "channels_last" data format.')
        K.set_image_data_format('channels_last')
        old_data_format = 'channels_first'
    else:
        old_data_format = None

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = _conv_block(img_input, 32, alpha, strides=(2, 2))
    x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)

    x = _depthwise_conv_block(x, 128, alpha, depth_multiplier,
                              strides=(2, 2), block_id=2)
    x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)

    x = _depthwise_conv_block(x, 256, alpha, depth_multiplier,
                              strides=(2, 2), block_id=4)
    x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)

    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier,
                              strides=(2, 2), block_id=6)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)

    x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier,
                              strides=(2, 2), block_id=12)
    x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)

    if include_top:
        if K.image_data_format() == 'channels_first':
            shape = (int(1024 * alpha), 1, 1)
        else:
            shape = (1, 1, int(1024 * alpha))

        x = GlobalAveragePooling2D()(x)
        x = Reshape(shape, name='reshape_1')(x)
        x = Dropout(dropout, name='dropout')(x)
        x = Conv2D(classes, (1, 1),
                   padding='same', name='conv_preds')(x)
        x = Activation('softmax', name='act_softmax')(x)
        x = Reshape((classes,), name='reshape_2')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))

    # load weights
    if weights == 'imagenet':
        if K.image_data_format() == 'channels_first':
            raise ValueError('Weights for "channels_first" format '
                             'are not available.')
        if alpha == 1.0:
            alpha_text = '1_0'
        elif alpha == 0.75:
            alpha_text = '7_5'
        elif alpha == 0.50:
            alpha_text = '5_0'
        else:
            alpha_text = '2_5'

        if include_top:
            model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows)
            weight_path = BASE_WEIGHT_PATH + model_name
            weights_path = get_file(model_name,
                                    weight_path,
                                    cache_subdir='models')
        else:
            model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows)
            weight_path = BASE_WEIGHT_PATH + model_name
            weights_path = get_file(model_name,
                                    weight_path,
                                    cache_subdir='models')
        model.load_weights(weights_path)

    if old_data_format:
        K.set_image_data_format(old_data_format)
    return model
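A hedged usage sketch: with ImageNet weights the checks above restrict `alpha` to {0.25, 0.5, 0.75, 1.0} and the input to a static square of 128, 160, 192, or 224 pixels:

# Assumes a TensorFlow backend with 'channels_last' data format.
model = MobileNet(input_shape=(160, 160, 3), alpha=0.5,
                  include_top=False, weights='imagenet', pooling='avg')
model.summary()  # output is a 2D tensor of shape (None, int(1024 * 0.5))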
Example #11
def parse_args():
    parser = argparse.ArgumentParser()

    # Required mode argument: what would you like to do?
    parser.add_argument(
        'mode',
        help='High level recipe: write tensors, train, test or evaluate models.'
    )

    # Tensor defining arguments
    parser.add_argument(
        '--tensor_map',
        default='read_tensor',
        help='Key which looks up the map from tensor channels to their meaning.'
    )
    parser.add_argument(
        '--tensor_types',
        nargs='+',
        default=['read_tensor'],
        help=
        'List of keys which look up maps from tensor channels to their meaning. Only used when writing tensors.'
    )
    parser.add_argument(
        '--input_symbols',
        default=defines.inputs_indel,
        help='Dict mapping input symbols to their index within input tensors.')
    parser.add_argument(
        '--batch_size',
        default=32,
        type=int,
        help='Mini batch size for stochastic gradient descent algorithms.')
    parser.add_argument('--read_limit',
                        default=128,
                        type=int,
                        help='Maximum number of reads to load.')
    parser.add_argument('--read_sort',
                        default='base',
                        choices=['base', 'reference_start'],
                        help='How to sort the reads in the tensor.')
    parser.add_argument(
        '--window_size',
        default=128,
        type=int,
        help=
        'Size of sequence window to use as input, typically centered at a variant.'
    )
    parser.add_argument(
        '--channels_last',
        default=True,
        dest='channels_last',
        action='store_true',
        help=
        'Store the channels in the last axis of tensors, tensorflow->true, theano->false'
    )
    parser.add_argument(
        '--channels_first',
        dest='channels_last',
        action='store_false',
        help=
        'Store the channels in the first axis of tensors, tensorflow->false, theano->true'
    )
    parser.add_argument(
        '--base_quality_mode',
        default='phot',
        choices=['phot', 'phred', '1hot'],
        help='How to treat base qualities, must be in [phot, phred, 1hot]')

    # Label defining arguments
    parser.add_argument(
        '--labels',
        default=defines.snp_indel_labels,
        help='Dict mapping label names to their index within label tensors.')
    parser.add_argument(
        '--label_smoothing',
        default=0.0,
        type=float,
        help=
        'Rate of smoothing for class labels  [0.0, 1.0] i.e. [label_smoothing, 1.0-label_smoothing].'
    )
    parser.add_argument(
        '--label_sites',
        default=True,
        dest='label_sites',
        action='store_true',
        help='Truth labels are for variant sites (i.e. not allele specific).')
    parser.add_argument(
        '--label_alleles',
        dest='label_sites',
        action='store_false',
        help=
        'If True then truth labels are allele specific, so one site may have both true and false variants.'
    )

    # Annotation arguments
    parser.add_argument(
        '--annotations',
        help=
        'Array of annotation names, initialised via annotation_set argument')
    parser.add_argument(
        '--annotation_set',
        default='best_practices',
        choices=defines.annotations.keys(),
        help=
        'Key which maps to an annotations list (or None for architectures that do not take annotations).'
    )
    parser.add_argument(
        '--normalize_annotations',
        default=False,
        action='store_true',
        help=
        'If true tensor generators will look for mean and std files and normalize annotations.'
    )
    parser.add_argument(
        '--max_normalize_sites',
        default=1e9,
        help=
        'Maximum number of sites from which to derive normalization values.')
    parser.add_argument(
        '--sample_name',
        default='NA12878',
        help=
        'The sample name from which to gather genotype information from the negative VCF.'
    )

    # Training and optimization related arguments
    parser.add_argument(
        '--epochs',
        default=25,
        type=int,
        help=
        'Number of epochs, typically passes through the entire dataset, not always well-defined.'
    )
    parser.add_argument(
        '--batch_normalization',
        default=False,
        action='store_true',
        help='Mini batch normalization layers after convolutions.')
    parser.add_argument(
        '--samples',
        default=500,
        type=int,
        help='Maximum number of data samples to write or load.')
    parser.add_argument(
        '--patience',
        default=4,
        type=int,
        help=
        'Early Stopping parameter: Maximum number of epochs to run without validation loss improvements.'
    )
    parser.add_argument(
        '--training_steps',
        default=80,
        type=int,
        help='Number of training batches to examine in an epoch.')
    parser.add_argument(
        '--validation_steps',
        default=40,
        type=int,
        help='Number of validation batches to examine in an epoch validation.')
    parser.add_argument(
        '--iterations',
        default=5,
        type=int,
        help=
        'Generic iteration limit for hyperparameter optimization, animation, and other counts.'
    )
    parser.add_argument(
        '--max_parameters',
        default=5e6,
        type=int,
        help=
        'Maximum number of model parameters used for hyperparameter optimization, etc.'
    )

    # Dataset generation related arguments
    parser.add_argument(
        '--downsample_snps',
        default=1.0,
        type=float,
        help='Rate of SNP examples that are kept must be in [0.0, 1.0].')
    parser.add_argument(
        '--downsample_indels',
        default=1.0,
        type=float,
        help='Rate of INDEL examples that are kept must be in [0.0, 1.0].')
    parser.add_argument(
        '--downsample_not_snps',
        default=1.0,
        type=float,
        help='Rate of NOT_SNP examples that are kept must be in [0.0, 1.0].')
    parser.add_argument(
        '--downsample_not_indels',
        default=1.0,
        type=float,
        help='Rate of NOT_INDEL examples that are kept must be in [0.0, 1.0].')
    parser.add_argument(
        '--downsample_reference',
        default=0.001,
        type=float,
        help=
        'Rate of reference genotype examples that are kept must be in [0.0, 1.0].'
    )
    parser.add_argument(
        '--downsample_homozygous',
        default=0.001,
        type=float,
        help='Rate of homozygous genotypes that are kept must be in [0.0, 1.0].'
    )
    parser.add_argument(
        '--start_pos',
        default=0,
        type=int,
        help='Genomic position start for parallel tensor writing.')
    parser.add_argument(
        '--end_pos',
        default=0,
        type=int,
        help='Genomic position end for parallel tensor writing.')
    parser.add_argument(
        '--skip_positive_class',
        default=False,
        action='store_true',
        help='Whether to skip positive examples when writing tensors.')
    parser.add_argument(
        '--use_lowercase_dna',
        default=False,
        action='store_true',
        help=
        'Whether to include tensors covering lower case bases when writing tensors.'
    )
    parser.add_argument(
        '--valid_ratio',
        default=0.1,
        type=float,
        help=
        'Rate of training tensors to save for validation must be in [0.0, 1.0].'
    )
    parser.add_argument(
        '--test_ratio',
        default=0.2,
        type=float,
        help='Rate of training tensors to save for testing [0.0, 1.0].')
    parser.add_argument(
        '--valid_contigs',
        nargs='+',
        default=['18', '19', 'chr18', 'chr19'],
        help=
        'Contigs to reserve for validation data in addition to those reserved by valid_ratio.'
    )
    parser.add_argument(
        '--test_contigs',
        nargs='+',
        default=['20', '21', 'chr20', 'chr21'],
        help=
        'Contigs to reserve for testing data in addition to those reserved by test_ratio.'
    )
    parser.add_argument('--chrom',
                        help='Chromosome to load for parallel tensor writing.')

    # Input files and directories: vcfs, bams, beds, hd5, fasta
    parser.add_argument('--image_dir',
                        default=None,
                        help='Directory to write images and plots to.')
    parser.add_argument(
        '--weights_hd5',
        default='',
        help=
        'A hd5 file of weights to initialize a model, will use all layers with names that match.'
    )
    parser.add_argument(
        '--architecture',
        default='',
        help=
        'A hd5 file of specifying weights and architecture of a neural net.')
    parser.add_argument(
        '--architectures',
        nargs='+',
        help='Specify one or more architecture configuration files.')
    parser.add_argument(
        '--bam_file',
        default=defines.bam_file,
        help='Path to a BAM file to train from or generate tensors with.')
    parser.add_argument(
        '--train_vcf',
        default=defines.nist_vcf,
        help=
        'Path to a VCF that has verified true calls from NIST, platinum genomes, etc.'
    )
    parser.add_argument(
        '--annotation_vcf',
        default=None,
        help=
        'Path to a VCF that has annotations (typically from Haplotype Caller).'
    )
    parser.add_argument(
        '--negative_vcf',
        default=defines.negative_vcf,
        help=
        'Haplotype Caller or VQSR generated VCF with raw annotation values [and quality scores].'
    )
    parser.add_argument(
        '--ignore_vcf',
        default=None,
        help='Optional VCF of sites to ignore when doing evaluations.')
    parser.add_argument(
        '--include_vcf',
        default=None,
        help=
        'Optional VCF of sites to include while ignoring all other sites when doing evaluations.'
    )
    parser.add_argument('--output_vcf',
                        default=None,
                        help='Optional VCF to write to.')
    parser.add_argument('--output_dir',
                        default='./weights/',
                        help='Directory to write models or other data out.')
    parser.add_argument(
        '--deep_variant_vcf',
        default=None,
        help=
        'Optional VCF with Google deep variant QUAL scores for comparisons.')
    parser.add_argument(
        '--bed_file',
        default=defines.nist_bed_file,
        help=
        'Bed file specifying high confidence intervals associated with args.train_vcf.'
    )
    parser.add_argument(
        '--data_dir',
        default=defines.data_dir,
        help=
        'Directory of tensors, must be split into test/valid/train sets with directories for each label within.'
    )
    parser.add_argument('--reference_fasta',
                        default=defines.reference_fasta,
                        help='The reference FASTA file (e.g. HG19 or HG38).')

    # Evaluation related arguments
    parser.add_argument(
        '--multiallelics',
        default='include',
        choices=['include', 'only', 'ignore'],
        help=
        'How to handle multiallelic sites: can be include, only, or ignore.')
    parser.add_argument(
        '--random_forest_training_sites',
        default='include',
        choices=['include', 'only', 'ignore'],
        help=
        'How to handle Random Forest Training sites: can be include, only, or ignore. Only used in gnomad evaluation.'
    )
    parser.add_argument(
        '--emit_interesting_sites',
        default=False,
        action='store_true',
        help=
        'Emit sites where classification algorithms disagree or of extreme CNN scores. Only used in gnomad evaluation.'
    )
    parser.add_argument(
        '--single_sample_vqsr',
        default=False,
        action='store_true',
        help=
        'Include single sample VQSR results in ROC curve. Only used in gnomad evaluation.'
    )
    parser.add_argument(
        '--gnomad_ac_max',
        default=1e10,
        type=int,
        help=
        'gnomAD allele count maximum value, set arbitrarily high by default.')
    parser.add_argument(
        '--gnomad_ac_min',
        default=0,
        type=int,
        help='gnomAD allele count minimum value, set to 0 by default.')
    parser.add_argument(
        '--score_keys',
        nargs='+',
        default=['VQSLOD'],
        help='List of variant score keys for performance comparisons.')
    parser.add_argument(
        '--inspect_model',
        default=False,
        action='store_true',
        help='Plot model architecture, measure inference and training speeds.')
    parser.add_argument(
        '--inspect_show_labels',
        default=False,
        action='store_true',
        help='Plot model architecture with labels for each layer.')
    parser.add_argument('--gnomad_compare',
                        default=False,
                        action='store_true',
                        help='Compare to gnomad random forest and VQSR.')
    parser.add_argument(
        '--hard_filter_compare',
        default=False,
        action='store_true',
        help=
        'Compare to GATK best practices hard filters and filter from CHM syndip paper.'
    )
    parser.add_argument(
        '--baseline_key',
        help='String to identify baseline model to compare against.')

    # Run specific arguments
    parser.add_argument(
        '--id',
        default='no_id',
        help=
        'Identifier for this run, user-defined string to keep experiments organized.'
    )
    parser.add_argument(
        '--random_seed',
        default=12878,
        type=int,
        help='Random seed to use throughout run.  Always use np.random.')

    # Parse, print, set annotations, image data format and seed
    args = parser.parse_args()
    args.annotations = defines.annotations_from_args(args)
    np.random.seed(args.random_seed)
    args.batch_size = min(args.samples, args.batch_size)

    if not is_broad_cluster():
        if args.channels_last:
            K.set_image_data_format('channels_last')
        else:
            K.set_image_data_format('channels_first')

    print('Arguments are', args)

    return args
Example #12
def setup():
    K.set_image_data_format('channels_last')
    K.set_learning_phase(1)
Example #13
import numpy as np
import os
import sys
import time
import matplotlib.pyplot as plt

from keras.models import Model, load_model
from keras.layers import Input, Activation, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose, UpSampling2D, ZeroPadding2D, BatchNormalization
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint
from keras import backend as K
import tensorflow as tf

from data import load_train_data, load_test_data
from utils import *

K.set_image_data_format('channels_last')  # Tensorflow dimension ordering

data_path  = sys.argv[1] + "/"
model_path = data_path + "models/"

# directory for storing test results
rst_path = data_path + "test-records/"
if not os.path.exists(rst_path):
    os.makedirs(rst_path)

model_to_test = sys.argv[2]
cur_fold = sys.argv[3]
plane = sys.argv[4]
im_z = int(sys.argv[5])
im_y = int(sys.argv[6])
im_x = int(sys.argv[7])
Example #14
from __future__ import print_function

import os
from skimage.transform import resize
from skimage.io import imsave
import numpy as np
from keras.models import Model
from keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras import backend as K

# from data_crop_and_convert import load_train_data, load_test_data
from gland_data_pre import load_train_data, load_test_data

K.set_image_data_format('channels_last')  # TF dimension ordering in this code

img_rows = 480
img_cols = 480

smooth = 1.


def dice_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) +
                                           smooth)
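The Dice coefficient above is commonly negated to obtain a training loss; a minimal sketch of that pairing (the loss name and compile call are assumptions, not part of the original snippet):

def dice_coef_loss(y_true, y_pred):
    # Maximizing the Dice coefficient is minimizing its negation.
    return -dice_coef(y_true, y_pred)

# Hypothetical compile call for a U-Net style model:
# model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss,
#               metrics=[dice_coef])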

Example #15
def MobileNet(input_shape=None,
              alpha=1.0,
              depth_multiplier=1,
              dropout=1e-3,
              include_top=True,
              weights='imagenet',
              input_tensor=None,
              pooling=None,
              classes=1000,
              channels="gray"):
    """Instantiates the MobileNet architecture.

    Note that only TensorFlow is supported for now,
    therefore it only works with the data format
    `image_data_format='channels_last'` in your Keras config
    at `~/.keras/keras.json`.

    To load a MobileNet model via `load_model`, import the custom
    objects `relu6` and `DepthwiseConv2D` and pass them to the
    `custom_objects` parameter.
    E.g.
    model = load_model('mobilenet.h5', custom_objects={
                       'relu6': mobilenet.relu6,
                       'DepthwiseConv2D': mobilenet.DepthwiseConv2D})

    # Arguments
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
            E.g. `(200, 200, 3)` would be one valid value.
        alpha: controls the width of the network.
            - If `alpha` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `alpha` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `alpha` = 1, default number of filters from the paper
                 are used at each layer.
        depth_multiplier: depth multiplier for depthwise convolution
            (also called the resolution multiplier)
        dropout: dropout rate
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
              'imagenet' (pre-training on ImageNet),
              or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor (i.e. output of
            `layers.Input()`)
            to use as image input for the model.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model
                will be the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a
                2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
        RuntimeError: If attempting to run this model with a
            backend that does not support separable convolutions.
    """

    if K.backend() != 'tensorflow':
        raise RuntimeError('Only TensorFlow backend is currently supported, '
                           'as other backends do not support '
                           'depthwise convolution.')

    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as ImageNet with `include_top` '
                         'as true, `classes` should be 1000')

    if K.image_data_format() == 'channels_last':
        row_axis, col_axis = (0, 1)
    else:
        row_axis, col_axis = (1, 2)
    rows = input_shape[row_axis]
    cols = input_shape[col_axis]

    if weights == 'imagenet':
        if depth_multiplier != 1:
            raise ValueError('If imagenet weights are being loaded, '
                             'depth multiplier must be 1')

        if alpha not in [0.25, 0.50, 0.75, 1.0]:
            raise ValueError('If imagenet weights are being loaded, '
                             'alpha can be one of'
                             '`0.25`, `0.50`, `0.75` or `1.0` only.')

        if rows != cols or rows not in [128, 160, 192, 224]:
            raise ValueError('If imagenet weights are being loaded, '
                             'input must have a static square shape (one of '
                             '(128,128), (160,160), (192,192), or (224, 224)).'
                             ' Input shape provided = %s' % (input_shape, ))

    if K.image_data_format() != 'channels_last':
        warnings.warn('The MobileNet family of models is only available '
                      'for the input data format "channels_last" '
                      '(width, height, channels). '
                      'However your settings specify the default '
                      'data format "channels_first" (channels, width, height).'
                      ' You should set `image_data_format="channels_last"` '
                      'in your Keras config located at ~/.keras/keras.json. '
                      'The model being returned right now will expect inputs '
                      'to follow the "channels_last" data format.')
        K.set_image_data_format('channels_last')
        old_data_format = 'channels_first'
    else:
        old_data_format = None

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = _conv_block(img_input, 32, alpha, strides=(2, 2))
    x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)

    x = _depthwise_conv_block(x,
                              128,
                              alpha,
                              depth_multiplier,
                              strides=(2, 2),
                              block_id=2)
    x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)

    x = _depthwise_conv_block(x,
                              256,
                              alpha,
                              depth_multiplier,
                              strides=(2, 2),
                              block_id=4)
    x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)

    x = _depthwise_conv_block(x,
                              512,
                              alpha,
                              depth_multiplier,
                              strides=(2, 2),
                              block_id=6)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)

    x = _depthwise_conv_block(x,
                              1024,
                              alpha,
                              depth_multiplier,
                              strides=(2, 2),
                              block_id=12)
    x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)

    if include_top:
        if K.image_data_format() == 'channels_first':
            shape = (int(1024 * alpha), 1, 1)
        else:
            shape = (1, 1, int(1024 * alpha))

        x = GlobalAveragePooling2D()(x)
        x = Reshape(shape, name='reshape_1')(x)
        x = Dropout(dropout, name='dropout')(x)
        x = Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)
        x = Activation('softmax', name='act_softmax')(x)
        x = Reshape((classes, ), name='reshape_2')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))

    # load weights
    if weights == 'imagenet':
        if K.image_data_format() == 'channels_first':
            raise ValueError('Weights for "channels_first" format '
                             'are not available.')
        if alpha == 1.0:
            alpha_text = '1_0'
        elif alpha == 0.75:
            alpha_text = '7_5'
        elif alpha == 0.50:
            alpha_text = '5_0'
        else:
            alpha_text = '2_5'

        if include_top:
            model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows)
            weight_path = BASE_WEIGHT_PATH + model_name
            weights_path = get_file(model_name,
                                    weight_path,
                                    cache_subdir='models')
        else:
            if channels == "gray":
                if alpha == 0.75:
                    weights_path = "/users/ipan/mobilenet_gray_75.h5"
                else:
                    weights_path = "mobilenet_gray.h5"
            else:
                model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text,
                                                               rows)
                weight_path = BASE_WEIGHT_PATH + model_name
                weights_path = get_file(model_name,
                                        weight_path,
                                        cache_subdir='models')
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    if old_data_format:
        K.set_image_data_format(old_data_format)
    return model
Example #16

# Assumed imports for this excerpt (missing from the original):
import numpy as np
from keras import backend as k
from keras.datasets import mnist
from keras.optimizers import Adam

# network and training
NB_EPOCH = 20
BATCH_SIZE = 128
VERBOSE = 1
OPTIMIZER = Adam()
VALIDATION_SPLIT = 0.2

IMG_ROWS, IMG_COLS = 28, 28  # input image dimensions
NB_CLASSES = 10  # number of outputs = number of digits
INPUT_SHAPE = (IMG_ROWS, IMG_COLS, 1)

# data: shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
k.set_image_data_format("channels_last")
# k.set_image_data_format("channels_first")

# consider them as float and normalize
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255

# we need a 60K x [28 x 28 x 1] shape as input to the CONVNET (channels_last)
X_train = X_train[:, :, :, np.newaxis]
X_test = X_test[:, :, :, np.newaxis]

print(X_train.shape, 'train samples')
print(X_test.shape, 'test samples')
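If the commented-out channels_first line were used instead, the channel axis would have to go at position 1 rather than last. A hedged sketch of the alternative reshaping (helper name hypothetical):

def to_channels_first(x):
    # Insert the channel axis at position 1 instead of appending it:
    # (N, 28, 28) -> (N, 1, 28, 28).
    return x[:, np.newaxis, :, :]

# With channels_first, INPUT_SHAPE would be (1, IMG_ROWS, IMG_COLS).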
Example #17
def pytorch_to_keras(model,
                     args,
                     input_shape,
                     change_ordering=False,
                     training=False,
                     verbose=False):
    """
    Convert the layers of a given PyTorch model using the registered converters.

    Args:
        model: pytorch model
        args: pytorch model arguments
        input_shape: keras input shape (using for InputLayer creation)
        change_ordering: change NCHW to NHWC
        training: switch model to training mode
        verbose: verbose output

    Returns:
        model: created keras model.
    """

    # PyTorch JIT tracing
    args = (args, ) if isinstance(args, torch.autograd.Variable) else args

    orig_state_dict_keys = _unique_state_dict(model).keys()

    with set_training(model, training):
        trace, torch_out = torch.jit.get_trace_graph(model, args)

    if orig_state_dict_keys != _unique_state_dict(model).keys():
        raise RuntimeError("state_dict changed after running the tracer; "
                           "something weird is happening in your model!")

    # _optimize_trace(trace, False)
    trace.set_graph(_optimize_graph(trace.graph(), False))

    if verbose:
        print(trace.graph())

    if verbose:
        print(list(trace.graph().outputs()))

    # Get all graph nodes
    nodes = list(trace.graph().nodes())

    # Collect graph outputs
    graph_outputs = [n.uniqueName() for n in trace.graph().outputs()]
    print('Graph outputs:', graph_outputs)

    # Collect model state dict
    state_dict = _unique_state_dict(model)
    if verbose:
        print('State dict:', list(state_dict))

    import re
    import keras
    from keras import backend as K
    K.set_image_data_format('channels_first')

    layers = dict()
    layers['input'] = keras.layers.InputLayer(input_shape=input_shape,
                                              name='input').output

    outputs = []

    for node in nodes:
        node_inputs = list(node.inputs())
        node_input_names = []
        for node_input in node_inputs:
            if node_input.node().scopeName():
                node_input_names.append(get_node_id(node_input.node()))

        if len(node_input_names) == 0:
            node_input_names.append('input')

        node_type = node.kind()

        node_scope_name = node.scopeName()
        node_id = get_node_id(node)
        node_weights_name = '.'.join(
            re.findall(r'\[([\w\d.]+)\]', node_scope_name))
        node_attrs = {k: node[k] for k in node.attributeNames()}

        node_outputs = list(node.outputs())
        node_outputs_names = []
        for node_output in node_outputs:
            if node_output.node().scopeName():
                node_outputs_names.append(node_output.node().scopeName())

        if verbose:
            print(' ____ ')
            print('graph node:', node_scope_name)
            print('type:', node_type)
            print('inputs:', node_input_names)
            print('outputs:', node_outputs_names)
            print('name in state_dict:', node_weights_name)
            print('attrs:', node_attrs)
            print('node_id:', node_id)
            print('is_terminal:', node_id in graph_outputs)
        AVAILABLE_CONVERTERS[node_type](params=node_attrs,
                                        w_name=node_weights_name,
                                        scope_name=node_id,
                                        inputs=node_input_names,
                                        layers=layers,
                                        weights=state_dict)
        if node_id in graph_outputs:
            outputs.append(layers[node_id])

    model = keras.models.Model(inputs=layers['input'], outputs=outputs)
    model.summary()

    if change_ordering:
        # Change from 'NCW' to 'NWC' ordering customary in tf
        import numpy as np
        config = model.get_config()
        output_shape = None
        for layer_type, lc in ((layer['class_name'], layer['config'])
                               for layer in config['layers']):

            if 'batch_input_shape' in lc:
                if len(lc['batch_input_shape']) == 3:
                    N, C, W = lc['batch_input_shape']
                    lc['batch_input_shape'] = (N, W, C)
                elif len(lc['batch_input_shape']) == 4:
                    N, C, H, W = lc['batch_input_shape']
                    lc['batch_input_shape'] = (N, H, W, C)
                else:
                    raise NotImplementedError(
                        "len(batch_input_shape) should be either 3 or 4")
                output_shape = lc['batch_input_shape']

            if layer_type == 'Conv1D':
                # 'Conv1D' (was misspelled 'Con1D'); also avoid rebinding the
                # backend alias K, which is needed again after this loop.
                (N, W, _), kernel = output_shape, lc['kernel_size'][0]
                C = lc['filters']
                W -= kernel - 1
                output_shape = (N, W, C)

            if 'target_shape' in lc:
                lc['target_shape'] = tuple(
                    np.reshape(
                        np.array([
                            list(lc['target_shape'][1:][:]),
                            lc['target_shape'][0]
                        ]), -1))

            if 'data_format' in lc:
                lc['data_format'] = 'channels_last'

            if 'axis' in lc:
                lc['axis'] = len(output_shape) - 1

        K.set_image_data_format('channels_last')

        # # For theano:
        # from keras.utils.layer_utils import convert_all_kernels_in_model
        # convert_all_kernels_in_model(model)

        # Set the weights into the model with new ordering
        # `Dense` layers after `Flatten` have their weights transposed.
        src_weights = []
        last_was_flatten = False
        last_shape = None
        for layer in model.layers:
            W = layer.get_weights()
            if last_was_flatten and W:
                assert len(last_shape) == 3, str(last_shape)
                A, b = W
                _, C, H = last_shape
                A.shape = (C, H, -1)
                A = np.ascontiguousarray(np.swapaxes(A, 0, 1))
                A.shape = (H * C, -1)
                W = [A, b]
                last_was_flatten = False
            if isinstance(layer, keras.layers.core.Flatten):
                last_was_flatten = True
            elif not last_was_flatten:
                last_shape = layer.output_shape
            src_weights.append(W)

        if K.backend() == 'tensorflow':
            # Tensorflow needs a new graph for the converted model
            # to retain the same scopes for the operators.
            import tensorflow as tf
            tf.reset_default_graph()
            K.set_session(tf.Session())
            model_tf_ordering = keras.models.Model.from_config(config)
            for dst, src in zip(model_tf_ordering.layers, src_weights):
                dst.set_weights(src)
        else:
            model_tf_ordering = keras.models.Model.from_config(config)
            for dst, src in zip(model_tf_ordering.layers, src_weights):
                dst.set_weights(src)

        model = model_tf_ordering

    return model
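A hedged usage sketch (model and input size are placeholders): trace a PyTorch module through a dummy variable, then request NHWC ordering for the TensorFlow backend:

import torch
from torchvision.models import resnet18

pt_model = resnet18()
dummy_input = torch.autograd.Variable(torch.randn(1, 3, 224, 224))
k_model = pytorch_to_keras(pt_model, dummy_input, (3, 224, 224),
                           change_ordering=True, verbose=False)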
Example #18
def NASNet(input_shape=None,
           penultimate_filters=4032,
           nb_blocks=6,
           stem_filters=96,
           skip_reduction=True,
           use_auxiliary_branch=False,
           filters_multiplier=2,
           dropout=0.5,
           weight_decay=5e-5,
           include_top=True,
           weights=None,
           input_tensor=None,
           pooling=None,
           classes=1000,
           default_size=None):
    """Instantiates a NASNet architecture.
    Note that only TensorFlow is supported for now,
    therefore it only works with the data format
    `image_data_format='channels_last'` in your Keras config
    at `~/.keras/keras.json`.

    # Arguments
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(331, 331, 3)` for NASNetLarge or
            `(224, 224, 3)` for NASNetMobile
            It should have exactly 3 inputs channels,
            and width and height should be no smaller than 32.
            E.g. `(224, 224, 3)` would be one valid value.
        penultimate_filters: number of filters in the penultimate layer.
            NASNet models use the notation `NASNet (N @ P)`, where:
                -   N is the number of blocks
                -   P is the number of penultimate filters
        nb_blocks: number of repeated blocks of the NASNet model.
            NASNet models use the notation `NASNet (N @ P)`, where:
                -   N is the number of blocks
                -   P is the number of penultimate filters
        stem_filters: number of filters in the initial stem block
        skip_reduction: Whether to skip the reduction step at the tail
            end of the network. Set to `False` for CIFAR models.
        use_auxiliary_branch: Whether to use the auxiliary branch during
            training or evaluation.
        filters_multiplier: controls the width of the network.
            - If `filters_multiplier` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `filters_multiplier` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `filters_multiplier` = 1, default number of filters from the paper
                 are used at each layer.
        dropout: dropout rate
        weight_decay: l2 regularization weight
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: `None` (random initialization) or
            `imagenet` (ImageNet weights)
        input_tensor: optional Keras tensor (i.e. output of
            `layers.Input()`)
            to use as image input for the model.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model
                will be the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a
                2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
        default_size: specifies the default image size of the model
    # Returns
        A Keras model instance.
    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
        RuntimeError: If attempting to run this model with a
            backend that does not support separable convolutions.
    """
    if K.backend() != 'tensorflow':
        raise RuntimeError('Only TensorFlow backend is currently supported, '
                           'as other backends do not support '
                           'separable convolution.')

    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as ImageNet with `include_top` '
                         'as true, `classes` should be 1000')

    if default_size is None:
        default_size = 331

    # Determine proper input shape and default size.
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=default_size,
                                      min_size=32,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top or weights)

    if K.image_data_format() != 'channels_last':
        warnings.warn('The NASNet family of models is only available '
                      'for the input data format "channels_last" '
                      '(width, height, channels). '
                      'However your settings specify the default '
                      'data format "channels_first" (channels, width, height).'
                      ' You should set `image_data_format="channels_last"` '
                      'in your Keras config located at ~/.keras/keras.json. '
                      'The model being returned right now will expect inputs '
                      'to follow the "channels_last" data format.')
        K.set_image_data_format('channels_last')
        old_data_format = 'channels_first'
    else:
        old_data_format = None

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    assert penultimate_filters % 24 == 0, "`penultimate_filters` needs to be divisible " \
                                          "by 24."

    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
    filters = penultimate_filters // 24

    if not skip_reduction:
        x = Conv2D(stem_filters, (3, 3),
                   strides=(2, 2),
                   padding='valid',
                   use_bias=False,
                   name='stem_conv1',
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay))(img_input)
    else:
        x = Conv2D(stem_filters, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   use_bias=False,
                   name='stem_conv1',
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay))(img_input)

    x = BatchNormalization(axis=channel_dim,
                           momentum=_BN_DECAY,
                           epsilon=_BN_EPSILON,
                           name='stem_bn1')(x)

    p = None
    if not skip_reduction:  # imagenet / mobile mode
        x, p = _reduction_A(x,
                            p,
                            filters // (filters_multiplier**2),
                            weight_decay,
                            id='stem_1')
        x, p = _reduction_A(x,
                            p,
                            filters // filters_multiplier,
                            weight_decay,
                            id='stem_2')

    for i in range(nb_blocks):
        x, p = _normal_A(x, p, filters, weight_decay, id='%d' % (i))

    x, p0 = _reduction_A(x,
                         p,
                         filters * filters_multiplier,
                         weight_decay,
                         id='reduce_%d' % (nb_blocks))

    p = p0 if not skip_reduction else p

    for i in range(nb_blocks):
        x, p = _normal_A(x,
                         p,
                         filters * filters_multiplier,
                         weight_decay,
                         id='%d' % (nb_blocks + i + 1))

    auxiliary_x = None
    if not skip_reduction:  # imagenet / mobile mode
        if use_auxiliary_branch:
            auxiliary_x = _add_auxiliary_head(x, classes, weight_decay)

    x, p0 = _reduction_A(x,
                         p,
                         filters * filters_multiplier**2,
                         weight_decay,
                         id='reduce_%d' % (2 * nb_blocks))

    if skip_reduction:  # CIFAR mode
        if use_auxiliary_branch:
            auxiliary_x = _add_auxiliary_head(x, classes, weight_decay)

    p = p0 if not skip_reduction else p

    for i in range(nb_blocks):
        x, p = _normal_A(x,
                         p,
                         filters * filters_multiplier**2,
                         weight_decay,
                         id='%d' % (2 * nb_blocks + i + 1))

    x = Activation('relu')(x)

    if include_top:
        x = GlobalAveragePooling2D()(x)
        x = Dropout(dropout)(x)
        x = Dense(classes,
                  activation='softmax',
                  kernel_regularizer=l2(weight_decay),
                  name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    if use_auxiliary_branch:
        model = Model(inputs, [x, auxiliary_x], name='NASNet_with_auxiliary')
    else:
        model = Model(inputs, x, name='NASNet')

    # load weights
    if weights == 'imagenet':
        if default_size == 224:  # mobile version
            if include_top:
                if use_auxiliary_branch:
                    weight_path = NASNET_MOBILE_WEIGHT_PATH_WITH_AUXULARY
                    model_name = 'nasnet_mobile_with_aux.h5'
                else:
                    weight_path = NASNET_MOBILE_WEIGHT_PATH
                    model_name = 'nasnet_mobile.h5'
            else:
                if use_auxiliary_branch:
                    weight_path = NASNET_MOBILE_WEIGHT_PATH_WITH_AUXULARY_NO_TOP
                    model_name = 'nasnet_mobile_with_aux_no_top.h5'
                else:
                    weight_path = NASNET_MOBILE_WEIGHT_PATH_NO_TOP
                    model_name = 'nasnet_mobile_no_top.h5'

            weights_file = get_file(model_name,
                                    weight_path,
                                    cache_subdir='models')
            model.load_weights(weights_file, by_name=True)

        elif default_size == 331:  # large version
            if include_top:
                if use_auxiliary_branch:
                    weight_path = NASNET_LARGE_WEIGHT_PATH_WITH_auxiliary
                    model_name = 'nasnet_large_with_aux.h5'
                else:
                    weight_path = NASNET_LARGE_WEIGHT_PATH
                    model_name = 'nasnet_large.h5'
            else:
                if use_auxiliary_branch:
                    weight_path = NASNET_LARGE_WEIGHT_PATH_WITH_auxiliary_NO_TOP
                    model_name = 'nasnet_large_with_aux_no_top.h5'
                else:
                    weight_path = NASNET_LARGE_WEIGHT_PATH_NO_TOP
                    model_name = 'nasnet_large_no_top.h5'

            weights_file = get_file(model_name,
                                    weight_path,
                                    cache_subdir='models')
            model.load_weights(weights_file, by_name=True)

        else:
            raise ValueError(
                'ImageNet weights can only be loaded on NASNetLarge or NASNetMobile'
            )

    if old_data_format:
        K.set_image_data_format(old_data_format)

    return model
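
For concreteness, the `penultimate_filters % 24` check inside the builder above can be sanity-tested with the published configuration numbers; the values below follow the `NASNet (N @ P)` notation from the docstring (NASNetLarge is `NASNet (6 @ 4032)`) and are illustrative only:

penultimate_filters = 4032           # P in NASNet (6 @ 4032), i.e. NASNetLarge
assert penultimate_filters % 24 == 0
filters = penultimate_filters // 24  # 168 base filters for the normal/reduction cells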
from keras import backend as K
from keras.layers.convolutional import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Model

import json
"""
data_format: A string, one of channels_last (default) or channels_first.
channels_last corresponds to inputs with shape (batch, height, width, channels).
channels_first corresponds to inputs with shape (batch, channels, height, width).
It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json.
I recommend using default setting rather than using explicit declaration.

axis=1 <--> data_format='channels_first'
axis=-1 <-> data_format='channels_last'
"""
K.set_image_data_format('channels_last')
K.set_image_dim_ordering('tf')
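
# A minimal sketch (not part of the original snippet) of the axis
# correspondence documented above: pick the channel axis from the
# configured data format and pass it to BatchNormalization.
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
bn_layer = BatchNormalization(axis=channel_axis)  # axis=-1 under 'channels_last'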

# weight_decay = 0.0001
from keras.regularizers import l2


class Tiramisu():
    def __init__(self,
                 input_shape=(224, 224, 3),
                 classes=12,
                 first_conv_filters=48,
                 growth_rate=12,
                 pools=5,
                 block_layers=[4, 5, 7, 10, 12, 15, 12, 10, 7, 5, 4]):
        pass  # body truncated in the source snippet


class GANRegistration():  # hypothetical name; the class header is missing from the source
    def __init__(self):

        K.set_image_data_format('channels_last')  # set format
        K.set_image_dim_ordering('tf')
        self.DEBUG = 1

        self.crop_size_g = (64, 64, 64)
        self.crop_size_d = (24, 24, 24)

        self.channels = 1
        self.input_shape_g = self.crop_size_g + (self.channels,)
        self.input_shape_d = self.crop_size_d + (self.channels,)
        self.output_shape_g = (24, 24, 24) + (3,)  # phi has three outputs, one for each of X, Y and Z
        self.output_shape_d = (6, 6, 6) + (self.channels,)
        self.output_shape_d_v2 = (2, 2, 2) + (self.channels,)

        self.batch_sz = 1  # keep the batch small for local testing to limit memory use

        # Number of filters in the first layer of G and D
        self.gf = 64
        self.df = 64

        # Train the discriminator faster than the generator.
        # In the paper the learning rate is 0.001 with a decay factor of 0.5.
        optimizerD = Adam(0.001, decay=0.05)
        self.decay = 0.5
        self.iterations_decay = 50
        self.learning_rate = 0.001
        # In the paper the learning rate decays by 0.5 after 50K iterations.
        optimizerG = Adam(0.001, decay=0.05)

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.summary()
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=optimizerD,
                                   metrics=['accuracy'])

        # Build the generator
        self.generator = self.build_generator()
        self.generator.summary()
        # Build the deformable transformation layer
        self.transformation = self.build_transformation()
        self.transformation.summary()

        # Input images
        img_S = Input(shape=self.input_shape_g)  # subject image S
        img_T = Input(shape=self.input_shape_g)  # template image T

        # Conditioning on T, generate the deformation field phi that warps S
        phi = self.generator([img_S, img_T])

        # Transform S
        warped_S = self.transformation([img_S, phi])

        # Use Python partial to provide loss function with additional deformable field argument
        partial_gp_loss = partial(self.gradient_penalty_loss, phi=phi)
        partial_gp_loss.__name__ = 'gradient_penalty'  # Keras requires function names

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # The discriminator determines the validity of translated image / condition pairs
        validity = self.discriminator([warped_S, img_T])

        self.combined = Model(inputs=[img_S, img_T], outputs=validity)
        self.combined.summary()
        self.combined.compile(loss=partial_gp_loss, optimizer=optimizerG)

        if self.DEBUG:
            log_path = '/nrs/scicompsoft/elmalakis/GAN_Registration_Data/flydata/forSalma/lo_res/logs_ganunet/'
            os.makedirs(log_path, exist_ok=True)
            self.callback = TensorBoard(log_path)
            self.callback.set_model(self.combined)

        self.data_loader = DataLoader(batch_sz=self.batch_sz,
                                      dataset_name='fly',
                                      use_golden=True)
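
The `gradient_penalty_loss` bound by `partial(...)` above is not included in this snippet. A plausible sketch of such a method is shown below, assuming it combines the adversarial binary cross-entropy with a smoothness penalty on the deformation field `phi` (the exact formulation in the source may differ; `K` is `keras.backend`):

    def gradient_penalty_loss(self, y_true, y_pred, phi):
        # Adversarial term: binary cross-entropy on the discriminator output.
        adversarial = K.mean(K.binary_crossentropy(y_true, y_pred))
        # Smoothness term: squared finite differences of phi along the
        # three spatial axes of the (batch, x, y, z, 3) flow field.
        dx = phi[:, 1:, :, :, :] - phi[:, :-1, :, :, :]
        dy = phi[:, :, 1:, :, :] - phi[:, :, :-1, :, :]
        dz = phi[:, :, :, 1:, :] - phi[:, :, :, :-1, :]
        smoothness = K.mean(K.square(dx)) + K.mean(K.square(dy)) + K.mean(K.square(dz))
        return adversarial + smoothness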
Example #21
0
def MobileNet_sep(input_shape=None,
                  alpha=1.0,
                  depth_multiplier=1,
                  dropout=1e-3,
                  include_top=True,
                  input_tensor=None,
                  pooling=None,
                  classes=1000):

    # Determine proper input shape and default size.
    if input_shape is None:
        default_size = 224
    else:
        if K.image_data_format() == 'channels_first':
            rows = input_shape[1]
            cols = input_shape[2]
        else:
            rows = input_shape[0]
            cols = input_shape[1]

        if rows == cols and rows in [128, 160, 192, 224]:
            default_size = rows
        else:
            default_size = 224

    input_shape = _obtain_input_shape(input_shape,
                                      default_size=default_size,
                                      min_size=32,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top,
                                      weights=None)

    if K.image_data_format() == 'channels_last':
        row_axis, col_axis = (0, 1)
    else:
        row_axis, col_axis = (1, 2)
    rows = input_shape[row_axis]
    cols = input_shape[col_axis]

    if K.image_data_format() != 'channels_last':
        warnings.warn('The MobileNet family of models is only available '
                      'for the input data format "channels_last" '
                      '(width, height, channels). '
                      'However your settings specify the default '
                      'data format "channels_first" (channels, width, height).'
                      ' You should set `image_data_format="channels_last"` '
                      'in your Keras config located at ~/.keras/keras.json. '
                      'The model being returned right now will expect inputs '
                      'to follow the "channels_last" data format.')
        K.set_image_data_format('channels_last')
        old_data_format = 'channels_first'
    else:
        old_data_format = None

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = _conv_block(img_input, 32, alpha, strides=(2, 2))
    x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)

    x = _depthwise_conv_block(x, 128, alpha, depth_multiplier,
                              strides=(2, 2), block_id=2)
    x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)

    x = _depthwise_conv_block(x, 256, alpha, depth_multiplier,
                              strides=(2, 2), block_id=4)
    x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)

    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier,
                              strides=(2, 2), block_id=6)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)

    x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier,
                              strides=(2, 2), block_id=12)
    x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)

    if include_top:
        if K.image_data_format() == 'channels_first':
            shape = (int(1024 * alpha), 1, 1)
        else:
            shape = (1, 1, int(1024 * alpha))

        x = GlobalAveragePooling2D()(x)
        x = Reshape(shape, name='reshape_1')(x)
        x = Dropout(dropout, name='dropout')(x)
        x = Conv2D(classes, (1, 1),
                   padding='same', name='conv_preds')(x)
        x = Activation('softmax', name='act_softmax')(x)
        x = Reshape((classes,), name='reshape_2')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))

    if old_data_format:
        K.set_image_data_format(old_data_format)
    return model
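
To make the width multiplier concrete (illustrative numbers, not taken from the snippet): `alpha` scales every filter count, so the `include_top` reshape above tracks `int(1024 * alpha)`.

alpha = 0.5
shape = (1, 1, int(1024 * alpha))  # -> (1, 1, 512), matching the channels_last reshape above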
Example #22
0
def Xception(include_top=True, weights='imagenet',
             input_tensor=None, input_shape=None,
             pooling=None,
             classes=1000):
    """Instantiates the Xception architecture.

    Optionally loads weights pre-trained
    on ImageNet. This model is available for TensorFlow only,
    and can only be used with inputs following the TensorFlow
    data format `(width, height, channels)`.
    You should set `image_data_format="channels_last"` in your Keras config
    located at ~/.keras/keras.json.

    Note that the default input image size for this model is 299x299.

    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization)
            or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(299, 299, 3)`).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 71.
            E.g. `(150, 150, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
        RuntimeError: If attempting to run this model with a
            backend that does not support separable convolutions.
    """
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    if K.backend() != 'tensorflow':
        raise RuntimeError('The Xception model is only available with '
                           'the TensorFlow backend.')
    if K.image_data_format() != 'channels_last':
        warnings.warn('The Xception model is only available for the '
                      'input data format "channels_last" '
                      '(width, height, channels). '
                      'However your settings specify the default '
                      'data format "channels_first" (channels, width, height). '
                      'You should set `image_data_format="channels_last"` in your Keras '
                      'config located at ~/.keras/keras.json. '
                      'The model being returned right now will expect inputs '
                      'to follow the "channels_last" data format.')
        K.set_image_data_format('channels_last')
        old_data_format = 'channels_first'
    else:
        old_data_format = None

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=299,
                                      min_size=71,
                                      data_format=K.image_data_format(),
                                      include_top=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False, name='block1_conv1')(img_input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Conv2D(128, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv1')(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv2')(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block2_pool')(x)
    x = layers.add([x, residual])

    residual = Conv2D(256, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv1')(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv2')(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block3_pool')(x)
    x = layers.add([x, residual])

    residual = Conv2D(728, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block4_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv1')(x)
    x = BatchNormalization(name='block4_sepconv1_bn')(x)
    x = Activation('relu', name='block4_sepconv2_act')(x)
    x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv2')(x)
    x = BatchNormalization(name='block4_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block4_pool')(x)
    x = layers.add([x, residual])

    for i in range(8):
        residual = x
        prefix = 'block' + str(i + 5)

        x = Activation('relu', name=prefix + '_sepconv1_act')(x)
        x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv1')(x)
        x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv2_act')(x)
        x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv2')(x)
        x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv3_act')(x)
        x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv3')(x)
        x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)

        x = layers.add([x, residual])

    residual = Conv2D(1024, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block13_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block13_sepconv1')(x)
    x = BatchNormalization(name='block13_sepconv1_bn')(x)
    x = Activation('relu', name='block13_sepconv2_act')(x)
    x = SeparableConv2D(1024, (3, 3), padding='same', use_bias=False, name='block13_sepconv2')(x)
    x = BatchNormalization(name='block13_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block13_pool')(x)
    x = layers.add([x, residual])

    x = SeparableConv2D(1536, (3, 3), padding='same', use_bias=False, name='block14_sepconv1')(x)
    x = BatchNormalization(name='block14_sepconv1_bn')(x)
    x = Activation('relu', name='block14_sepconv1_act')(x)

    x = SeparableConv2D(2048, (3, 3), padding='same', use_bias=False, name='block14_sepconv2')(x)
    x = BatchNormalization(name='block14_sepconv2_bn')(x)
    x = Activation('relu', name='block14_sepconv2_act')(x)

    if include_top:
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='xception')

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file('xception_weights_tf_dim_ordering_tf_kernels.h5',
                                    TF_WEIGHTS_PATH,
                                    cache_subdir='models')
        else:
            weights_path = get_file('xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
                                    TF_WEIGHTS_PATH_NO_TOP,
                                    cache_subdir='models')
        model.load_weights(weights_path)

    if old_data_format:
        K.set_image_data_format(old_data_format)
    return model
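
A minimal usage sketch for the builder above (not part of the snippet), assuming the standard Keras applications helpers are importable and the weight-path constants used by the snippet are defined; 'elephant.jpg' is a placeholder path:

import numpy as np
from keras.preprocessing import image
from keras.applications.xception import preprocess_input, decode_predictions

model = Xception(weights='imagenet')
img = image.load_img('elephant.jpg', target_size=(299, 299))  # Xception's default input size
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
print(decode_predictions(model.predict(x), top=3)[0])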
Example #23
0
def run():
    global history

    # config = tf.compat.v1.ConfigProto()
    # config.gpu_options.allow_growth = True
    # tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(config=config))

    # Keras learning phase: 0 = test, 1 = train
    K.set_image_data_format('channels_last')
    K.set_learning_phase(0)

    all_result_file = open(os.path.join(result_path, 'all_result.txt'), "w")
    all_result_file.close()

    # Per-subject experiments are not possible here: there are too few samples
    # per class. Only the pooled, baseline-removed data can be used, so the
    # training set is 63 samples, each with temporal- and frequency-domain features.
    train_specInput = np.load(train_specInput_root_path)
    train_tempInput = np.load(train_tempInput_root_path)
    train_label = np.load(train_label_root_path)

    # The packaged data are in an arbitrary order (dimension 0 indexes the
    # samples), so shuffle them.
    index = np.arange(train_specInput.shape[0])
    np.random.shuffle(index)

    train_specInput = train_specInput[index]
    train_tempInput = train_tempInput[index]
    train_label = train_label[index]

    # Re-encode the labels as 0, 1, 2; the original encoding is 1, 2, 3.
    train_label = [x - 1 for x in train_label]
    train_label = to_categorical(train_label)

    # Evaluate
    test_specInput = np.load(test_specInput_root_path)
    test_tempInput = np.load(test_tempInput_root_path)
    test_label = np.load(test_label_root_path)

    # Re-encode the labels as 0, 1, 2; the original encoding is 1, 2, 3.
    test_label = [x - 1 for x in test_label]
    test_label = to_categorical(test_label)

    # Either `depth` or `nb_layers_per_block` can be used to size the network.
    model = sst_model.sst_emotionnet(input_width=input_width,
                                     specInput_length=specInput_length,
                                     temInput_length=temInput_length,
                                     depth_spec=depth_spec,
                                     depth_tem=depth_tem,
                                     gr_spec=gr_spec,
                                     gr_tem=gr_tem,
                                     nb_dense_block=nb_dense_block,
                                     nb_class=nb_class,
                                     nb_layers_per_block=nb_layers_per_block)
    # Training phase
    adam = keras.optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=1)

    save_model = ModelCheckpoint(filepath=model_save_path,
                                 monitor='val_accuracy',
                                 save_best_only=True)

    history = model.fit([train_specInput, train_tempInput],
                        train_label,
                        epochs=nbEpoch,
                        batch_size=batch_size,
                        validation_data=([test_specInput,
                                          test_tempInput], test_label),
                        callbacks=[early_stopping, save_model],
                        verbose=1)  # validation_data is supplied, so validation_split is unnecessary

    # Test phase
    model = load_model(model_save_path)
    loss, accuracy = model.evaluate([test_specInput, test_tempInput],
                                    test_label)
    print('\ntest loss', loss)
    print('accuracy', accuracy)

    # Result processing: result_path is used as a directory above, so write
    # the training history into a file inside it ('history.txt' is an assumed name)
    f = open(os.path.join(result_path, 'history.txt'), "w")
    print(history.history, file=f)
    f.close()

    maxAcc = max(history.history['val_accuracy'])
    print("maxAcc = " + str(maxAcc))
    all_result_file = open(os.path.join(result_path, 'all_result.txt'), "a")
    print(str(accuracy), file=all_result_file)
    all_result_file.close()
Example #24
0
from __future__ import print_function
import glob
import numpy as np
from PIL import Image
import pandas as pd
import matplotlib.pyplot as plt
import os
import tensorflow as tf
import keras
from keras.utils import np_utils, generic_utils
from keras.preprocessing.image import ImageDataGenerator
from keras import backend as K

K.set_image_data_format('channels_last')
K.set_image_dim_ordering('tf')

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.callbacks import ModelCheckpoint
from keras.callbacks import TensorBoard

plt.style.use('ggplot')
os.system('clear')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

root_path = "dataset/"
cat_path = root_path + "cat/"
model_path = root_path + "model/weights.best.hdf5"

# 2 for original, 3 for smooth
Example #25
0
from sklearn.model_selection import train_test_split
import os
import glob
import h5py

from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, model_from_json
from keras.layers.core import Dense, Dropout, Activation, Flatten, Lambda
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D

from keras.optimizers import SGD
from keras.utils import np_utils
from keras.callbacks import LearningRateScheduler, ModelCheckpoint
from keras import backend as K
K.set_image_data_format('channels_first')

import numpy as np
from skimage import color, exposure, transform  # used by preprocess_img below

from matplotlib import pyplot as plt

NUM_CLASSES = 44
IMG_SIZE = 48

def preprocess_img(img):
    # Histogram normalization in y
    hsv = color.rgb2hsv(img)
    hsv[:,:,2] = exposure.equalize_hist(hsv[:,:,2])
    img = color.hsv2rgb(hsv)

    # central crop
    min_side = min(img.shape[:-1])
    centre = img.shape[0]//2, img.shape[1]//2
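    # Hedged completion (the snippet is truncated here): crop the central
    # square, resize to the model input size, and move the colour axis to
    # the front to match the 'channels_first' format set above.
    img = img[centre[0] - min_side // 2:centre[0] + min_side // 2,
              centre[1] - min_side // 2:centre[1] + min_side // 2,
              :]
    img = transform.resize(img, (IMG_SIZE, IMG_SIZE))
    img = np.rollaxis(img, -1)
    return img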
Example #26
0
import os

import numpy as np
import pandas as pd

from keras import backend as keras_backend
from keras.utils import to_categorical
from keras.callbacks import ModelCheckpoint
from keras.regularizers import l2

from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classification_report, confusion_matrix

# Set your path to the dataset
us8k_path = os.path.abspath('./data')
audio_path = os.path.join(us8k_path, 'wav/train')
metadata_path = os.path.join(us8k_path, 'metadata/train.csv')
models_path = os.path.abspath('./models')
data_path = os.path.abspath('./data')

# Ensure "channel last" data format on Keras
keras_backend.set_image_data_format('channels_last')

# Define a labels array for future use
labels = ['Computer_Keyboard', 'Knock', 'Telephone']

# Pre-processed MFCC coefficients
X = np.load("data/X-mfcc.npy")
y = np.load("data/y-mfcc.npy")

# Metadata
metadata = pd.read_csv(metadata_path)

#total = len(metadata)
total = X.shape[0]
indexes = list(range(0, total))
Example #27
0
def main():
    parser = argparse.ArgumentParser(description='Train Font GAN')
    parser.add_argument('--datasetpath00', '-d_train0', type=str,
                        default="sugao2egaoRaw_train.hdf5")  # required=True
    parser.add_argument('--datasetpath01', '-d_train1', type=str,
                        default="egao2ikariRaw_train.hdf5")  # required=True
    parser.add_argument('--datasetpath02', '-d_train2', type=str,
                        default="ikari2kuyasiRaw_train.hdf5")  # required=True
    parser.add_argument('--datasetpath03', '-d_train3', type=str,
                        default="kuyasi2nakiRaw_train.hdf5")  # required=True
    parser.add_argument('--datasetpath04', '-d_train4', type=str,
                        default="naki2henRaw_train.hdf5")  # required=True
    parser.add_argument('--datasetpath05', '-d_train5', type=str,
                        default="hen2sugaoRaw_train.hdf5")  # required=True

    parser.add_argument('--datasetpath10', '-d_test0', type=str,
                        default="sugao2egaoRaw_test.hdf5")  # required=True
    parser.add_argument('--datasetpath11', '-d_test1', type=str,
                        default="egao2ikariRaw_test.hdf5")  # required=True
    parser.add_argument('--datasetpath12', '-d_test2', type=str,
                        default="ikari2kuyasiRaw_test.hdf5")  # required=True
    parser.add_argument('--datasetpath13', '-d_test3', type=str,
                        default="kuyasi2nakiRaw_test.hdf5")  # required=True
    parser.add_argument('--datasetpath14', '-d_test4', type=str,
                        default="naki2henRaw_test.hdf5")  # required=True
    parser.add_argument('--datasetpath15', '-d_test5', type=str,
                        default="hen2sugaoRaw_test.hdf5")  # required=True

    parser.add_argument('--patch_size', '-p', type=int, default=64)
    parser.add_argument('--batch_size', '-b', type=int, default=5)
    parser.add_argument('--epoch', '-e', type=int, default=8001)
    args = parser.parse_args()

    K.set_image_data_format("channels_last")

    my_train(args)
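
# Standard entry-point guard (not shown in the original snippet):
if __name__ == '__main__':
    main()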
Example #28
0
import os
import glob
import datetime
import numpy as np
import pandas as pd
from imageio import imwrite
from scipy.misc.pilutil import imread, imresize
from keras import backend
from keras.initializers import glorot_uniform
from keras.models import Model, Sequential
from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Flatten, Input, LeakyReLU, UpSampling2D
from keras.layers.merge import Concatenate
from keras.optimizers import Adam, RMSprop

backend.set_image_data_format("channels_first")
os.environ["CUDA_VISIBLE_DEVICES"] = '-1' 

def Discriminator(input_shape):
    img_A, img_B = Input(input_shape), Input(input_shape)
    x = Concatenate(axis=1)([img_A, img_B])
    x = Conv2D(filters=4, kernel_size=(4, 4), strides=(1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation=LeakyReLU(alpha=.2))(x)
    x = Conv2D(filters=8, kernel_size=(4, 4), strides=(2, 2), kernel_initializer=glorot_uniform(seed=1), padding='same', activation=LeakyReLU(alpha=.2))(x)
    x = BatchNormalization(epsilon=.001, momentum=.9)(x)
    x = Conv2D(filters=16, kernel_size=(4, 4), strides=(2, 2), kernel_initializer=glorot_uniform(seed=1), padding='same', activation=LeakyReLU(alpha=.2))(x)
    x = BatchNormalization(epsilon=.001, momentum=.9)(x)
    x = Conv2D(filters=32, kernel_size=(4, 4), strides=(2, 2), kernel_initializer=glorot_uniform(seed=1), padding='same', activation=LeakyReLU(alpha=.2))(x)
    x = BatchNormalization(epsilon=.001, momentum=.9)(x)
    x = Conv2D(filters=64, kernel_size=(4, 4), strides=(2, 2), kernel_initializer=glorot_uniform(seed=1), padding='same', activation=LeakyReLU(alpha=.2))(x)
    x = BatchNormalization(epsilon=.001, momentum=.9)(x)
    x = Conv2D(filters=128, kernel_size=(4, 4), strides=(1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation=LeakyReLU(alpha=.2))(x)
    x = BatchNormalization(epsilon=.001, momentum=.9)(x)
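    # Hedged completion (the snippet is truncated here): a pix2pix-style
    # single-channel validity map and the wrapping Model; the source may differ.
    validity = Conv2D(filters=1, kernel_size=(4, 4), strides=(1, 1),
                      kernel_initializer=glorot_uniform(seed=1),
                      padding='same', activation='sigmoid')(x)
    return Model(inputs=[img_A, img_B], outputs=validity)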
Example #29
0
    assert args.opt_G in ["RMSprop", "SGD", "Adam", "AdamWithWeightnorm"], "Unsupported optimizer"
    assert args.opt_D in ["RMSprop", "SGD", "Adam", "AdamWithWeightnorm"], "Unsupported optimizer"

    # Set the backend by modifying the env variable
    if args.backend == "theano":
        os.environ["KERAS_BACKEND"] = "theano"
    elif args.backend == "tensorflow":
        os.environ["KERAS_BACKEND"] = "tensorflow"

    # Import the backend
    import keras.backend as K

    # manually set dim ordering otherwise it is not changed
    if args.backend == "theano":
        image_data_format = "channels_first"
        K.set_image_data_format(image_data_format)
    elif args.backend == "tensorflow":
        image_data_format = "channels_last"
        K.set_image_data_format(image_data_format)

    import train_WGAN

    model_name = "{0:%Y%m%d_%H%M%S}_WGAN_{1}".format(datetime.datetime.now(), os.path.basename(args.dset.rstrip('/')))
    print("\n\nMODEL NAME:", model_name, '\n\n')

    # Set default params
    d_params = {"generator": args.generator,
                "discriminator": args.discriminator,
                "dset": args.dset,
                "img_dim": args.img_dim,
                "nb_epoch": args.nb_epoch,
Example #30
0
File: arguments.py Project: nh13/gatk4
def parse_args():
    """Parse command line arguments.

    The args namespace is used promiscuously in this module.
    Its fields control the tensor definition, dataset generation, training, file I/O and evaluation.
    Some of the fields are typically dicts or lists that are not actually set on the command line,
    but via a companion argument also in the namespace.
    For example, input_symbols is set via the input_symbol_set string
    and annotations is set via the annotation_set string.
    Here we also seed the random number generator.
    The keras image data format is set here as well via the channels_last or channels_first arguments.

    Returns:
        namespace: The args namespace that is used throughout this module.
    """
    parser = argparse.ArgumentParser()

    # Tensor defining arguments
    parser.add_argument('--tensor_name', default='read_tensor', choices=defines.TENSOR_MAPS_1D+defines.TENSOR_MAPS_2D,
                        help='String key which identifies the map from tensor channels to their meaning.')
    parser.add_argument('--labels', default=defines.SNP_INDEL_LABELS,
                        help='Dict mapping label names to their index within label tensors.')
    parser.add_argument('--input_symbol_set', default='dna_indel', choices=defines.INPUT_SYMBOLS.keys(),
                        help='Key which maps to an input symbol to index mapping.')
    parser.add_argument('--input_symbols', help='Dict mapping input symbols to their index within input tensors, '
                        + 'initialised via the input_symbol_set argument.')
    parser.add_argument('--batch_size', default=32, type=int,
                        help='Mini batch size for stochastic gradient descent algorithms.')
    parser.add_argument('--read_limit', default=128, type=int,
                        help='Maximum number of reads to load.')
    parser.add_argument('--window_size', default=128, type=int,
                        help='Size of sequence window to use as input, typically centered at a variant.')
    parser.add_argument('--base_quality_mode', default='phot', choices=['phot', 'phred', '1hot'],
                        help='How to treat base qualities, must be in [phot, phred, 1hot]')
    parser.add_argument('--channels_last', default=True, dest='channels_last', action='store_true',
                        help='Store the channels in the last axis of tensors, tensorflow->true, theano->false')
    parser.add_argument('--channels_first', dest='channels_last', action='store_false',
                        help='Store the channels in the first axis of tensors, tensorflow->false, theano->true')

    # Annotation arguments
    parser.add_argument('--annotations', help='Array of annotation names, initialised via annotation_set argument')
    parser.add_argument('--annotation_set', default='best_practices', choices=defines.ANNOTATIONS.keys(),
                        help='Key which maps to an annotations list (or _ to ignore annotations).')

    # Dataset generation related arguments
    parser.add_argument('--samples', default=500, type=int,
                        help='Maximum number of data samples to write or load.')
    parser.add_argument('--downsample_snps', default=1.0, type=float,
                        help='Rate of SNP examples that are kept must be in [0.0, 1.0].')
    parser.add_argument('--downsample_indels', default=1.0, type=float,
                        help='Rate of INDEL examples that are kept must be in [0.0, 1.0].')
    parser.add_argument('--downsample_not_snps', default=1.0, type=float,
                        help='Rate of NOT_SNP examples that are kept must be in [0.0, 1.0].')
    parser.add_argument('--downsample_not_indels', default=1.0, type=float,
                        help='Rate of NOT_INDEL examples that are kept must be in [0.0, 1.0].')
    parser.add_argument('--downsample_reference', default=0.001, type=float,
                        help='Rate of reference genotype examples that are kept must be in [0.0, 1.0].')
    parser.add_argument('--downsample_homozygous', default=0.001, type=float,
                        help='Rate of homozygous genotypes that are kept must be in [0.0, 1.0].')
    parser.add_argument('--start_pos', default=0, type=int,
                        help='Genomic position start for parallel tensor writing.')
    parser.add_argument('--end_pos', default=0, type=int,
                        help='Genomic position end for parallel tensor writing.')
    parser.add_argument('--skip_positive_class', default=False, action='store_true',
                        help='Whether to skip positive examples when writing tensors.')
    parser.add_argument('--chrom', help='Chromosome to load for parallel tensor writing.')


    # I/O files and directories: vcfs, bams, beds, hd5, fasta
    parser.add_argument('--output_dir', default='./', help='Directory to write models or other data out.')
    parser.add_argument('--image_dir', default=None, help='Directory to write images and plots to.')
    parser.add_argument('--reference_fasta', help='The reference FASTA file (e.g. HG19 or HG38).')
    parser.add_argument('--weights_hd5', default='',
                        help='A hd5 file of weights to initialize a model, will use all layers with names that match.')
    parser.add_argument('--architecture', default='',
                        help='A json file specifying semantics and architecture of a neural net.')
    parser.add_argument('--bam_file',
                        help='Path to a BAM file to train from or generate tensors with.')
    parser.add_argument('--train_vcf',
                        help='Path to a VCF that has verified true calls from NIST, platinum genomes, etc.')
    parser.add_argument('--input_vcf',
                        help='Haplotype Caller or VQSR generated VCF with raw annotation values [and quality scores].')
    parser.add_argument('--output_vcf', default=None,
                        help='Optional VCF to write to.')
    parser.add_argument('--bed_file',
                        help='Bed file specifying high confidence intervals associated with args.train_vcf.')
    parser.add_argument('--data_dir',
                        help='Directory of tensors, must be split into test/valid/train directories '
                            + 'with subdirectories for each label.')

    # Training and optimization related arguments
    parser.add_argument('--epochs', default=25, type=int,
                        help='Number of epochs, typically passes through the entire dataset, not always well-defined.')
    parser.add_argument('--batch_normalization', default=False, action='store_true',
                        help='Mini batch normalization layers after convolutions.')
    parser.add_argument('--patience', default=4, type=int,
                        help='Maximum number of epochs to run without validation loss improvements (Early Stopping).')
    parser.add_argument('--training_steps', default=80, type=int,
                        help='Number of training batches to examine in an epoch.')
    parser.add_argument('--validation_steps', default=40, type=int,
                        help='Number of validation batches to examine in an epoch validation.')
    parser.add_argument('--iterations', default=5, type=int,
                        help='Generic iteration limit for hyperparameter optimization, animation, and other counts.')
    parser.add_argument('--tensor_board', default=False, action='store_true',
                        help='Add the tensor board callback.')

    # Architecture defining arguments
    parser.add_argument('--conv_width', default=5, type=int, help='Width of convolutional kernels.')
    parser.add_argument('--conv_height', default=5, type=int, help='Height of convolutional kernels.')
    parser.add_argument('--conv_dropout', default=0.0, type=float,
                        help='Dropout rate in convolutional layers.')
    parser.add_argument('--conv_batch_normalize', default=False, action='store_true',
                        help='Batch normalize convolutional layers.')
    parser.add_argument('--conv_layers', nargs='+', default=[128, 96, 64, 48], type=int,
                        help='List of sizes for each convolutional filter layer')
    parser.add_argument('--padding', default='valid', choices=['valid', 'same'],
                        help='Valid or same border padding for convolutional layers.')
    parser.add_argument('--spatial_dropout', default=False, action='store_true',
                        help='Spatial dropout on the convolutional layers.')
    parser.add_argument('--max_pools', nargs='+', default=[], type=int,
                        help='List of max-pooling layers.')
    parser.add_argument('--fc_layers', nargs='+', default=[32], type=int,
                        help='List of sizes for each fully connected layer')
    parser.add_argument('--fc_dropout', default=0.0, type=float,
                        help='Dropout rate in fully connected layers.')
    parser.add_argument('--fc_batch_normalize', default=False, action='store_true',
                        help='Batch normalize fully connected layers.')
    parser.add_argument('--annotation_units', default=16, type=int,
                        help='Number of units connected to the annotation input layer.')
    parser.add_argument('--annotation_shortcut', default=False, action='store_true',
                        help='Shortcut connections on the annotations.')

    # Evaluation related arguments
    parser.add_argument('--score_keys', nargs='+', default=['VQSLOD'],
                        help='List of variant score keys for performance comparisons.')
    parser.add_argument('--tranches', nargs='+', default=[100, 99.9, 99, 95, 90], type=float,
                        help='List of tranche cutoffs for performance comparisons.')

    # Run specific arguments
    parser.add_argument('--mode', help='High level recipe: write tensors, train, test or evaluate models.')
    parser.add_argument('--id', default='no_id',
                        help='Identifier for this run, user-defined string to keep experiments organized.')
    parser.add_argument('--random_seed', default=12878, type=int,
                        help='Random seed to use throughout the run. Always use np.random.')

    # Parse, print, set annotations and seed
    args = parser.parse_args()
    args.annotations = annotations_from_args(args)
    args.input_symbols = input_symbols_from_args(args)
    np.random.seed(args.random_seed)

    if args.channels_last:
        K.set_image_data_format('channels_last')
    else:
        K.set_image_data_format('channels_first')

    print('Arguments are', args)
    return args
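
A minimal invocation sketch (not part of the snippet), assuming the module is run as a script; it confirms the image data format selected by the --channels_last / --channels_first flags:

if __name__ == '__main__':
    args = parse_args()
    print('Keras image data format:', K.image_data_format())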
Example #31
0
    parser.add_argument('--use_label_smoothing', action="store_true", help="Whether to smooth the positive labels when training D")
    parser.add_argument('--label_flipping', default=0, type=float, help="Probability (0 to 1) of flipping the labels when training D")
    parser.add_argument('--lastLayerActivation', type=str, default='tanh', help="Activation of the last layer")
    parser.add_argument('--PercentageOfTrianable', type=int, default=70, help="Percentage of trainable layers")
    args = parser.parse_args()

    # Set the backend by modifying the env variable
    os.environ["KERAS_BACKEND"] = "tensorflow"

    # Import the backend
    import keras.backend as K

    # manually set dim ordering otherwise it is not changed

    image_data_format = "channels_last"
    K.set_image_data_format(image_data_format)

    import trainPy3

    # Set default params
    d_params = {"dset": args.dset,
                "generator": args.generator,
                "batch_size": args.batch_size,
                "n_batch_per_epoch": args.n_batch_per_epoch,
                "nb_epoch": args.nb_epoch,
                "model_name": "ResNet",
                "epoch": args.epoch,
                "nb_classes": args.nb_classes,
                "do_plot": args.do_plot,
                "image_data_format": image_data_format,
                "bn_mode": args.bn_mode,
Example #32
0
def Xception_Mod(include_top=True,
                 weights='imagenet',
                 input_tensor=None,
                 input_shape=None,
                 pooling=None,
                 classes=1000):
    """Instantiates the Xception architecture.

    Optionally loads weights pre-trained
    on ImageNet. This model is available for TensorFlow only,
    and can only be used with inputs following the TensorFlow
    data format `(width, height, channels)`.
    You should set `image_data_format='channels_last'` in your Keras config
    located at ~/.keras/keras.json.

    Note that the default input image size for this model is 299x299.

    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
              'imagenet' (pre-training on ImageNet),
              or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(299, 299, 3)`).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 71.
            E.g. `(150, 150, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
        RuntimeError: If attempting to run this model with a
            backend that does not support separable convolutions.
    """
    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    if K.backend() != 'tensorflow':
        raise RuntimeError('The Xception model is only available with '
                           'the TensorFlow backend.')
    if K.image_data_format() != 'channels_last':
        warnings.warn(
            'The Xception model is only available for the '
            'input data format "channels_last" '
            '(width, height, channels). '
            'However your settings specify the default '
            'data format "channels_first" (channels, width, height). '
            'You should set `image_data_format="channels_last"` in your Keras '
            'config located at ~/.keras/keras.json. '
            'The model being returned right now will expect inputs '
            'to follow the "channels_last" data format.')
        K.set_image_data_format('channels_last')
        old_data_format = 'channels_first'
    else:
        old_data_format = None

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=299,
                                      min_size=71,
                                      data_format=K.image_data_format(),
                                      require_flatten=False,
                                      weights=weights)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = Conv2D(32, (3, 3),
               strides=(2, 2),
               padding='same',
               use_bias=False,
               name='block1_conv1')(img_input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Conv2D(64, (3, 3),
               strides=(1, 2),
               padding='same',
               use_bias=False,
               name='block1_conv2')(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Conv2D(128, (1, 1),
                      strides=(1, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv1')(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv2')(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(1, 2),
                     padding='same',
                     name='block2_pool')(x)
    x = layers.add([x, residual])

    residual = Conv2D(256, (1, 1),
                      strides=(1, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv1')(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv2')(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block3_pool')(x)
    x = layers.add([x, residual])

    residual = Conv2D(728, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block4_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv1')(x)
    x = BatchNormalization(name='block4_sepconv1_bn')(x)
    x = Activation('relu', name='block4_sepconv2_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv2')(x)
    x = BatchNormalization(name='block4_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block4_pool')(x)
    x = layers.add([x, residual])

    for i in range(8):
        residual = x
        prefix = 'block' + str(i + 5)

        x = Activation('relu', name=prefix + '_sepconv1_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv1')(x)
        x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv2_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv2')(x)
        x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv3_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv3')(x)
        x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)

        x = layers.add([x, residual])

    residual = Conv2D(1024, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block13_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block13_sepconv1')(x)
    x = BatchNormalization(name='block13_sepconv1_bn')(x)
    x = Activation('relu', name='block13_sepconv2_act')(x)
    x = SeparableConv2D(1024, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block13_sepconv2')(x)
    x = BatchNormalization(name='block13_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block13_pool')(x)
    x = layers.add([x, residual])

    x = SeparableConv2D(1536, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block14_sepconv1')(x)
    x = BatchNormalization(name='block14_sepconv1_bn')(x)
    x = Activation('relu', name='block14_sepconv1_act')(x)

    x = SeparableConv2D(2048, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block14_sepconv2')(x)
    x = BatchNormalization(name='block14_sepconv2_bn')(x)
    x = Activation('relu', name='block14_sepconv2_act')(x)

    if include_top:
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='xception')

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file(
                'xception_weights_tf_dim_ordering_tf_kernels.h5',
                TF_WEIGHTS_PATH,
                cache_subdir='models',
                file_hash='0a58e3b7378bc2990ea3b43d5981f1f6')
        else:
            weights_path = get_file(
                'xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
                TF_WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                file_hash='b0042744bf5b25fce3cb969f33bebb97')
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    if old_data_format:
        K.set_image_data_format(old_data_format)
    return model
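A quick usage sketch (hedged: it assumes the enclosing function is named Xception with the include_top/weights/pooling arguments used above, and that numpy is available):

import numpy as np

# Sketch: build the convolutional base with ImageNet weights and
# average-pooled features, then run it on a random demo image.
base = Xception(include_top=False, weights='imagenet', pooling='avg')
features = base.predict(np.random.rand(1, 299, 299, 3).astype('float32'))
print(features.shape)  # expected: (1, 2048), the width of block14_sepconv2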
Last changes: 2018-05-23
Contributors: charley
"""

from __future__ import absolute_import, division

import numpy as np
import tensorflow as tf

from keras import backend as K
from keras.engine import Input, Model
from keras.layers import Conv3D, MaxPooling3D, UpSampling3D, Activation, BatchNormalization
from keras.layers.merge import concatenate
from keras.optimizers import Adam
from keras.models import load_model

K.set_image_data_format("channels_first")


def dice_coefficient(y_true, y_pred, smooth=1.):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)


def dice_coefficient_loss(y_true, y_pred):
    return -dice_coefficient(y_true, y_pred)
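
A minimal compile sketch for these losses, assuming `model` is a Keras segmentation model defined elsewhere in this module:

# Sketch: wire the Dice loss and metric into a model; `model` is assumed.
model.compile(optimizer=Adam(lr=1e-4),
              loss=dice_coefficient_loss,
              metrics=[dice_coefficient])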

def MobileNetV2(input_shape=None,
                alpha=1.0,
                depth_multiplier=1,
                include_top=True,
                weights='imagenet',
                input_tensor=None,
                classes=1000):
    """Instantiates the MobileNetV2 architecture.

    To load a MobileNetV2 model via `load_model`, import the custom
    object `relu6` and pass it to the `custom_objects` parameter.
    E.g.
    model = load_model('mobilenet.h5', custom_objects={
                       'relu6': mobilenet.relu6})

    # Arguments
        input_shape: optional shape tuple, to be specified if you would
            like to use a model with an input image resolution that is not
            (224, 224, 3).
            It should have exactly 3 input channels.
            You can also omit this option if you would like
            to infer input_shape from an input_tensor.
            If you include both input_tensor and input_shape, then
            input_shape will be used if they match; if the shapes
            do not match, an error is raised.
            E.g. `(160, 160, 3)` would be one valid value.
        alpha: controls the width of the network. This is known as the
            width multiplier in the MobileNetV2 paper.
            - If `alpha` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `alpha` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `alpha` = 1, the default number of filters from the paper
                is used at each layer.
        depth_multiplier: depth multiplier for depthwise convolution
            (also called the resolution multiplier)
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
              'imagenet' (pre-training on ImageNet),
              or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor (i.e. output of
            `layers.Input()`)
            to use as image input for the model.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape or invalid depth_multiplier, alpha,
            rows when weights='imagenet'
    """

    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as ImageNet with `include_top` '
                         'as true, `classes` should be 1000')

    # Determine proper input shape and default size.
    # If both input_shape and input_tensor are used, they should match
    if input_shape is not None and input_tensor is not None:
        try:
            is_input_t_tensor = K.is_keras_tensor(input_tensor)
        except ValueError:
            try:
                is_input_t_tensor = K.is_keras_tensor(
                    get_source_inputs(input_tensor))
            except ValueError:
                raise ValueError('input_tensor: ', input_tensor,
                                 'is not a valid Keras tensor')
        if is_input_t_tensor:
            if K.image_data_format() == 'channels_first':
                if input_tensor._keras_shape[1] != input_shape[1]:
                    raise ValueError('input_shape: ', input_shape,
                                     'and input_tensor: ', input_tensor,
                                     'do not meet the same shape requirements')
            else:
                if input_tensor._keras_shape[2] != input_shape[1]:
                    raise ValueError('input_shape: ', input_shape,
                                     'and input_tensor: ', input_tensor,
                                     'do not meet the same shape requirements')
        else:
            raise ValueError('input_tensor specified: ', input_tensor,
                             'is not a keras tensor')

    # If input_shape is None, infer shape from input_tensor
    if input_shape is None and input_tensor is not None:

        try:
            K.is_keras_tensor(input_tensor)
        except ValueError:
            raise ValueError('input_tensor: ', input_tensor,
                             'is type: ', type(input_tensor),
                             'which is not a valid type')

        if input_shape is None and not K.is_keras_tensor(input_tensor):
            default_size = 224
        elif input_shape is None and K.is_keras_tensor(input_tensor):
            if K.image_data_format() == 'channels_first':
                rows = input_tensor._keras_shape[2]
                cols = input_tensor._keras_shape[3]
            else:
                rows = input_tensor._keras_shape[1]
                cols = input_tensor._keras_shape[2]

            if rows == cols and rows in [96, 128, 160, 192, 224]:
                default_size = rows
            else:
                default_size = 224

    # If input_shape is None and no input_tensor
    elif input_shape is None:
        default_size = 224

    # If input_shape is not None, assume default size
    else:
        if K.image_data_format() == 'channels_first':
            rows = input_shape[1]
            cols = input_shape[2]
        else:
            rows = input_shape[0]
            cols = input_shape[1]

        if rows == cols and rows in [96, 128, 160, 192, 224]:
            default_size = rows
        else:
            default_size = 224

    input_shape = _obtain_input_shape(input_shape,
                                      default_size=default_size,
                                      min_size=32,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if K.image_data_format() == 'channels_last':
        row_axis, col_axis = (0, 1)
    else:
        row_axis, col_axis = (1, 2)
    rows = input_shape[row_axis]
    cols = input_shape[col_axis]

    if weights == 'imagenet':
        if depth_multiplier != 1:
            raise ValueError('If imagenet weights are being loaded, '
                             'depth multiplier must be 1')

        if alpha not in [0.35, 0.50, 0.75, 1.0, 1.3, 1.4]:
            raise ValueError('If imagenet weights are being loaded, '
                             'alpha must be one of `0.35`, `0.50`, `0.75`, '
                             '`1.0`, `1.3` or `1.4` only.')

        if rows != cols or rows not in [96, 128, 160, 192, 224]:
            if rows is None:
                rows = 224
                warnings.warn('MobileNet shape is undefined.'
                              ' Weights for input shape '
                              '(224, 224) will be loaded.')
            else:
                raise ValueError('If imagenet weights are being loaded, '
                                 'input must have a static square shape '
                                 '(one of (96, 96), (128, 128), (160, 160), '
                                 '(192, 192), or (224, 224)). '
                                 'Input shape provided = %s' % (input_shape,))

    if K.image_data_format() != 'channels_last':
        warnings.warn('The MobileNet family of models is only available '
                      'for the input data format "channels_last" '
                      '(width, height, channels). '
                      'However your settings specify the default '
                      'data format "channels_first" (channels, width, height).'
                      ' You should set `image_data_format="channels_last"` '
                      'in your Keras config located at ~/.keras/keras.json. '
                      'The model being returned right now will expect inputs '
                      'to follow the "channels_last" data format.')
        K.set_image_data_format('channels_last')
        old_data_format = 'channels_first'
    else:
        old_data_format = None

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    first_block_filters = _make_divisible(32 * alpha, 8)
    x = Conv2D(first_block_filters,
               kernel_size=3,
               strides=(2, 2), padding='same',
               use_bias=False, name='Conv1')(img_input)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999, name='bn_Conv1')(x)
    x = Activation(relu6, name='Conv1_relu')(x)

    x = _first_inverted_res_block(x,
                                  filters=16,
                                  alpha=alpha,
                                  stride=1,
                                  expansion=1,
                                  block_id=0)

    x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,
                            expansion=6, block_id=1)
    x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,
                            expansion=6, block_id=2)

    x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,
                            expansion=6, block_id=3)
    x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
                            expansion=6, block_id=4)
    x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
                            expansion=6, block_id=5)

    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=2,
                            expansion=6, block_id=6)
    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
                            expansion=6, block_id=7)
    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
                            expansion=6, block_id=8)
    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
                            expansion=6, block_id=9)

    x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
                            expansion=6, block_id=10)
    x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
                            expansion=6, block_id=11)
    x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
                            expansion=6, block_id=12)

    x = _inverted_res_block(x, filters=160, alpha=alpha, stride=2,
                            expansion=6, block_id=13)
    x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1,
                            expansion=6, block_id=14)
    x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1,
                            expansion=6, block_id=15)

    x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1,
                            expansion=6, block_id=16)

    # no alpha applied to last conv as stated in the paper:
    # if the width multiplier is greater than 1 we
    # increase the number of output channels
    if alpha > 1.0:
        last_block_filters = _make_divisible(1280 * alpha, 8)
    else:
        last_block_filters = 1280

    x = Conv2D(last_block_filters,
               kernel_size=1,
               use_bias=False,
               name='Conv_1')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999, name='Conv_1_bn')(x)
    x = Activation(relu6, name='out_relu')(x)

    if include_top:
        x = GlobalAveragePooling2D()(x)
        x = Dense(classes, activation='softmax',
                  use_bias=True, name='Logits')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = Model(inputs, x, name='mobilenetv2_%0.2f_%s' % (alpha, rows))

    # load weights
    if weights == 'imagenet':
        if K.image_data_format() == 'channels_first':
            raise ValueError('Weights for "channels_first" format '
                             'are not available.')

        if include_top:
            model_name = 'mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' + \
                str(alpha) + '_' + str(rows) + '.h5'
            weight_path = BASE_WEIGHT_PATH + model_name
            weights_path = get_file(model_name, weight_path,
                                    cache_subdir='models')
        else:
            model_name = 'mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' + \
                str(alpha) + '_' + str(rows) + '_no_top' + '.h5'
            weight_path = BASE_WEIGHT_PATH + model_name
            weights_path = get_file(model_name, weight_path,
                                    cache_subdir='models')
        print('Loading weights from', weights_path)
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    if old_data_format:
        K.set_image_data_format(old_data_format)
    return model
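The reloading pattern described in the docstring, as a sketch (it assumes `relu6` is defined in this module and `load_model` is imported from keras.models):

# Sketch: save and reload a MobileNetV2, passing relu6 via custom_objects.
model = MobileNetV2(weights='imagenet', include_top=True)
model.save('mobilenet_v2.h5')
reloaded = load_model('mobilenet_v2.h5', custom_objects={'relu6': relu6})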
Example #35
0
def parse_args():
    """Parse command line arguments.

    The args namespace is used promiscuously in this module.
    Its fields control the tensor definition, dataset generation, training, file I/O and evaluation.
    Some of the fields are typically dicts or lists that are not actually set on the command line,
    but via a companion argument also in the namespace.
    For example, input_symbols is set via the input_symbol_set string
    and annotations is set via the annotation_set string.
    Here we also seed the random number generator.
    The Keras image data format is also set here, via the channels_last or channels_first arguments.

    Returns:
        namespace: The args namespace that is used throughout this module.
    """
    parser = argparse.ArgumentParser()

    # Tensor defining arguments
    parser.add_argument('--tensor_name', default='read_tensor', choices=defines.TENSOR_MAPS_1D+defines.TENSOR_MAPS_2D,
                        help='String key which identifies the map from tensor channels to their meaning.')
    parser.add_argument('--labels', default=defines.SNP_INDEL_LABELS,
                        help='Dict mapping label names to their index within label tensors.')
    parser.add_argument('--input_symbol_set', default='dna_indel', choices=defines.INPUT_SYMBOLS.keys(),
                        help='Key which maps to an input symbol to index mapping.')
    parser.add_argument('--input_symbols', help='Dict mapping input symbols to their index within input tensors, '
                        + 'initialised via the input_symbol_set argument.')
    parser.add_argument('--batch_size', default=32, type=int,
                        help='Mini batch size for stochastic gradient descent algorithms.')
    parser.add_argument('--read_limit', default=128, type=int,
                        help='Maximum number of reads to load.')
    parser.add_argument('--window_size', default=128, type=int,
                        help='Size of sequence window to use as input, typically centered at a variant.')
    parser.add_argument('--base_quality_mode', default='phot', choices=['phot', 'phred', '1hot'],
                        help='How to treat base qualities, must be in [phot, phred, 1hot]')
    parser.add_argument('--channels_last', default=True, dest='channels_last', action='store_true',
                        help='Store the channels in the last axis of tensors, tensorflow->true, theano->false')
    parser.add_argument('--channels_first', dest='channels_last', action='store_false',
                        help='Store the channels in the first axis of tensors, tensorflow->false, theano->true')

    # Annotation arguments
    parser.add_argument('--annotations', help='Array of annotation names, initialised via annotation_set argument')
    parser.add_argument('--annotation_set', default='best_practices', choices=defines.ANNOTATIONS_SETS.keys(),
                        help='Key which maps to an annotations list (or _ to ignore annotations).')

    # Dataset generation related arguments
    parser.add_argument('--samples', default=500, type=int,
                        help='Maximum number of data samples to write or load.')
    parser.add_argument('--downsample_snps', default=1.0, type=float,
                        help='Rate of SNP examples that are kept must be in [0.0, 1.0].')
    parser.add_argument('--downsample_indels', default=1.0, type=float,
                        help='Rate of INDEL examples that are kept must be in [0.0, 1.0].')
    parser.add_argument('--downsample_not_snps', default=1.0, type=float,
                        help='Rate of NOT_SNP examples that are kept must be in [0.0, 1.0].')
    parser.add_argument('--downsample_not_indels', default=1.0, type=float,
                        help='Rate of NOT_INDEL examples that are kept must be in [0.0, 1.0].')
    parser.add_argument('--downsample_reference', default=0.001, type=float,
                        help='Rate of reference genotype examples that are kept must be in [0.0, 1.0].')
    parser.add_argument('--downsample_homozygous', default=0.001, type=float,
                        help='Rate of homozygous genotypes that are kept must be in [0.0, 1.0].')
    parser.add_argument('--start_pos', default=0, type=int,
                        help='Genomic position start for parallel tensor writing.')
    parser.add_argument('--end_pos', default=0, type=int,
                        help='Genomic position end for parallel tensor writing.')
    parser.add_argument('--skip_positive_class', default=False, action='store_true',
                        help='Whether to skip positive examples when writing tensors.')
    parser.add_argument('--chrom', help='Chromosome to load for parallel tensor writing.')


    # I/O files and directories: vcfs, bams, beds, hd5, fasta
    parser.add_argument('--output_dir', default='./', help='Directory to write models or other data out.')
    parser.add_argument('--image_dir', default=None, help='Directory to write images and plots to.')
    parser.add_argument('--reference_fasta', help='The reference FASTA file (e.g. HG19 or HG38).')
    parser.add_argument('--weights_hd5', default='',
                        help='A hd5 file of weights to initialize a model, will use all layers with names that match.')
    parser.add_argument('--architecture', default='',
                        help='A json file specifying semantics and architecture of a neural net.')
    parser.add_argument('--bam_file',
                        help='Path to a BAM file to train from or generate tensors with.')
    parser.add_argument('--train_vcf',
                        help='Path to a VCF that has verified true calls from NIST, platinum genomes, etc.')
    parser.add_argument('--input_vcf',
                        help='Haplotype Caller or VQSR generated VCF with raw annotation values [and quality scores].')
    parser.add_argument('--output_vcf', default=None,
                        help='Optional VCF to write to.')
    parser.add_argument('--bed_file',
                        help='Bed file specifying high confidence intervals associated with args.train_vcf.')
    parser.add_argument('--data_dir',
                        help='Directory of tensors, must be split into test/valid/train directories '
                            + 'with subdirectories for each label.')

    # Training and optimization related arguments
    parser.add_argument('--epochs', default=25, type=int,
                        help='Number of epochs, typically passes through the entire dataset, not always well-defined.')
    parser.add_argument('--batch_normalization', default=False, action='store_true',
                        help='Mini batch normalization layers after convolutions.')
    parser.add_argument('--patience', default=4, type=int,
                        help='Maximum number of epochs to run without validation loss improvements (Early Stopping).')
    parser.add_argument('--training_steps', default=80, type=int,
                        help='Number of training batches to examine in an epoch.')
    parser.add_argument('--validation_steps', default=40, type=int,
                        help='Number of validation batches to examine in an epoch validation.')
    parser.add_argument('--iterations', default=5, type=int,
                        help='Generic iteration limit for hyperparameter optimization, animation, and other counts.')
    parser.add_argument('--tensor_board', default=False, action='store_true',
                        help='Add the tensor board callback.')

    # Architecture defining arguments
    parser.add_argument('--conv_width', default=5, type=int, help='Width of convolutional kernels.')
    parser.add_argument('--conv_height', default=5, type=int, help='Height of convolutional kernels.')
    parser.add_argument('--conv_dropout', default=0.0, type=float,
                        help='Dropout rate in convolutional layers.')
    parser.add_argument('--conv_batch_normalize', default=False, action='store_true',
                        help='Batch normalize convolutional layers.')
    parser.add_argument('--conv_layers', nargs='+', default=[128, 96, 64, 48], type=int,
                        help='List of sizes for each convolutional filter layer')
    parser.add_argument('--padding', default='valid', choices=['valid', 'same'],
                        help='Valid or same border padding for convolutional layers.')
    parser.add_argument('--spatial_dropout', default=False, action='store_true',
                        help='Spatial dropout on the convolutional layers.')
    parser.add_argument('--max_pools', nargs='+', default=[], type=int,
                        help='List of max-pooling layers.')
    parser.add_argument('--fc_layers', nargs='+', default=[32], type=int,
                        help='List of sizes for each fully connected layer')
    parser.add_argument('--fc_dropout', default=0.0, type=float,
                        help='Dropout rate in fully connected layers.')
    parser.add_argument('--fc_batch_normalize', default=False, action='store_true',
                        help='Batch normalize fully connected layers.')
    parser.add_argument('--annotation_units', default=16, type=int,
                        help='Number of units connected to the annotation input layer.')
    parser.add_argument('--annotation_shortcut', default=False, action='store_true',
                        help='Shortcut connections on the annotations.')

    # Evaluation related arguments
    parser.add_argument('--score_keys', nargs='+', default=['VQSLOD'],
                        help='List of variant score keys for performance comparisons.')
    parser.add_argument('--tranches', nargs='+', default=[100, 99.9, 99, 95, 90], type=float,
                        help='List of score percentile tranches for performance comparisons.')

    # Run specific arguments
    parser.add_argument('--mode', help='High level recipe: write tensors, train, test or evaluate models.')
    parser.add_argument('--id', default='no_id',
                        help='Identifier for this run, user-defined string to keep experiments organized.')
    parser.add_argument('--gatk_version', default='4.1.0.0',
                        help='GATK version used to run this code.')
    parser.add_argument('--model_version', default='1.0',
                        help='Model version for this run.')
    parser.add_argument('--random_seed', default=12878, type=int,
                        help='Random seed to use throughout run. Always use np.random.')

    # Parse, print, set annotations and seed
    args = parser.parse_args()
    args.annotations = annotations_from_args(args)
    args.input_symbols = input_symbols_from_args(args)
    np.random.seed(args.random_seed)

    if args.channels_last:
        K.set_image_data_format('channels_last')
    else:
        K.set_image_data_format('channels_first')

    print('Arguments are', args)
    return args
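A hypothetical invocation (script name and flag values are illustrative) showing how the data-format flags described in the docstring propagate:

# python training.py --mode train --tensor_name read_tensor --channels_first
# parse_args() then calls K.set_image_data_format('channels_first'), so all
# downstream tensor generation and model building use channels-first layout.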
Example #36
0
def model_init(input_shape, **kwargs):
    from keras.models import Sequential
    from keras.layers.core import Dense, Activation, Flatten, Dropout
    from keras.layers.convolutional import Convolution2D, MaxPooling2D
    from keras.layers.normalization import BatchNormalization
    from keras.backend import set_image_data_format

    assert (len(input_shape) == 3 and input_shape[2] == 3)
    set_image_data_format('channels_last')

    try:
        from keras.layers.core import SpatialDropout2D
    except ImportError:
        from keras import __version__ as __kv__
        from warnings import warn
        warn('no SpatialDropout2D layer in keras version: %s' % __kv__)
        SpatialDropout2D = Dropout

    # need to set the input_shape to first layer for a new model
    model = Sequential()
    model.add(
        Convolution2D(32, (3, 3), padding='same', input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.1))

    # 2
    model.add(Convolution2D(48, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.1))

    # 3
    model.add(Convolution2D(64, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.2))

    # 4
    model.add(Convolution2D(128, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.2))

    # 5
    model.add(Convolution2D(164, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.3))

    # 6
    model.add(Convolution2D(172, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.3))

    # 7
    model.add(Convolution2D(196, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.4))

    # 8
    model.add(Convolution2D(224, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.4))

    # 9
    model.add(Convolution2D(248, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.5))

    # 10
    model.add(Convolution2D(296, (2, 2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(0.5))

    model.add(Flatten())
    model.add(Dense(2048))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    return dict(model=model, lr_mult=1.0)
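
A usage sketch for the returned dict (the 10-class head is hypothetical; the input must be large enough to survive the ten pooling stages, e.g. 2048 pixels per side):

from keras.layers.core import Dense, Activation

info = model_init((2048, 2048, 3))
net = info['model']
net.add(Dense(10))               # hypothetical 10-class head
net.add(Activation('softmax'))
net.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])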
Example #37
0
def test_equivalence_channels_last():
    K.set_image_data_format('channels_last')
    _test_equivalence('channels_last')
Example #38
0
from keras.datasets import mnist
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
from keras import backend
backend.set_image_data_format('channels_first')

# Set the random seed
seed = 7
np.random.seed(seed)

# Load the MNIST dataset via Keras
(X_train, y_train), (X_validation, y_validation) = mnist.load_data()

X_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')
X_validation = X_validation.reshape(X_validation.shape[0], 1, 28,
                                    28).astype('float32')

# Normalize the data to the range 0-1
X_train = X_train / 255
X_validation = X_validation / 255

# One-hot encode the labels
y_train = np_utils.to_categorical(y_train)
y_validation = np_utils.to_categorical(y_validation)
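
One plausible continuation of this setup (the architecture below is illustrative, not from the original source; it uses the channels-first layout configured above):

model = Sequential()
model.add(Conv2D(32, (5, 5), input_shape=(1, 28, 28), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_validation, y_validation),
          epochs=10, batch_size=200)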
Example #39
0
from keras.layers import Input, Conv2D, MaxPooling2D, concatenate, Conv2DTranspose
from keras.models import Model
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
from keras import backend as K

from skimage.transform import resize
from skimage.io import imsave

import numpy as np
import os

from data import load_train_data, load_test_data

K.set_image_data_format('channels_last')  # TF dimension ordering

smooth = 1.
img_rows = 96
img_cols = 96


# Implementing the Sørensen–Dice formula
# https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
def dice_coef(y_true, y_pred):
    y_true_flat = K.flatten(y_true)
    y_pred_flat = K.flatten(y_pred)
    intersection = K.sum(y_true_flat * y_pred_flat)
    return (2. * intersection + smooth) / (K.sum(y_true_flat) +
                                           K.sum(y_pred_flat) + smooth)

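In LaTeX, the smoothed coefficient computed by dice_coef is

    \mathrm{DSC} = \frac{2\,|X \cap Y| + s}{|X| + |Y| + s}

where X and Y are the flattened true and predicted masks and s is the `smooth` term that keeps the ratio defined when both masks are empty.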
Example #40
0
def getModelGivenModelOptionsAndWeightInits(args):
    # read in the arguments
    w0 = args.w0
    w1 = args.w1
    init_weights = args.init_weights
    seed = args.seed

    np.random.seed(seed)
    import keras
    from keras.layers import (
        Activation, AveragePooling1D, BatchNormalization,
        Conv1D, Conv2D, Dense, Dropout, Flatten, Input,
        MaxPooling1D, MaxPooling2D, Reshape,
        PReLU, Add
    )
    from keras.models import Model
    from keras.optimizers import Adadelta, SGD, RMSprop
    import keras.losses
    from keras.constraints import maxnorm
    from keras.regularizers import l1, l2
    from keras import backend as K
    K.set_image_data_format('channels_last')
    print(K.image_data_format())

    import collections
    model_inputs = ["data/genome_data_dir"]
    shapes = {'data/genome_data_dir': [1000, 4]}
    keras_inputs = collections.OrderedDict([(name, Input(shape=shapes[name], name=name)) for name in model_inputs])
    inputs = keras_inputs
    num_tasks = args.ntasks  # number of output tasks; assumed to come from args
    seq_preds = inputs["data/genome_data_dir"]
    num_filters = (48, 64, 100, 150, 300, 200, 200, 200, 200)
    conv_width = (3, 3, 3, 7, 7, 7, 3, 3, 7)
    batch_norm = True
    pool_width = (3, 4, 4)
    pool_stride = (3, 4, 4)
    fc_layer_sizes = (1000, 1000)
    dropout = (0.3, 0.3)
    final_dropout = 0.0
    trainable = 1
    final_layer_name = 'tuned_i_score'
    j = 0
    for i, (nb_filter, nb_col) in enumerate(zip(num_filters, conv_width)):
        seq_preds = Conv1D(nb_filter, nb_col, kernel_initializer='he_normal', trainable=bool(trainable))(seq_preds)
        if batch_norm:
            seq_preds = BatchNormalization(trainable=bool(trainable))(seq_preds)
        seq_preds = Activation('relu', trainable=bool(trainable))(seq_preds)

        if i in (4, 7, 8):
            seq_preds = MaxPooling1D(pool_width[j], pool_stride[j], trainable=bool(trainable))(seq_preds)
            j = j + 1

    seq_preds = Flatten()(seq_preds)

    # fully connect, drop before fc layers
    for drop_rate, fc_layer_size in zip(dropout, fc_layer_sizes):
        seq_preds = Dense(fc_layer_size)(seq_preds)
        if batch_norm:
            seq_preds = BatchNormalization()(seq_preds)
        seq_preds = Activation('relu')(seq_preds)
    seq_preds = Dense(num_tasks, name=final_layer_name)(seq_preds)
    seq_preds = Activation('sigmoid')(seq_preds)
    random_weight_model = Model(inputs=list(keras_inputs.values()), outputs=seq_preds)
    model = random_weight_model

    if init_weights is not None:
        #load the weight initializations
        model.load_weights(init_weights, by_name=True)

    adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    print("compiling!")
    if w0 is not None:
        loss = get_weighted_binary_crossentropy(w0_weights=w0, w1_weights=w1)
    else:
        loss = get_ambig_binary_crossentropy()
    model.compile(optimizer=adam,
                  loss=loss,
                  metrics=[recall, specificity, fpr, fnr, precision, f1])
    return model
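The loss factories get_weighted_binary_crossentropy and get_ambig_binary_crossentropy are defined elsewhere; a minimal sketch of what the weighted variant could look like (the per-task weighting semantics are an assumption):

def get_weighted_binary_crossentropy(w0_weights, w1_weights):
    # Sketch (assumed semantics): w0 weights negative labels, w1 positive
    # labels, per output task.
    from keras import backend as K
    w0 = K.constant(w0_weights, dtype='float32')
    w1 = K.constant(w1_weights, dtype='float32')

    def weighted_bce(y_true, y_pred):
        eps = K.epsilon()
        y_pred = K.clip(y_pred, eps, 1.0 - eps)
        per_task = -(w1 * y_true * K.log(y_pred) +
                     w0 * (1.0 - y_true) * K.log(1.0 - y_pred))
        return K.mean(per_task, axis=-1)

    return weighted_bce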
Example #41
0
# see:
# https://github.com/preddy5/segnet/blob/master/segnet.py
# https://github.com/0bserver07/Keras-SegNet-Basic/blob/master/SegNet-Basic.py
# https://github.com/imlab-uiip/keras-segnet/blob/master/build_model.py

from keras.models import Sequential
from keras.layers import Layer, Conv2D, BatchNormalization, Activation, MaxPooling2D, Reshape, Permute, UpSampling2D, ZeroPadding2D

from keras import backend as K

K.set_image_data_format("channels_first")


def model(img_channels=3, img_width=256, img_height=256, classes=7):
    """define a basic segnet model."""
    model = Sequential()

    # encoder
    model.add(
        ZeroPadding2D(padding=1,
                      input_shape=(img_channels, img_height, img_width)))
    model.add(Conv2D(filters=64, kernel_size=3, padding="valid"))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=2))

    model.add(ZeroPadding2D(padding=1))
    model.add(Conv2D(filters=128, kernel_size=3, padding="valid"))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=2))
Example #42
0
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import math
from scipy.io import loadmat

from keras.models import Sequential, Model, load_model
from keras.layers import Input, Dense, Flatten, Dropout, Activation, Lambda, Permute, Reshape
from keras.layers import Convolution2D, ZeroPadding2D, MaxPooling2D

from keras import backend as K
import cv2

K.set_image_data_format('channels_last')  # WARNING: important for image and tensor dimension ordering


def convblock(cdim, nb, bits=3):
    L = []
    for k in range(1, bits + 1):
        convname = 'conv' + str(nb) + '_' + str(k)
        L.append(
            Convolution2D(cdim,
                          kernel_size=(3, 3),
                          padding='same',
                          activation='relu',
                          name=convname))
    L.append(MaxPooling2D((2, 2), strides=(2, 2)))
    return L
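A sketch of stacking a convblock onto an input tensor, VGG-style (the filter count and block index are illustrative):

inp = Input(shape=(224, 224, 3))
x = inp
for layer in convblock(64, nb=1, bits=2):
    x = layer(x)
# x is now the output of conv1_1, conv1_2 and a 2x2 max-pool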
Example #43
0
import random
import sys, os

import numpy as np
from keras.utils import np_utils
from keras.layers import Input, Conv2D, Dense, MaxPooling2D, Reshape, Flatten, Activation, Dropout, BatchNormalization, GlobalAveragePooling2D
from keras.models import Model
from keras.backend import tf as ktf
from keras import optimizers
from keras.callbacks import History, EarlyStopping, ReduceLROnPlateau, CSVLogger, ModelCheckpoint
import keras.backend as K
from keras.engine import Layer
from DLUtils.evaluate import DemographicClassifier
from DLUtils.DataGenerator import face_12net_train_generator, face_12net_eval_generator
from DLUtils.MemoryReqs import get_model_memory_usage, model_memory_params
from DLUtils.configs import get_configs

K.set_image_data_format('channels_last')

np.random.seed(123)

def net12_model():

    input_shape = (12, 12, 3)
    nb_classes = 2

    x_input = Input(input_shape)
    # Conv Layer 1 # Input (12,12,3)
Example #44
0
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.utils import get_file
from keras import backend as K
# K.set_image_dim_ordering('tf')
K.set_image_data_format('channels_last')
#from keras.utils import np_utils

#from keras.datasets import mnist
'''
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Conv2D, Flatten, MaxPooling2D, Dropout, BatchNormalization
from tensorflow.keras.utils import to_categorical
from keras import backend as K
from tensorflow.keras.utils import get_file
from tensorflow.keras.utils import plot_model
from tensorflow.keras import backend
from tensorflow.python.keras import backend as k