from cntk import load_model, placeholder, constant
from cntk.layers import Dense, Dropout, GlobalAveragePooling
from cntk.logging.graph import find_by_name
from cntk.ops import combine
from cntk.ops.functions import CloneMethod


def create_model(base_model_file,
                 input_features,
                 num_classes,
                 dropout_rate=0.5,
                 freeze_weights=False):
    # Load the pretrained classification net and find nodes
    base_model = load_model(base_model_file)
    feature_node = find_by_name(base_model, 'features')
    beforePooling_node = find_by_name(base_model, "z.x.x.r")
    #graph.plot(base_model, filename="base_model.pdf") # Write graph visualization

    # Clone the model up to (and including) the node right before the pooling layer, i.e. z.x.x.r
    modelCloned = combine([beforePooling_node.owner]).clone(
        CloneMethod.freeze if freeze_weights else CloneMethod.clone,
        {feature_node: placeholder(name='features')})

    # Center the input around zero and set model input.
    # Do this early, to avoid CNTK bug with wrongly estimated layer shapes
    feat_norm = input_features - constant(114)
    model = modelCloned(feat_norm)

    # Pool over all spatial dimensions and add dropout layer
    avgPool = GlobalAveragePooling(name="poolingLayer")(model)
    if dropout_rate > 0:
        avgPoolDrop = Dropout(dropout_rate)(avgPool)
    else:
        avgPoolDrop = avgPool

    # Add new dense layer for class prediction
    finalModel = Dense(num_classes, activation=None,
                       name="prediction")(avgPoolDrop)
    return finalModel
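
A minimal call sketch for the transfer-learning helper above. The model file, input dimensions, and the node names 'features' and 'z.x.x.r' looked up inside create_model are assumptions tied to the particular pretrained network (they match the CNTK ResNet transfer-learning samples); verify them against your own base model.

# Hypothetical usage -- the model path and input shape are placeholders.
import cntk as C

base_model_file = "ResNet_18.model"
input_features = C.input_variable((3, 224, 224), name="input")
new_model = create_model(base_model_file, input_features,
                         num_classes=10, dropout_rate=0.5,
                         freeze_weights=True)
print(new_model.output.shape)  # expected: (10,)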
Example #2
def create_resnet_model(input, num_classes):
    conv = convolution_bn(input, (3, 3), 16)
    r1_1 = resnet_basic_stack(conv, 16, 3)

    r2_1 = resnet_basic_inc(r1_1, 32)
    r2_2 = resnet_basic_stack(r2_1, 32, 2)

    r3_1 = resnet_basic_inc(r2_2, 64)
    r3_2 = resnet_basic_stack(r3_1, 64, 2)

    pool = GlobalAveragePooling()(r3_2)
    net = Dense(num_classes, init=he_normal(), activation=None)(pool)

    return net
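
The building blocks called above (convolution_bn, resnet_basic_inc, resnet_basic_stack) are not shown in this snippet. Below is a sketch of how they are typically defined in the CNTK CIFAR-10 ResNet examples, assuming the (input, num_filters, num_stack) argument order used here; the batch-normalization settings are assumptions and may differ in the source repository.

# Sketch of the ResNet helpers assumed by create_resnet_model above.
from cntk.initializer import he_normal
from cntk.layers import BatchNormalization, Convolution
from cntk.ops import relu

def convolution_bn(input, filter_size, num_filters, strides=(1, 1),
                   init=he_normal(), activation=relu):
    # conv -> batch norm -> optional activation
    c = Convolution(filter_size, num_filters, activation=None, init=init,
                    pad=True, strides=strides, bias=False)(input)
    r = BatchNormalization(map_rank=1, normalization_time_constant=4096)(c)
    return activation(r) if activation is not None else r

def resnet_basic(input, num_filters):
    # two 3x3 conv-bn blocks with an identity shortcut
    c1 = convolution_bn(input, (3, 3), num_filters)
    c2 = convolution_bn(c1, (3, 3), num_filters, activation=None)
    return relu(c2 + input)

def resnet_basic_inc(input, num_filters):
    # strided block that increases the filter count and halves the resolution;
    # a strided 1x1 conv projects the shortcut to the new shape
    c1 = convolution_bn(input, (3, 3), num_filters, strides=(2, 2))
    c2 = convolution_bn(c1, (3, 3), num_filters, activation=None)
    s = convolution_bn(input, (1, 1), num_filters, strides=(2, 2), activation=None)
    return relu(c2 + s)

def resnet_basic_stack(input, num_filters, num_stack):
    # num_stack residual blocks at a fixed resolution
    r = input
    for _ in range(num_stack):
        r = resnet_basic(r, num_filters)
    return r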
Example #3
def create_model(input, num_stack_layers, num_classes):
    c_map = [16, 32, 64]

    conv = conv_bn_relu(input, (3, 3), c_map[0])
    r1 = resnet_basic_stack(conv, num_stack_layers, c_map[0])

    r2_1 = resnet_basic_inc(r1, c_map[1])
    r2_2 = resnet_basic_stack(r2_1, num_stack_layers - 1, c_map[1])

    r3_1 = resnet_basic_inc(r2_2, c_map[2])
    r3_2 = resnet_basic_stack(r3_1, num_stack_layers - 1, c_map[2])

    # Global average pooling and output
    pool = GlobalAveragePooling(name='final_avg_pooling')(r3_2)
    z = Dense(num_classes, init=normal(0.01))(pool)
    return z
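
Hypothetical instantiation of the parameterized builder above for CIFAR-10. With num_stack_layers=3 each resolution stage ends up with three residual blocks, which (counting the initial convolution, two 3x3 convolutions per block, and the final dense layer, and ignoring shortcut projections) gives the classic 20-layer configuration.

# Assumed CIFAR-10 dimensions: 3x32x32 inputs, 10 classes.
import cntk as C

input_var = C.input_variable((3, 32, 32))
z = create_model(input_var, num_stack_layers=3, num_classes=10)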
Example #4
def create_model(base_model_file, input_features, params):
    num_classes = params['num_classes']
    dropout_rate = params['dropout_rate']
    freeze_weights = params['freeze_weights']

    # Load the pretrained classification net and find nodes
    base_model = load_model(base_model_file)
    log = logging.getLogger("neuralnets1.utils.create_model")
    log.info('Loaded base model - %s with layers:' % base_model_file)
    node_outputs = get_node_outputs(base_model)
    for layer in node_outputs:
        log.info('%s , %s' % (layer.name, layer.shape))
    graph.plot(base_model,
               filename="base_model.pdf")  # Write graph visualization

    feature_node = find_by_name(base_model, 'features')
    beforePooling_node = find_by_name(base_model, "z.x.x.r")

    # Clone the model up to (and including) the node right before the pooling layer, i.e. z.x.x.r
    modelCloned = combine([beforePooling_node.owner]).clone(
        CloneMethod.freeze if freeze_weights else CloneMethod.clone,
        {feature_node: placeholder(name='features')})

    # Center the input around zero and set model input.
    # Do this early, to avoid CNTK bug with wrongly estimated layer shapes
    feat_norm = input_features - constant(114)
    model = modelCloned(feat_norm)

    # Pool over all spatial dimensions; the layer is named so it can be located later
    avgPool = GlobalAveragePooling(name="poolingLayer")(model)

    # Add a dropout layer with the specified rate (skipped when dropout_rate is 0)
    if dropout_rate > 0:
        avgPoolDrop = Dropout(dropout_rate)(avgPool)
    else:
        avgPoolDrop = avgPool

    # Add new dense layer for class prediction
    finalModel = Dense(num_classes, activation=None, name="Dense")(avgPoolDrop)
    return finalModel
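
A hypothetical call for the dict-driven variant above; the params keys mirror the ones the function reads, while the model path and input shape are placeholders.

# Hypothetical usage of the params-based create_model.
import cntk as C

params = {'num_classes': 5, 'dropout_rate': 0.5, 'freeze_weights': True}
input_features = C.input_variable((3, 224, 224), name="input")
model = create_model("ResNet_18.model", input_features, params)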
Example #5
def convnetlrn_cifar10_dataaug(reader_train,
                               reader_test,
                               epoch_size=50000,
                               max_epochs=80):
    _cntk_py.set_computation_network_trace_level(0)

    # Input variables denoting the features and label data
    input_var = C.input_variable((num_channels, image_height, image_width))
    label_var = C.input_variable((num_classes))

    # input normalization: scale pixel values by 1/256 = 0.00390625
    scaled_input = C.element_times(C.constant(0.00390625), input_var)

    # note: this pooling function is constructed but never used in the network below
    f = GlobalAveragePooling()
    f.update_signature((1, 8, 8))

    with C.layers.default_options():
        z = C.layers.Sequential([
            C.layers.For(
                range(1), lambda: [
                    C.layers.Convolution2D(
                        (3, 3), 32, strides=(1, 1), pad=True),
                    C.layers.Activation(activation=C.relu),
                    C.layers.Convolution2D(
                        (1, 1), 64, strides=(1, 1), pad=False),
                    C.layers.MaxPooling((3, 3), strides=(2, 2), pad=True),
                    C.layers.Dropout(0.5)
                ]),
            C.layers.For(
                range(1), lambda: [
                    C.layers.Convolution2D(
                        (3, 3), 128, strides=(1, 1), pad=True),
                    C.layers.Activation(activation=C.relu),
                    C.layers.Convolution2D(
                        (1, 1), 160, strides=(1, 1), pad=False),
                    C.layers.Activation(activation=C.relu),
                    C.layers.MaxPooling((3, 3), strides=(2, 2), pad=True),
                    C.layers.Dropout(0.5)
                ]),
            C.layers.For(
                range(1), lambda: [
                    C.layers.Convolution2D(
                        (3, 3), 192, strides=(1, 1), pad=True),
                    C.layers.Activation(activation=C.relu),
                    C.layers.Convolution2D(
                        (1, 1), 256, strides=(1, 1), pad=False),
                    C.layers.Activation(activation=C.relu),
                    C.layers.Convolution2D(
                        (1, 1), 10, strides=(1, 1), pad=False),
                    C.layers.Activation(activation=C.relu),
                    C.layers.AveragePooling((8, 8), strides=(1, 1), pad=False)
                ])
        ])(scaled_input)

    print('z.shape', z.shape)
    z = C.flatten(z)

    print('z.shape now', z.shape)

    # loss and metric
    ce = C.cross_entropy_with_softmax(z, label_var)
    pe = C.classification_error(z, label_var)

    # training config
    minibatch_size = 64
    # Set learning parameters
    # learning rate schedule (per sample), decayed in steps over the epochs
    lr_per_sample = ([0.0015625] * 20 + [0.00046875] * 20 +
                     [0.00015625] * 20 + [0.000046875] * 10 + [0.000015625])
    lr_schedule = C.learning_parameter_schedule_per_sample(
        lr_per_sample, epoch_size=epoch_size)
    # momentum
    mms = [0] * 20 + [0.9983347214509387] * 20 + [0.9991670137924583]
    mm_schedule = C.learners.momentum_schedule_per_sample(
        mms, epoch_size=epoch_size)
    l2_reg_weight = 0.002

    # trainer object
    learner = C.learners.momentum_sgd(z.parameters,
                                      lr_schedule,
                                      mm_schedule,
                                      unit_gain=True,
                                      l2_regularization_weight=l2_reg_weight)
    progress_printer = C.logging.ProgressPrinter(tag='Training',
                                                 num_epochs=max_epochs)
    trainer = C.Trainer(z, (ce, pe), learner, progress_printer)

    # define mapping from reader streams to network inputs
    input_map = {
        input_var: reader_train.streams.features,
        label_var: reader_train.streams.labels
    }

    C.logging.log_number_of_parameters(z)
    print()
    # perform model training
    for epoch in range(max_epochs):  # loop over epochs
        sample_count = 0

        while sample_count < epoch_size:  # loop over minibatches in the epoch
            data = reader_train.next_minibatch(
                min(minibatch_size, epoch_size - sample_count),
                input_map=input_map)  # fetch minibatch.
            trainer.train_minibatch(data)  # update model with it
            sample_count += trainer.previous_minibatch_sample_count  # count samples processed so far

        trainer.summarize_training_progress()

    # save model
    modelname = "NIN_test1.dnn"
    z.save(os.path.join(model_path, modelname))

    ### Evaluation action
    epoch_size = 10000
    minibatch_size = 16
    # process minibatches and evaluate the model
    metric_numer = 0
    metric_denom = 0
    sample_count = 0
    minibatch_index = 0

    while sample_count < epoch_size:
        current_minibatch = min(minibatch_size, epoch_size - sample_count)
        data = reader_test.next_minibatch(current_minibatch,
                                          input_map=input_map)
        metric_numer += trainer.test_minibatch(data) * current_minibatch
        metric_denom += current_minibatch
        sample_count += current_minibatch
        minibatch_index += 1

    print("")
    print("Final Results: Minibatch[1-{}]: errs = {:0.2f}% * {}".format(
        minibatch_index + 1, (metric_numer * 100.0) / metric_denom,
        metric_denom))
    print("")

    return metric_numer / metric_denom
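
The training routine above expects two minibatch readers exposing streams named features and labels, plus module-level constants (num_channels, image_height, image_width, num_classes, model_path). Below is one way those might be supplied for CIFAR-10 using a CTF text reader; the file names are placeholders, and using CTFDeserializer instead of an ImageDeserializer with augmentation transforms (which the 'dataaug' in the name suggests) is an assumption.

# Hypothetical setup for convnetlrn_cifar10_dataaug above.
import cntk as C

num_channels, image_height, image_width, num_classes = 3, 32, 32, 10
model_path = "Models"  # placeholder output directory

def create_reader(path, is_training):
    # CTF-format reader exposing 'features' and 'labels' streams,
    # matching the input_map built inside the training loop.
    return C.io.MinibatchSource(
        C.io.CTFDeserializer(path, C.io.StreamDefs(
            features=C.io.StreamDef(field='features',
                                    shape=num_channels * image_height * image_width),
            labels=C.io.StreamDef(field='labels', shape=num_classes))),
        randomize=is_training,
        max_sweeps=C.io.INFINITELY_REPEAT if is_training else 1)

reader_train = create_reader("Train_cntk_text.txt", True)
reader_test = create_reader("Test_cntk_text.txt", False)
error = convnetlrn_cifar10_dataaug(reader_train, reader_test)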