Example #1
import numpy as np
from cntk import Constant, parameter
from cntk.internal import sanitize_dtype_cntk  # early CNTK betas exposed this from cntk.utils
from cntk.ops import element_times, exp, log

def stabilize(operand):
    # beta = 1/f * log(1 + exp(f * param)): a learned softplus stabilizer scale (f = 4.0)
    scalar_constant = 4.0
    f = Constant.scalar(sanitize_dtype_cntk(np.float32), scalar_constant)
    fInv = Constant.scalar(sanitize_dtype_cntk(np.float32), 1.0 / scalar_constant)

    beta = element_times(fInv, log(Constant.scalar(sanitize_dtype_cntk(np.float32), 1.0)
                                   + exp(element_times(f, parameter(shape=(), value=0.99537863)))))
    return element_times(beta, operand)
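A minimal usage sketch, assuming the beta-era CNTK API used above; the input shape and data are illustrative, not from the original:

from cntk import input_variable

x = input_variable(shape=(4,), dtype=np.float32)       # hypothetical input
y = stabilize(x)                                       # scales x by the learned beta
print(y.eval({x: np.ones((1, 4), dtype=np.float32)}))  # one sample of shape (4,)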
Example #2
import cntk
from cntk import load_model, combine, placeholder, Constant
from cntk.layers import Dense
from cntk.ops.functions import CloneMethod

def create_model(base_model_file, feature_node_name, last_hidden_node_name, num_classes, input_features, freeze=False):
    # Load the pretrained classification net and find nodes
    base_model = load_model(base_model_file)
    feature_node = None
    last_node = None
    # feature node: search the whole graph, matching by name or by uid
    for n in cntk.logging.graph.depth_first_search(base_model, lambda x: True):
        if n.name.strip() == feature_node_name or n.uid.strip() == feature_node_name:
            feature_node = n
    print("feature node:", feature_node)
    if feature_node is None:
        raise Exception("Failed to locate feature node: " + feature_node_name)
    # last hidden node: search only the graph's output nodes
    for n in cntk.logging.get_node_outputs(base_model):
        if n.name.strip() == last_hidden_node_name or n.uid.strip() == last_hidden_node_name:
            last_node = n
    print("last hidden node:", last_node)
    if last_node is None:
        raise Exception("Failed to locate last hidden node: " + last_hidden_node_name)

    # Clone the desired layers, with weights frozen when requested
    cloned_layers = combine([last_node.owner]).clone(
        CloneMethod.freeze if freeze else CloneMethod.clone,
        {feature_node: placeholder(name='features')})

    # Add new dense layer for class prediction
    feat_norm = input_features - Constant(114)  # subtract the mean pixel value
    cloned_out = cloned_layers(feat_norm)
    # new_output_node_name is a module-level constant in the original script
    z = Dense(num_classes, activation=None, name=new_output_node_name)(cloned_out)

    return z
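A hedged usage sketch; the model path, node names, class count, and input shape below are placeholders rather than values from the original:

from cntk import input_variable

new_output_node_name = 'prediction'  # assumed module-level name referenced by create_model
features = input_variable((3, 224, 224), name='input')
model = create_model('ResNet18_ImageNet.model', 'features', 'z.x',
                     num_classes=10, input_features=features, freeze=True)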
Example #3
import numpy as np
from .ops_test_utils import AA, PRECISION_TO_TYPE  # AA = np.asarray

def test_op_reduce_mean_all_constant(input_data, axis, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    value = AA(input_data, dtype=dt)
    from .. import reduce_mean
    from cntk import Axis, Constant
    a = Constant(value, name='a')
    # Reduce over every static axis of the constant in one shot
    input_op = reduce_mean(a, axis=Axis.all_axes())
    expected_forward = AA(np.mean(value))
    actual_forward = input_op.eval()
    assert np.allclose(actual_forward, expected_forward)
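The same reduction can be exercised outside the test harness; a standalone sketch (the data values are illustrative):

import numpy as np
from cntk import Axis, Constant
from cntk.ops import reduce_mean

a = Constant(np.array([[1., 2.], [4., 8.]], dtype=np.float32))
print(reduce_mean(a, axis=Axis.all_axes()).eval())  # mean of all entries: 3.75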
Example #4
import numpy as np
from .ops_test_utils import AA  # AA = np.asarray

def test_op_reduce_mean_all_constant(input_data, axis, device_id, precision):
    # dt = PRECISION_TO_TYPE[precision]
    # FIXME: we'd like to do dt = PRECISION_TO_TYPE[precision],
    # but there seems to be an issue with actual_forward below:
    # it gets computed correctly, yet by the time np.allclose executes
    # it contains garbage values. The problem goes away if one uses
    # actual_forward = np.copy(input_op.eval())
    dt = np.float32
    value = AA(input_data, dtype=dt)
    from .. import reduce_mean
    from cntk import Axis, Constant
    a = Constant(value, name='a')
    input_op = reduce_mean(a, axis=Axis.all_axes())
    expected_forward = AA(np.mean(value))
    actual_forward = input_op.eval()
    assert np.allclose(actual_forward, expected_forward)
Example #5

from cntk import load_model, combine, placeholder, Constant
from cntk.layers import Dense
from cntk.logging.graph import find_by_name
from cntk.ops.functions import CloneMethod

def create_model(base_model_file, feature_node_name, last_hidden_node_name, num_classes, input_features, freeze=False):
    # Load the pretrained classification net and find nodes
    base_model   = load_model(base_model_file)
    feature_node = find_by_name(base_model, feature_node_name)
    last_node    = find_by_name(base_model, last_hidden_node_name)

    # Clone the desired layers with fixed weights
    cloned_layers = combine([last_node.owner]).clone(
        CloneMethod.freeze if freeze else CloneMethod.clone,
        {feature_node: placeholder(name='features')})

    # Add new dense layer for class prediction
    feat_norm  = input_features - Constant(114)  # subtract the mean pixel value
    cloned_out = cloned_layers(feat_norm)
    # new_output_node_name is a module-level constant in the original script
    z          = Dense(num_classes, activation=None, name=new_output_node_name)(cloned_out)

    return z
Example #6
def FU(x):
    # Wraps UF (assumed to be defined elsewhere in the original source),
    # fixing its first argument to the scalar Constant(1)
    return UF(Constant(1), x)